Rename volume.Builder to Mounter and volume.Cleaner to Unmounter

saadali 2016-03-22 22:12:21 -07:00
parent 590038dcf1
commit 79012f6d53
63 changed files with 877 additions and 877 deletions
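
This is a mechanical rename: `Builder`/`Cleaner` become `Mounter`/`Unmounter` across interfaces, factory methods, implementations, tests, and log messages, with no behavioral change. For orientation, here is a sketch of the renamed interfaces reconstructed from the method sets exercised in the hunks below; the authoritative definitions live in `pkg/volume` and are not part of this diff, so treat the exact shape as an assumption:

```go
package volume

// Mounter (formerly Builder) prepares and mounts a volume for a pod.
type Mounter interface {
	// GetPath returns the host path at which the volume is mounted.
	GetPath() string
	// GetAttributes reports volume capabilities; Attributes is sketched
	// in the SELinux note further down.
	GetAttributes() Attributes
	// SetUp prepares and mounts the volume at its default path.
	SetUp(fsGroup *int64) error
	// SetUpAt prepares and mounts the volume at the given path.
	SetUpAt(dir string, fsGroup *int64) error
}

// Unmounter (formerly Cleaner) unmounts and cleans a volume up after use.
type Unmounter interface {
	// GetPath returns the host path at which the volume is mounted.
	GetPath() string
	// TearDown unmounts the volume from its default path.
	TearDown() error
	// TearDownAt unmounts the volume from the given path.
	TearDownAt(dir string) error
}
```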

@@ -391,11 +391,11 @@ type Host interface {
 The secret volume plugin will be responsible for:
-1. Returning a `volume.Builder` implementation from `NewBuilder` that:
+1. Returning a `volume.Mounter` implementation from `NewMounter` that:
   1. Retrieves the secret data for the volume from the API server
   2. Places the secret data onto the container's filesystem
   3. Sets the correct security attributes for the volume based on the pod's `SecurityContext`
-2. Returning a `volume.Cleaner` implementation from `NewClear` that cleans the volume from the
+2. Returning a `volume.Unmounter` implementation from `NewUnmounter` that cleans the volume from the
    container's filesystem
 ### Kubelet: Node-level secret storage
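
The rename keeps the plugin factory surface intact apart from the method names. A sketch of the plugin-facing factories, pieced together from the AWS EBS, Azure File, and CephFS plugin hunks later in this diff (the authoritative `VolumePlugin` definition in `pkg/volume` is not shown here, so the exact shape is an assumption):

```go
package volume

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/types"
)

// VolumePlugin factory methods after the rename, as exercised by the
// plugin implementations in this commit.
type VolumePlugin interface {
	// NewMounter (formerly NewBuilder) returns a Mounter that can set
	// the volume up for the given pod.
	NewMounter(spec *Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error)
	// NewUnmounter (formerly NewCleaner) returns an Unmounter that can
	// tear the named volume down for the given pod UID.
	NewUnmounter(volName string, podUID types.UID) (Unmounter, error)
}
```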

@@ -278,7 +278,7 @@ criteria to activate the kubelet SELinux label management for volumes are:
 3. The `pod.Spec.SecurityContext.SELinuxOptions` field is set
 4. The volume plugin supports SELinux label management
-The `volume.Builder` interface should have a new method added that indicates whether the plugin
+The `volume.Mounter` interface should have a new method added that indicates whether the plugin
 supports SELinux label management:
 ```go
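
The design-doc hunk is cut off at the code fence above. In the code this commit actually touches, the SELinux capability surfaces through `volume.Attributes`, which the kubelet hunks below consult via `vol.Mounter.GetAttributes().SupportsSELinux`. A sketch of the struct, with the field set inferred from the `GetAttributes()` implementations and kubelet checks in this diff (the exact definition is an assumption):

```go
package volume

// Attributes reports what the kubelet may do with a mounted volume.
type Attributes struct {
	ReadOnly        bool // volume is mounted read-only
	Managed         bool // kubelet may manage ownership and labels
	SupportsSELinux bool // kubelet may apply the pod's SELinux label
}
```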

@@ -301,13 +301,13 @@ or read the `pod.Spec.SecurityContext.FSGroup` field.
 ### Volume changes
-The `volume.Builder` interface should have a new method added that indicates whether the plugin
+The `volume.Mounter` interface should have a new method added that indicates whether the plugin
 supports ownership management:
 ```go
 package volume
-type Builder interface {
+type Mounter interface {
     // other methods omitted
     // SupportsOwnershipManagement indicates that this volume supports having ownership
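
The hunk above is truncated mid-comment by the page. Given the method name the document introduces, a plausible completion of the proposed interface addition (the remainder of the comment and the exact signature are assumptions, not part of the shown diff):

```go
package volume

type Mounter interface {
	// other methods omitted

	// SupportsOwnershipManagement indicates that this volume supports
	// having ownership managed by the kubelet. (Assumed completion of
	// the truncated design-doc snippet above.)
	SupportsOwnershipManagement() bool
}
```
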
@@ -403,7 +403,7 @@ func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap,
 // Try to use a plugin for this volume.
 internal := volume.NewSpecFromVolume(volSpec)
-builder, err := kl.newVolumeBuilderFromPlugins(internal, pod, volume.VolumeOptions{RootContext: rootContext}, kl.mounter)
+builder, err := kl.newVolumeMounterFromPlugins(internal, pod, volume.VolumeOptions{RootContext: rootContext}, kl.mounter)
 if err != nil {
 glog.Errorf("Could not create volume builder for pod %s: %v", pod.UID, err)
 return nil, err

@@ -502,12 +502,12 @@ func (c *PersistentVolumeProvisionerController) GetKubeClient() clientset.Interf
 return c.client.GetKubeClient()
 }
-func (c *PersistentVolumeProvisionerController) NewWrapperBuilder(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
-return nil, fmt.Errorf("NewWrapperBuilder not supported by PVClaimBinder's VolumeHost implementation")
+func (c *PersistentVolumeProvisionerController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+return nil, fmt.Errorf("NewWrapperMounter not supported by PVClaimBinder's VolumeHost implementation")
 }
-func (c *PersistentVolumeProvisionerController) NewWrapperCleaner(volName string, spec volume.Spec, podUID types.UID) (volume.Cleaner, error) {
-return nil, fmt.Errorf("NewWrapperCleaner not supported by PVClaimBinder's VolumeHost implementation")
+func (c *PersistentVolumeProvisionerController) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
+return nil, fmt.Errorf("NewWrapperUnmounter not supported by PVClaimBinder's VolumeHost implementation")
 }
 func (c *PersistentVolumeProvisionerController) GetCloudProvider() cloudprovider.Interface {

@@ -385,12 +385,12 @@ func (f *PersistentVolumeRecycler) GetKubeClient() clientset.Interface {
 return f.kubeClient
 }
-func (f *PersistentVolumeRecycler) NewWrapperBuilder(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
-return nil, fmt.Errorf("NewWrapperBuilder not supported by PVClaimBinder's VolumeHost implementation")
+func (f *PersistentVolumeRecycler) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+return nil, fmt.Errorf("NewWrapperMounter not supported by PVClaimBinder's VolumeHost implementation")
 }
-func (f *PersistentVolumeRecycler) NewWrapperCleaner(volName string, spec volume.Spec, podUID types.UID) (volume.Cleaner, error) {
-return nil, fmt.Errorf("NewWrapperCleaner not supported by PVClaimBinder's VolumeHost implementation")
+func (f *PersistentVolumeRecycler) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
+return nil, fmt.Errorf("NewWrapperUnmounter not supported by PVClaimBinder's VolumeHost implementation")
 }
 func (f *PersistentVolumeRecycler) GetCloudProvider() cloudprovider.Interface {

@@ -367,8 +367,8 @@ type RunContainerOptions struct {
 // VolumeInfo contains information about the volume.
 type VolumeInfo struct {
-// Builder is the volume's builder
-Builder volume.Builder
+// Mounter is the volume's mounter
+Mounter volume.Mounter
 // SELinuxLabeled indicates whether this volume has had the
 // pod's SELinux label applied to it or not
 SELinuxLabeled bool

@@ -1208,9 +1208,9 @@ func (kl *Kubelet) relabelVolumes(pod *api.Pod, volumes kubecontainer.VolumeMap)
 volumeContext := fmt.Sprintf("%s:%s:%s:%s", rootDirSELinuxOptions.User, rootDirSELinuxOptions.Role, rootDirSELinuxOptions.Type, rootDirSELinuxOptions.Level)
 for _, vol := range volumes {
-if vol.Builder.GetAttributes().Managed && vol.Builder.GetAttributes().SupportsSELinux {
+if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux {
 // Relabel the volume and its content to match the 'Level' of the pod
-err := filepath.Walk(vol.Builder.GetPath(), func(path string, info os.FileInfo, err error) error {
+err := filepath.Walk(vol.Mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
 if err != nil {
 return err
 }
@@ -1246,14 +1246,14 @@ func makeMounts(pod *api.Pod, podDir string, container *api.Container, hostName,
 // If the volume supports SELinux and it has not been
 // relabeled already and it is not a read-only volume,
 // relabel it and mark it as labeled
-if vol.Builder.GetAttributes().Managed && vol.Builder.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
+if vol.Mounter.GetAttributes().Managed && vol.Mounter.GetAttributes().SupportsSELinux && !vol.SELinuxLabeled {
 vol.SELinuxLabeled = true
 relabelVolume = true
 }
 mounts = append(mounts, kubecontainer.Mount{
 Name: mount.Name,
 ContainerPath: mount.MountPath,
-HostPath: vol.Builder.GetPath(),
+HostPath: vol.Mounter.GetPath(),
 ReadOnly: mount.ReadOnly,
 SELinuxRelabel: relabelVolume,
 })
@@ -1980,12 +1980,12 @@ func (kl *Kubelet) cleanupOrphanedVolumes(pods []*api.Pod, runningPods []*kubeco
 // TODO(yifan): Refactor this hacky string manipulation.
 kl.volumeManager.DeleteVolumes(types.UID(parts[0]))
 // Get path reference count
-refs, err := mount.GetMountRefs(kl.mounter, cleanerTuple.Cleaner.GetPath())
+refs, err := mount.GetMountRefs(kl.mounter, cleanerTuple.Unmounter.GetPath())
 if err != nil {
 return fmt.Errorf("Could not get mount path references %v", err)
 }
 //TODO (jonesdl) This should not block other kubelet synchronization procedures
-err = cleanerTuple.Cleaner.TearDown()
+err = cleanerTuple.Unmounter.TearDown()
 if err != nil {
 glog.Errorf("Could not tear down volume %q: %v", name, err)
 }

@@ -548,12 +548,12 @@ func TestGetPodVolumesFromDisk(t *testing.T) {
 volumesFound := kubelet.getPodVolumesFromDisk()
 if len(volumesFound) != len(expectedPaths) {
-t.Errorf("Expected to find %d cleaners, got %d", len(expectedPaths), len(volumesFound))
+t.Errorf("Expected to find %d unmounters, got %d", len(expectedPaths), len(volumesFound))
 }
 for _, ep := range expectedPaths {
 found := false
 for _, cl := range volumesFound {
-if ep == cl.Cleaner.GetPath() {
+if ep == cl.Unmounter.GetPath() {
 found = true
 break
 }
@@ -639,12 +639,12 @@ func TestCleanupOrphanedVolumes(t *testing.T) {
 volumesFound := kubelet.getPodVolumesFromDisk()
 if len(volumesFound) != len(pathsOnDisk) {
-t.Errorf("Expected to find %d cleaners, got %d", len(pathsOnDisk), len(volumesFound))
+t.Errorf("Expected to find %d unmounters, got %d", len(pathsOnDisk), len(volumesFound))
 }
 for _, ep := range pathsOnDisk {
 found := false
 for _, cl := range volumesFound {
-if ep == cl.Cleaner.GetPath() {
+if ep == cl.Unmounter.GetPath() {
 found = true
 break
 }
@@ -661,10 +661,10 @@ func TestCleanupOrphanedVolumes(t *testing.T) {
 }
 volumesFound = kubelet.getPodVolumesFromDisk()
 if len(volumesFound) != 0 {
-t.Errorf("Expected to find 0 cleaners, got %d", len(volumesFound))
+t.Errorf("Expected to find 0 unmounters, got %d", len(volumesFound))
 }
 for _, cl := range volumesFound {
-t.Errorf("Found unexpected volume %s", cl.Cleaner.GetPath())
+t.Errorf("Found unexpected volume %s", cl.Unmounter.GetPath())
 }
 }
@@ -716,9 +716,9 @@ func TestMakeVolumeMounts(t *testing.T) {
 }
 podVolumes := kubecontainer.VolumeMap{
-"disk": kubecontainer.VolumeInfo{Builder: &stubVolume{path: "/mnt/disk"}},
-"disk4": kubecontainer.VolumeInfo{Builder: &stubVolume{path: "/mnt/host"}},
-"disk5": kubecontainer.VolumeInfo{Builder: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}},
+"disk": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/disk"}},
+"disk4": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/mnt/host"}},
+"disk5": kubecontainer.VolumeInfo{Mounter: &stubVolume{path: "/var/lib/kubelet/podID/volumes/empty/disk5"}},
 }
 pod := api.Pod{

@@ -543,7 +543,7 @@ func (r *Runtime) makePodManifest(pod *api.Pod, pullSecrets []api.Secret) (*appc
 manifest.Volumes = append(manifest.Volumes, appctypes.Volume{
 Name: convertToACName(vname),
 Kind: "host",
-Source: volume.Builder.GetPath(),
+Source: volume.Mounter.GetPath(),
 })
 }

@@ -58,21 +58,21 @@ func (vh *volumeHost) GetKubeClient() clientset.Interface {
 return vh.kubelet.kubeClient
 }
-func (vh *volumeHost) NewWrapperBuilder(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
+func (vh *volumeHost) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
 wrapperVolumeName := "wrapped_" + volName
 if spec.Volume != nil {
 spec.Volume.Name = wrapperVolumeName
 }
-b, err := vh.kubelet.newVolumeBuilderFromPlugins(&spec, pod, opts)
+b, err := vh.kubelet.newVolumeMounterFromPlugins(&spec, pod, opts)
 if err == nil && b == nil {
 return nil, errUnsupportedVolumeType
 }
 return b, nil
 }
-func (vh *volumeHost) NewWrapperCleaner(volName string, spec volume.Spec, podUID types.UID) (volume.Cleaner, error) {
+func (vh *volumeHost) NewWrapperUnmounter(volName string, spec volume.Spec, podUID types.UID) (volume.Unmounter, error) {
 // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
 wrapperVolumeName := "wrapped_" + volName
 if spec.Volume != nil {
@@ -87,7 +87,7 @@ func (vh *volumeHost) NewWrapperCleaner(volName string, spec volume.Spec, podUID
 // Not found but not an error
 return nil, nil
 }
-c, err := plugin.NewCleaner(spec.Name(), podUID)
+c, err := plugin.NewUnmounter(spec.Name(), podUID)
 if err == nil && c == nil {
 return nil, errUnsupportedVolumeType
 }
@@ -127,18 +127,18 @@ func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap,
 // Try to use a plugin for this volume.
 internal := volume.NewSpecFromVolume(volSpec)
-builder, err := kl.newVolumeBuilderFromPlugins(internal, pod, volume.VolumeOptions{RootContext: rootContext})
+mounter, err := kl.newVolumeMounterFromPlugins(internal, pod, volume.VolumeOptions{RootContext: rootContext})
 if err != nil {
-glog.Errorf("Could not create volume builder for pod %s: %v", pod.UID, err)
+glog.Errorf("Could not create volume mounter for pod %s: %v", pod.UID, err)
 return nil, err
 }
-if builder == nil {
+if mounter == nil {
 return nil, errUnsupportedVolumeType
 }
-// some volumes require attachment before builder's setup.
+// some volumes require attachment before mounter's setup.
 // The plugin can be nil, but non-nil errors are legitimate errors.
-// For non-nil plugins, Attachment to a node is required before Builder's setup.
+// For non-nil plugins, Attachment to a node is required before Mounter's setup.
 attacher, err := kl.newVolumeAttacherFromPlugins(internal, pod, volume.VolumeOptions{RootContext: rootContext})
 if err != nil {
 glog.Errorf("Could not create volume attacher for pod %s: %v", pod.UID, err)
@@ -151,11 +151,11 @@ func (kl *Kubelet) mountExternalVolumes(pod *api.Pod) (kubecontainer.VolumeMap,
 }
 }
-err = builder.SetUp(fsGroup)
+err = mounter.SetUp(fsGroup)
 if err != nil {
 return nil, err
 }
-podVolumes[volSpec.Name] = kubecontainer.VolumeInfo{Builder: builder}
+podVolumes[volSpec.Name] = kubecontainer.VolumeInfo{Mounter: mounter}
 }
 return podVolumes, nil
 }
@@ -173,7 +173,7 @@ func (kl *Kubelet) ListVolumesForPod(podUID types.UID) (map[string]volume.Volume
 return result, false
 }
 for name, info := range vm {
-result[name] = info.Builder
+result[name] = info.Mounter
 }
 return result, true
 }
@@ -206,14 +206,14 @@ func (kl *Kubelet) getPodVolumes(podUID types.UID) ([]*volumeTuple, error) {
 }
 // cleanerTuple is a union struct to allow separating detaching from the cleaner.
-// some volumes require detachment but not all. Cleaner cannot be nil but Detacher is optional.
+// some volumes require detachment but not all. Unmounter cannot be nil but Detacher is optional.
 type cleanerTuple struct {
-Cleaner volume.Cleaner
+Unmounter volume.Unmounter
 Detacher *volume.Detacher
 }
 // getPodVolumesFromDisk examines directory structure to determine volumes that
-// are presently active and mounted. Returns a union struct containing a volume.Cleaner
+// are presently active and mounted. Returns a union struct containing a volume.Unmounter
 // and potentially a volume.Detacher.
 func (kl *Kubelet) getPodVolumesFromDisk() map[string]cleanerTuple {
 currentVolumes := make(map[string]cleanerTuple)
@@ -231,23 +231,23 @@ func (kl *Kubelet) getPodVolumesFromDisk() map[string]cleanerTuple {
 }
 for _, volume := range volumes {
 identifier := fmt.Sprintf("%s/%s", podUID, volume.Name)
-glog.V(4).Infof("Making a volume.Cleaner for volume %s/%s of pod %s", volume.Kind, volume.Name, podUID)
+glog.V(4).Infof("Making a volume.Unmounter for volume %s/%s of pod %s", volume.Kind, volume.Name, podUID)
 // TODO(thockin) This should instead return a reference to an extant
 // volume object, except that we don't actually hold on to pod specs
 // or volume objects.
 // Try to use a plugin for this volume.
-cleaner, err := kl.newVolumeCleanerFromPlugins(volume.Kind, volume.Name, podUID)
+unmounter, err := kl.newVolumeUnmounterFromPlugins(volume.Kind, volume.Name, podUID)
 if err != nil {
-glog.Errorf("Could not create volume cleaner for %s: %v", volume.Name, err)
+glog.Errorf("Could not create volume unmounter for %s: %v", volume.Name, err)
 continue
 }
-if cleaner == nil {
-glog.Errorf("Could not create volume cleaner for %s: %v", volume.Name, errUnsupportedVolumeType)
+if unmounter == nil {
+glog.Errorf("Could not create volume unmounter for %s: %v", volume.Name, errUnsupportedVolumeType)
 continue
 }
-tuple := cleanerTuple{Cleaner: cleaner}
+tuple := cleanerTuple{Unmounter: unmounter}
 detacher, err := kl.newVolumeDetacherFromPlugins(volume.Kind, volume.Name, podUID)
 // plugin can be nil but a non-nil error is a legitimate error
 if err != nil {
@@ -263,7 +263,7 @@ func (kl *Kubelet) getPodVolumesFromDisk() map[string]cleanerTuple {
 return currentVolumes
 }
-func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
+func (kl *Kubelet) newVolumeMounterFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
 plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
 if err != nil {
 return nil, fmt.Errorf("can't use volume plugins for %s: %v", spec.Name(), err)
@@ -272,12 +272,12 @@ func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *volume.Spec, pod *api.Pod,
 // Not found but not an error
 return nil, nil
 }
-builder, err := plugin.NewBuilder(spec, pod, opts)
+physicalMounter, err := plugin.NewMounter(spec, pod, opts)
 if err != nil {
-return nil, fmt.Errorf("failed to instantiate volume builder for %s: %v", spec.Name(), err)
+return nil, fmt.Errorf("failed to instantiate volume physicalMounter for %s: %v", spec.Name(), err)
 }
 glog.V(10).Infof("Used volume plugin %q to mount %s", plugin.Name(), spec.Name())
-return builder, nil
+return physicalMounter, nil
 }
 func (kl *Kubelet) newVolumeAttacherFromPlugins(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Attacher, error) {
@@ -298,7 +298,7 @@ func (kl *Kubelet) newVolumeAttacherFromPlugins(spec *volume.Spec, pod *api.Pod,
 return attacher, nil
 }
-func (kl *Kubelet) newVolumeCleanerFromPlugins(kind string, name string, podUID types.UID) (volume.Cleaner, error) {
+func (kl *Kubelet) newVolumeUnmounterFromPlugins(kind string, name string, podUID types.UID) (volume.Unmounter, error) {
 plugName := strings.UnescapeQualifiedNameForDisk(kind)
 plugin, err := kl.volumePluginMgr.FindPluginByName(plugName)
 if err != nil {
@@ -309,12 +309,12 @@ func (kl *Kubelet) newVolumeCleanerFromPlugins(kind string, name string, podUID
 // Not found but not an error.
 return nil, nil
 }
-cleaner, err := plugin.NewCleaner(name, podUID)
+unmounter, err := plugin.NewUnmounter(name, podUID)
 if err != nil {
 return nil, fmt.Errorf("failed to instantiate volume plugin for %s/%s: %v", podUID, kind, err)
 }
 glog.V(5).Infof("Used volume plugin %q to unmount %s/%s", plugin.Name(), podUID, kind)
-return cleaner, nil
+return unmounter, nil
 }
 func (kl *Kubelet) newVolumeDetacherFromPlugins(kind string, name string, podUID types.UID) (volume.Detacher, error) {

@@ -72,12 +72,12 @@ func (plugin *awsElasticBlockStorePlugin) GetAccessModes() []api.PersistentVolum
 }
 }
-func (plugin *awsElasticBlockStorePlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *awsElasticBlockStorePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
 // Inject real implementations here, test through the internal function.
-return plugin.newBuilderInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
+return plugin.newMounterInternal(spec, pod.UID, &AWSDiskUtil{}, plugin.host.GetMounter())
 }
-func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Builder, error) {
+func (plugin *awsElasticBlockStorePlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Mounter, error) {
 // EBSs used directly in a pod have a ReadOnly flag set by the pod author.
 // EBSs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
 var readOnly bool
@@ -97,7 +97,7 @@ func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec,
 partition = strconv.Itoa(ebs.Partition)
 }
-return &awsElasticBlockStoreBuilder{
+return &awsElasticBlockStoreMounter{
 awsElasticBlockStore: &awsElasticBlockStore{
 podUID: podUID,
 volName: spec.Name(),
@@ -112,13 +112,13 @@ func (plugin *awsElasticBlockStorePlugin) newBuilderInternal(spec *volume.Spec,
 diskMounter: &mount.SafeFormatAndMount{plugin.host.GetMounter(), exec.New()}}, nil
 }
-func (plugin *awsElasticBlockStorePlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+func (plugin *awsElasticBlockStorePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
 // Inject real implementations here, test through the internal function.
-return plugin.newCleanerInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
+return plugin.newUnmounterInternal(volName, podUID, &AWSDiskUtil{}, plugin.host.GetMounter())
 }
-func (plugin *awsElasticBlockStorePlugin) newCleanerInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Cleaner, error) {
-return &awsElasticBlockStoreCleaner{&awsElasticBlockStore{
+func (plugin *awsElasticBlockStorePlugin) newUnmounterInternal(volName string, podUID types.UID, manager ebsManager, mounter mount.Interface) (volume.Unmounter, error) {
+return &awsElasticBlockStoreUnmounter{&awsElasticBlockStore{
 podUID: podUID,
 volName: volName,
 manager: manager,
@@ -164,9 +164,9 @@ func (plugin *awsElasticBlockStorePlugin) newProvisionerInternal(options volume.
 // Abstract interface to PD operations.
 type ebsManager interface {
 // Attaches the disk to the kubelet's host machine.
-AttachAndMountDisk(b *awsElasticBlockStoreBuilder, globalPDPath string) error
+AttachAndMountDisk(b *awsElasticBlockStoreMounter, globalPDPath string) error
 // Detaches the disk from the kubelet's host machine.
-DetachDisk(c *awsElasticBlockStoreCleaner) error
+DetachDisk(c *awsElasticBlockStoreUnmounter) error
 // Creates a volume
 CreateVolume(provisioner *awsElasticBlockStoreProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error)
 // Deletes a volume
@@ -191,13 +191,13 @@ type awsElasticBlockStore struct {
 }
 func detachDiskLogError(ebs *awsElasticBlockStore) {
-err := ebs.manager.DetachDisk(&awsElasticBlockStoreCleaner{ebs})
+err := ebs.manager.DetachDisk(&awsElasticBlockStoreUnmounter{ebs})
 if err != nil {
 glog.Warningf("Failed to detach disk: %v (%v)", ebs, err)
 }
 }
-type awsElasticBlockStoreBuilder struct {
+type awsElasticBlockStoreMounter struct {
 *awsElasticBlockStore
 // Filesystem type, optional.
 fsType string
@@ -207,9 +207,9 @@ type awsElasticBlockStoreBuilder struct {
 diskMounter *mount.SafeFormatAndMount
 }
-var _ volume.Builder = &awsElasticBlockStoreBuilder{}
-func (b *awsElasticBlockStoreBuilder) GetAttributes() volume.Attributes {
+var _ volume.Mounter = &awsElasticBlockStoreMounter{}
+func (b *awsElasticBlockStoreMounter) GetAttributes() volume.Attributes {
 return volume.Attributes{
 ReadOnly: b.readOnly,
 Managed: !b.readOnly,
@@ -218,12 +218,12 @@ func (b *awsElasticBlockStoreBuilder) GetAttributes() volume.Attributes {
 }
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *awsElasticBlockStoreBuilder) SetUp(fsGroup *int64) error {
+func (b *awsElasticBlockStoreMounter) SetUp(fsGroup *int64) error {
 return b.SetUpAt(b.GetPath(), fsGroup)
 }
 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (b *awsElasticBlockStoreBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *awsElasticBlockStoreMounter) SetUpAt(dir string, fsGroup *int64) error {
 // TODO: handle failed mounts here.
 notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
@@ -317,21 +317,21 @@ func (ebs *awsElasticBlockStore) GetPath() string {
 return ebs.plugin.host.GetPodVolumeDir(ebs.podUID, utilstrings.EscapeQualifiedNameForDisk(name), ebs.volName)
 }
-type awsElasticBlockStoreCleaner struct {
+type awsElasticBlockStoreUnmounter struct {
 *awsElasticBlockStore
 }
-var _ volume.Cleaner = &awsElasticBlockStoreCleaner{}
+var _ volume.Unmounter = &awsElasticBlockStoreUnmounter{}
 // Unmounts the bind mount, and detaches the disk only if the PD
 // resource was the last reference to that disk on the kubelet.
-func (c *awsElasticBlockStoreCleaner) TearDown() error {
+func (c *awsElasticBlockStoreUnmounter) TearDown() error {
 return c.TearDownAt(c.GetPath())
 }
 // Unmounts the bind mount, and detaches the disk only if the PD
 // resource was the last reference to that disk on the kubelet.
-func (c *awsElasticBlockStoreCleaner) TearDownAt(dir string) error {
+func (c *awsElasticBlockStoreUnmounter) TearDownAt(dir string) error {
 notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
 if err != nil {
 glog.V(2).Info("Error checking if mountpoint ", dir, ": ", err)
@@ -358,13 +358,13 @@ func (c *awsElasticBlockStoreCleaner) TearDownAt(dir string) error {
 // If len(refs) is 1, then all bind mounts have been removed, and the
 // remaining reference is the global mount. It is safe to detach.
 if len(refs) == 1 {
-// c.volumeID is not initially set for volume-cleaners, so set it here.
+// c.volumeID is not initially set for volume-unmounters, so set it here.
 c.volumeID, err = getVolumeIDFromGlobalMount(c.plugin.host, refs[0])
 if err != nil {
 glog.V(2).Info("Could not determine volumeID from mountpoint ", refs[0], ": ", err)
 return err
 }
-if err := c.manager.DetachDisk(&awsElasticBlockStoreCleaner{c.awsElasticBlockStore}); err != nil {
+if err := c.manager.DetachDisk(&awsElasticBlockStoreUnmounter{c.awsElasticBlockStore}); err != nil {
 glog.V(2).Info("Error detaching disk ", c.volumeID, ": ", err)
 return err
 }
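
The teardown path above leans on mount reference counting: `mount.GetMountRefs` lists every mount point backed by the same device, and the disk is detached only once the global PD mount is the sole remaining reference. A condensed sketch of that decision rule, using the call shown in these hunks; `shouldDetach` is a hypothetical helper introduced only for illustration:

```go
// shouldDetach reports whether the disk backing dir is safe to detach,
// mirroring the len(refs) == 1 check in TearDownAt above.
func shouldDetach(mounter mount.Interface, dir string) (bool, error) {
	refs, err := mount.GetMountRefs(mounter, dir)
	if err != nil {
		return false, err
	}
	// Once the pod-level bind mounts are gone, a single remaining
	// reference is the global PD mount, so detaching is safe.
	return len(refs) == 1, nil
}
```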

@@ -94,7 +94,7 @@ type fakePDManager struct {
 // TODO(jonesdl) To fully test this, we could create a loopback device
 // and mount that instead.
-func (fake *fakePDManager) AttachAndMountDisk(b *awsElasticBlockStoreBuilder, globalPDPath string) error {
+func (fake *fakePDManager) AttachAndMountDisk(b *awsElasticBlockStoreMounter, globalPDPath string) error {
 globalPath := makeGlobalPDPath(b.plugin.host, b.volumeID)
 err := os.MkdirAll(globalPath, 0750)
 if err != nil {
@@ -107,7 +107,7 @@ func (fake *fakePDManager) AttachAndMountDisk(b *awsElasticBlockStoreBuilder, gl
 return nil
 }
-func (fake *fakePDManager) DetachDisk(c *awsElasticBlockStoreCleaner) error {
+func (fake *fakePDManager) DetachDisk(c *awsElasticBlockStoreUnmounter) error {
 globalPath := makeGlobalPDPath(c.plugin.host, c.volumeID)
 err := os.RemoveAll(globalPath)
 if err != nil {
@@ -154,21 +154,21 @@ func TestPlugin(t *testing.T) {
 }
 fakeManager := &fakePDManager{}
 fakeMounter := &mount.FakeMounter{}
-builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
+mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
 if err != nil {
-t.Errorf("Failed to make a new Builder: %v", err)
+t.Errorf("Failed to make a new Mounter: %v", err)
 }
-if builder == nil {
-t.Errorf("Got a nil Builder")
+if mounter == nil {
+t.Errorf("Got a nil Mounter")
 }
 volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~aws-ebs/vol1")
-path := builder.GetPath()
+path := mounter.GetPath()
 if path != volPath {
 t.Errorf("Got unexpected path: %s", path)
 }
-if err := builder.SetUp(nil); err != nil {
+if err := mounter.SetUp(nil); err != nil {
 t.Errorf("Expected success, got: %v", err)
 }
 if _, err := os.Stat(path); err != nil {
@@ -190,15 +190,15 @@ func TestPlugin(t *testing.T) {
 }
 fakeManager = &fakePDManager{}
-cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
+unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
 if err != nil {
-t.Errorf("Failed to make a new Cleaner: %v", err)
+t.Errorf("Failed to make a new Unmounter: %v", err)
 }
-if cleaner == nil {
-t.Errorf("Got a nil Cleaner")
+if unmounter == nil {
+t.Errorf("Got a nil Unmounter")
 }
-if err := cleaner.TearDown(); err != nil {
+if err := unmounter.TearDown(); err != nil {
 t.Errorf("Expected success, got: %v", err)
 }
 if _, err := os.Stat(path); err == nil {
@@ -295,17 +295,17 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, clientset, nil))
 plug, _ := plugMgr.FindPluginByName(awsElasticBlockStorePluginName)
-// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
 spec := volume.NewSpecFromPersistentVolume(pv, true)
 pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-if !builder.GetAttributes().ReadOnly {
-t.Errorf("Expected true for builder.IsReadOnly")
+mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+if !mounter.GetAttributes().ReadOnly {
+t.Errorf("Expected true for mounter.IsReadOnly")
 }
 }
-func TestBuilderAndCleanerTypeAssert(t *testing.T) {
+func TestMounterAndUnmounterTypeAssert(t *testing.T) {
 tmpDir, err := utiltesting.MkTmpdir("awsebsTest")
 if err != nil {
 t.Fatalf("can't make a temp dir: %v", err)
@@ -328,13 +328,13 @@ func TestBuilderAndCleanerTypeAssert(t *testing.T) {
 },
 }
-builder, err := plug.(*awsElasticBlockStorePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
-if _, ok := builder.(volume.Cleaner); ok {
-t.Errorf("Volume Builder can be type-assert to Cleaner")
+mounter, err := plug.(*awsElasticBlockStorePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
+if _, ok := mounter.(volume.Unmounter); ok {
+t.Errorf("Volume Mounter can be type-assert to Unmounter")
 }
-cleaner, err := plug.(*awsElasticBlockStorePlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
-if _, ok := cleaner.(volume.Builder); ok {
-t.Errorf("Volume Cleaner can be type-assert to Builder")
+unmounter, err := plug.(*awsElasticBlockStorePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{}, &mount.FakeMounter{})
+if _, ok := unmounter.(volume.Mounter); ok {
+t.Errorf("Volume Unmounter can be type-assert to Mounter")
 }
 }

@@ -48,7 +48,7 @@ type AWSDiskUtil struct{}
 // Attaches a disk to the current kubelet.
 // Mounts the disk to it's global path.
-func (diskUtil *AWSDiskUtil) AttachAndMountDisk(b *awsElasticBlockStoreBuilder, globalPDPath string) error {
+func (diskUtil *AWSDiskUtil) AttachAndMountDisk(b *awsElasticBlockStoreMounter, globalPDPath string) error {
 glog.V(5).Infof("AttachAndMountDisk(...) called for PD %q. Will block for existing operations, if any. (globalPDPath=%q)\r\n", b.volumeID, globalPDPath)
 // Block execution until any pending detach operations for this PD have completed
@@ -95,7 +95,7 @@ func (diskUtil *AWSDiskUtil) AttachAndMountDisk(b *awsElasticBlockStoreBuilder,
 }
 // Unmounts the device and detaches the disk from the kubelet's host machine.
-func (util *AWSDiskUtil) DetachDisk(c *awsElasticBlockStoreCleaner) error {
+func (util *AWSDiskUtil) DetachDisk(c *awsElasticBlockStoreUnmounter) error {
 glog.V(5).Infof("DetachDisk(...) for PD %q\r\n", c.volumeID)
 if err := unmountPDAndRemoveGlobalPath(c); err != nil {
@@ -168,7 +168,7 @@ func (util *AWSDiskUtil) CreateVolume(c *awsElasticBlockStoreProvisioner) (strin
 }
 // Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
-func attachDiskAndVerify(b *awsElasticBlockStoreBuilder, xvdBeforeSet sets.String) (string, error) {
+func attachDiskAndVerify(b *awsElasticBlockStoreMounter, xvdBeforeSet sets.String) (string, error) {
 var awsCloud *aws.AWSCloud
 var attachError error
@@ -236,7 +236,7 @@ func verifyDevicePath(devicePaths []string) (string, error) {
 // Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
 // This function is intended to be called asynchronously as a go routine.
-func detachDiskAndVerify(c *awsElasticBlockStoreCleaner) {
+func detachDiskAndVerify(c *awsElasticBlockStoreUnmounter) {
 glog.V(5).Infof("detachDiskAndVerify(...) for pd %q. Will block for pending operations", c.volumeID)
 defer runtime.HandleCrash()
@@ -295,7 +295,7 @@ func detachDiskAndVerify(c *awsElasticBlockStoreCleaner) {
 }
 // Unmount the global PD mount, which should be the only one, and delete it.
-func unmountPDAndRemoveGlobalPath(c *awsElasticBlockStoreCleaner) error {
+func unmountPDAndRemoveGlobalPath(c *awsElasticBlockStoreUnmounter) error {
 globalPDPath := makeGlobalPDPath(c.plugin.host, c.volumeID)
 err := c.mounter.Unmount(globalPDPath)

@@ -68,11 +68,11 @@ func (plugin *azureFilePlugin) GetAccessModes() []api.PersistentVolumeAccessMode
 }
 }
-func (plugin *azureFilePlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
-return plugin.newBuilderInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter())
+func (plugin *azureFilePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
+return plugin.newMounterInternal(spec, pod, &azureSvc{}, plugin.host.GetMounter())
 }
-func (plugin *azureFilePlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, util azureUtil, mounter mount.Interface) (volume.Builder, error) {
+func (plugin *azureFilePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, util azureUtil, mounter mount.Interface) (volume.Mounter, error) {
 var source *api.AzureFileVolumeSource
 var readOnly bool
 if spec.Volume != nil && spec.Volume.AzureFile != nil {
@@ -82,7 +82,7 @@ func (plugin *azureFilePlugin) newBuilderInternal(spec *volume.Spec, pod *api.Po
 source = spec.PersistentVolume.Spec.AzureFile
 readOnly = spec.ReadOnly
 }
-return &azureFileBuilder{
+return &azureFileMounter{
 azureFile: &azureFile{
 volName: spec.Name(),
 mounter: mounter,
@@ -96,12 +96,12 @@ func (plugin *azureFilePlugin) newBuilderInternal(spec *volume.Spec, pod *api.Po
 }, nil
 }
-func (plugin *azureFilePlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-return plugin.newCleanerInternal(volName, podUID, plugin.host.GetMounter())
+func (plugin *azureFilePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
 }
-func (plugin *azureFilePlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
-return &azureFileCleaner{&azureFile{
+func (plugin *azureFilePlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
+return &azureFileUnmounter{&azureFile{
 volName: volName,
 mounter: mounter,
 pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
@@ -123,7 +123,7 @@ func (azureFileVolume *azureFile) GetPath() string {
 return azureFileVolume.plugin.host.GetPodVolumeDir(azureFileVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), azureFileVolume.volName)
 }
-type azureFileBuilder struct {
+type azureFileMounter struct {
 *azureFile
 util azureUtil
 secretName string
@@ -131,9 +131,9 @@ type azureFileBuilder struct {
 readOnly bool
 }
-var _ volume.Builder = &azureFileBuilder{}
-func (b *azureFileBuilder) GetAttributes() volume.Attributes {
+var _ volume.Mounter = &azureFileMounter{}
+func (b *azureFileMounter) GetAttributes() volume.Attributes {
 return volume.Attributes{
 ReadOnly: b.readOnly,
 Managed: !b.readOnly,
@@ -142,11 +142,11 @@ func (b *azureFileBuilder) GetAttributes() volume.Attributes {
 }
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *azureFileBuilder) SetUp(fsGroup *int64) error {
+func (b *azureFileMounter) SetUp(fsGroup *int64) error {
 return b.SetUpAt(b.GetPath(), fsGroup)
 }
-func (b *azureFileBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *azureFileMounter) SetUpAt(dir string, fsGroup *int64) error {
 notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
 glog.V(4).Infof("AzureFile mount set up: %s %v %v", dir, !notMnt, err)
 if err != nil && !os.IsNotExist(err) {
@@ -195,17 +195,17 @@ func (b *azureFileBuilder) SetUpAt(dir string, fsGroup *int64) error {
 return nil
 }
-var _ volume.Cleaner = &azureFileCleaner{}
-type azureFileCleaner struct {
+var _ volume.Unmounter = &azureFileUnmounter{}
+type azureFileUnmounter struct {
 *azureFile
 }
-func (c *azureFileCleaner) TearDown() error {
+func (c *azureFileUnmounter) TearDown() error {
 return c.TearDownAt(c.GetPath())
 }
-func (c *azureFileCleaner) TearDownAt(dir string) error {
+func (c *azureFileUnmounter) TearDownAt(dir string) error {
 notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
 if err != nil {
 glog.Errorf("Error checking IsLikelyNotMountPoint: %v", err)

@@ -105,20 +105,20 @@ func TestPlugin(t *testing.T) {
 }
 fake := &mount.FakeMounter{}
 pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-builder, err := plug.(*azureFilePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
+mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
 if err != nil {
-t.Errorf("Failed to make a new Builder: %v", err)
+t.Errorf("Failed to make a new Mounter: %v", err)
 }
-if builder == nil {
-t.Errorf("Got a nil Builder")
+if mounter == nil {
+t.Errorf("Got a nil Mounter")
 }
 volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~azure-file/vol1")
-path := builder.GetPath()
+path := mounter.GetPath()
 if path != volPath {
 t.Errorf("Got unexpected path: %s", path)
 }
-if err := builder.SetUp(nil); err != nil {
+if err := mounter.SetUp(nil); err != nil {
 t.Errorf("Expected success, got: %v", err)
 }
 if _, err := os.Stat(path); err != nil {
@@ -136,15 +136,15 @@ func TestPlugin(t *testing.T) {
 }
 }
-cleaner, err := plug.(*azureFilePlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
+unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
 if err != nil {
-t.Errorf("Failed to make a new Cleaner: %v", err)
+t.Errorf("Failed to make a new Unmounter: %v", err)
 }
-if cleaner == nil {
-t.Errorf("Got a nil Cleaner")
+if unmounter == nil {
+t.Errorf("Got a nil Unmounter")
 }
-if err := cleaner.TearDown(); err != nil {
+if err := unmounter.TearDown(); err != nil {
 t.Errorf("Expected success, got: %v", err)
 }
 if _, err := os.Stat(path); err == nil {
@@ -188,13 +188,13 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
 plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
 plug, _ := plugMgr.FindPluginByName(azureFilePluginName)
-// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
 spec := volume.NewSpecFromPersistentVolume(pv, true)
 pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-if !builder.GetAttributes().ReadOnly {
-t.Errorf("Expected true for builder.IsReadOnly")
+mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+if !mounter.GetAttributes().ReadOnly {
+t.Errorf("Expected true for mounter.IsReadOnly")
 }
 }
@@ -204,7 +204,7 @@ func (s *fakeAzureSvc) GetAzureCredentials(host volume.VolumeHost, nameSpace, se
 return "name", "key", nil
 }
-func TestBuilderAndCleanerTypeAssert(t *testing.T) {
+func TestMounterAndUnmounterTypeAssert(t *testing.T) {
 tmpDir, err := ioutil.TempDir(os.TempDir(), "azurefileTest")
 if err != nil {
 t.Fatalf("can't make a temp dir: %v", err)
@@ -228,13 +228,13 @@ func TestBuilderAndCleanerTypeAssert(t *testing.T) {
 }
 fake := &mount.FakeMounter{}
 pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-builder, err := plug.(*azureFilePlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
-if _, ok := builder.(volume.Cleaner); ok {
-t.Errorf("Volume Builder can be type-assert to Cleaner")
+mounter, err := plug.(*azureFilePlugin).newMounterInternal(volume.NewSpecFromVolume(spec), pod, &fakeAzureSvc{}, fake)
+if _, ok := mounter.(volume.Unmounter); ok {
+t.Errorf("Volume Mounter can be type-assert to Unmounter")
 }
-cleaner, err := plug.(*azureFilePlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
-if _, ok := cleaner.(volume.Builder); ok {
-t.Errorf("Volume Cleaner can be type-assert to Builder")
+unmounter, err := plug.(*azureFilePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
+if _, ok := unmounter.(volume.Mounter); ok {
+t.Errorf("Volume Unmounter can be type-assert to Mounter")
 }
 }

@@ -65,7 +65,7 @@ func (plugin *cephfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
 }
 }
-func (plugin *cephfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *cephfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
 cephvs := plugin.getVolumeSource(spec)
 secret := ""
 if cephvs.SecretRef != nil {
@@ -84,10 +84,10 @@ func (plugin *cephfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume
 glog.V(1).Infof("found ceph secret info: %s", name)
 }
 }
-return plugin.newBuilderInternal(spec, pod.UID, plugin.host.GetMounter(), secret)
+return plugin.newMounterInternal(spec, pod.UID, plugin.host.GetMounter(), secret)
 }
-func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Builder, error) {
+func (plugin *cephfsPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, mounter mount.Interface, secret string) (volume.Mounter, error) {
 cephvs := plugin.getVolumeSource(spec)
 id := cephvs.User
 if id == "" {
@@ -105,7 +105,7 @@ func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
 secret_file = "/etc/ceph/" + id + ".secret"
 }
-return &cephfsBuilder{
+return &cephfsMounter{
 cephfs: &cephfs{
 podUID: podUID,
 volName: spec.Name(),
@@ -120,12 +120,12 @@ func (plugin *cephfsPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
 }, nil
 }
-func (plugin *cephfsPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-return plugin.newCleanerInternal(volName, podUID, plugin.host.GetMounter())
+func (plugin *cephfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
 }
-func (plugin *cephfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
-return &cephfsCleaner{
+func (plugin *cephfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
+return &cephfsUnmounter{
 cephfs: &cephfs{
 podUID: podUID,
 volName: volName,
@@ -157,13 +157,13 @@ type cephfs struct {
 volume.MetricsNil
 }
-type cephfsBuilder struct {
+type cephfsMounter struct {
 *cephfs
 }
-var _ volume.Builder = &cephfsBuilder{}
-func (cephfsVolume *cephfsBuilder) GetAttributes() volume.Attributes {
+var _ volume.Mounter = &cephfsMounter{}
+func (cephfsVolume *cephfsMounter) GetAttributes() volume.Attributes {
 return volume.Attributes{
 ReadOnly: cephfsVolume.readonly,
 Managed: false,
@@ -172,12 +172,12 @@ func (cephfsVolume *cephfsBuilder) GetAttributes() volume.Attributes {
 }
 // SetUp attaches the disk and bind mounts to the volume path.
-func (cephfsVolume *cephfsBuilder) SetUp(fsGroup *int64) error {
+func (cephfsVolume *cephfsMounter) SetUp(fsGroup *int64) error {
 return cephfsVolume.SetUpAt(cephfsVolume.GetPath(), fsGroup)
 }
 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (cephfsVolume *cephfsBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (cephfsVolume *cephfsMounter) SetUpAt(dir string, fsGroup *int64) error {
 notMnt, err := cephfsVolume.mounter.IsLikelyNotMountPoint(dir)
 glog.V(4).Infof("CephFS mount set up: %s %v %v", dir, !notMnt, err)
 if err != nil && !os.IsNotExist(err) {
@ -199,19 +199,19 @@ func (cephfsVolume *cephfsBuilder) SetUpAt(dir string, fsGroup *int64) error {
return err return err
} }
type cephfsCleaner struct { type cephfsUnmounter struct {
*cephfs *cephfs
} }
var _ volume.Cleaner = &cephfsCleaner{} var _ volume.Unmounter = &cephfsUnmounter{}
// TearDown unmounts the bind mount // TearDown unmounts the bind mount
func (cephfsVolume *cephfsCleaner) TearDown() error { func (cephfsVolume *cephfsUnmounter) TearDown() error {
return cephfsVolume.TearDownAt(cephfsVolume.GetPath()) return cephfsVolume.TearDownAt(cephfsVolume.GetPath())
} }
// TearDownAt unmounts the bind mount // TearDownAt unmounts the bind mount
func (cephfsVolume *cephfsCleaner) TearDownAt(dir string) error { func (cephfsVolume *cephfsUnmounter) TearDownAt(dir string) error {
return cephfsVolume.cleanup(dir) return cephfsVolume.cleanup(dir)
} }
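
For orientation, the two interfaces being renamed look roughly like this, reconstructed from the call sites in this diff (a sketch, not a verbatim copy of `pkg/volume`; metrics-related methods are omitted and the `Attributes` field set is assumed from the literals below):

```go
package volume

// Attributes reflects the capabilities a Mounter reports; the exact field
// set here is assumed from the struct literals in this diff.
type Attributes struct {
	ReadOnly        bool
	Managed         bool
	SupportsSELinux bool
}

// Volume is the common base: it can report where it lives on disk.
type Volume interface {
	// GetPath returns the directory the volume is mounted to for its pod.
	GetPath() string
}

// Mounter (formerly Builder) sets a volume up for a pod.
type Mounter interface {
	Volume
	// SetUp prepares and mounts the volume at the path from GetPath,
	// applying fsGroup ownership when non-nil.
	SetUp(fsGroup *int64) error
	// SetUpAt does the same work at an explicit path.
	SetUpAt(dir string, fsGroup *int64) error
	// GetAttributes reports read-only, managed, and SELinux capabilities.
	GetAttributes() Attributes
}

// Unmounter (formerly Cleaner) tears a volume down.
type Unmounter interface {
	Volume
	// TearDown unmounts the volume from the path given by GetPath.
	TearDown() error
	// TearDownAt unmounts the volume from an explicit path.
	TearDownAt(dir string) error
}
```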

View File

@ -76,20 +76,20 @@ func TestPlugin(t *testing.T) {
}, },
} }
builder, err := plug.(*cephfsPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets") mounter, err := plug.(*cephfsPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &mount.FakeMounter{}, "secrets")
volumePath := builder.GetPath() volumePath := mounter.GetPath()
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1") volpath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cephfs/vol1")
path := builder.GetPath() path := mounter.GetPath()
if path != volpath { if path != volpath {
t.Errorf("Got unexpected path: %s", path) t.Errorf("Got unexpected path: %s", path)
} }
if err := builder.SetUp(nil); err != nil { if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(volumePath); err != nil { if _, err := os.Stat(volumePath); err != nil {
@ -99,14 +99,14 @@ func TestPlugin(t *testing.T) {
t.Errorf("SetUp() failed: %v", err) t.Errorf("SetUp() failed: %v", err)
} }
} }
cleaner, err := plug.(*cephfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{}) unmounter, err := plug.(*cephfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
if cleaner == nil { if unmounter == nil {
t.Errorf("Got a nil Cleaner") t.Errorf("Got a nil Unmounter")
} }
if err := cleaner.TearDown(); err != nil { if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(volumePath); err == nil { if _, err := os.Stat(volumePath); err == nil {

View File

@ -74,11 +74,11 @@ func (plugin *cinderPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
} }
} }
func (plugin *cinderPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) { func (plugin *cinderPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newBuilderInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter()) return plugin.newMounterInternal(spec, pod.UID, &CinderDiskUtil{}, plugin.host.GetMounter())
} }
func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Builder, error) { func (plugin *cinderPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Mounter, error) {
var cinder *api.CinderVolumeSource var cinder *api.CinderVolumeSource
if spec.Volume != nil && spec.Volume.Cinder != nil { if spec.Volume != nil && spec.Volume.Cinder != nil {
cinder = spec.Volume.Cinder cinder = spec.Volume.Cinder
@ -90,7 +90,7 @@ func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
fsType := cinder.FSType fsType := cinder.FSType
readOnly := cinder.ReadOnly readOnly := cinder.ReadOnly
return &cinderVolumeBuilder{ return &cinderVolumeMounter{
cinderVolume: &cinderVolume{ cinderVolume: &cinderVolume{
podUID: podUID, podUID: podUID,
volName: spec.Name(), volName: spec.Name(),
@ -104,12 +104,12 @@ func (plugin *cinderPlugin) newBuilderInternal(spec *volume.Spec, podUID types.U
blockDeviceMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil blockDeviceMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
} }
func (plugin *cinderPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) { func (plugin *cinderPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return plugin.newCleanerInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter()) return plugin.newUnmounterInternal(volName, podUID, &CinderDiskUtil{}, plugin.host.GetMounter())
} }
func (plugin *cinderPlugin) newCleanerInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Cleaner, error) { func (plugin *cinderPlugin) newUnmounterInternal(volName string, podUID types.UID, manager cdManager, mounter mount.Interface) (volume.Unmounter, error) {
return &cinderVolumeCleaner{ return &cinderVolumeUnmounter{
&cinderVolume{ &cinderVolume{
podUID: podUID, podUID: podUID,
volName: volName, volName: volName,
@ -170,18 +170,18 @@ func (plugin *cinderPlugin) getCloudProvider() (*openstack.OpenStack, error) {
// Abstract interface to PD operations. // Abstract interface to PD operations.
type cdManager interface { type cdManager interface {
// Attaches the disk to the kubelet's host machine. // Attaches the disk to the kubelet's host machine.
AttachDisk(builder *cinderVolumeBuilder, globalPDPath string) error AttachDisk(mounter *cinderVolumeMounter, globalPDPath string) error
// Detaches the disk from the kubelet's host machine. // Detaches the disk from the kubelet's host machine.
DetachDisk(cleaner *cinderVolumeCleaner) error DetachDisk(unmounter *cinderVolumeUnmounter) error
// Creates a volume // Creates a volume
CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) CreateVolume(provisioner *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error)
// Deletes a volume // Deletes a volume
DeleteVolume(deleter *cinderVolumeDeleter) error DeleteVolume(deleter *cinderVolumeDeleter) error
} }
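
Everything disk-specific is funneled through this cdManager seam, which is what lets the tests further below swap in a fake. As a hedged compile-time illustration (noopManager is hypothetical and not part of this change), any type with these four methods satisfies the interface:

```go
// noopManager is a hypothetical stand-in showing the cdManager seam; the
// real injected fake is fakePDManager in the test file further below.
type noopManager struct{}

func (noopManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error { return nil }
func (noopManager) DetachDisk(c *cinderVolumeUnmounter) error                    { return nil }
func (noopManager) CreateVolume(p *cinderVolumeProvisioner) (volumeID string, volumeSizeGB int, err error) {
	return "", 0, nil
}
func (noopManager) DeleteVolume(d *cinderVolumeDeleter) error { return nil }

// Compile-time check that noopManager implements cdManager.
var _ cdManager = noopManager{}
```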
var _ volume.Builder = &cinderVolumeBuilder{} var _ volume.Mounter = &cinderVolumeMounter{}
type cinderVolumeBuilder struct { type cinderVolumeMounter struct {
*cinderVolume *cinderVolume
fsType string fsType string
readOnly bool readOnly bool
@ -212,13 +212,13 @@ type cinderVolume struct {
} }
func detachDiskLogError(cd *cinderVolume) { func detachDiskLogError(cd *cinderVolume) {
err := cd.manager.DetachDisk(&cinderVolumeCleaner{cd}) err := cd.manager.DetachDisk(&cinderVolumeUnmounter{cd})
if err != nil { if err != nil {
glog.Warningf("Failed to detach disk: %v (%v)", cd, err) glog.Warningf("Failed to detach disk: %v (%v)", cd, err)
} }
} }
func (b *cinderVolumeBuilder) GetAttributes() volume.Attributes { func (b *cinderVolumeMounter) GetAttributes() volume.Attributes {
return volume.Attributes{ return volume.Attributes{
ReadOnly: b.readOnly, ReadOnly: b.readOnly,
Managed: !b.readOnly, Managed: !b.readOnly,
@ -226,12 +226,12 @@ func (b *cinderVolumeBuilder) GetAttributes() volume.Attributes {
} }
} }
func (b *cinderVolumeBuilder) SetUp(fsGroup *int64) error { func (b *cinderVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup) return b.SetUpAt(b.GetPath(), fsGroup)
} }
// SetUp attaches the disk and bind mounts to the volume path. // SetUp attaches the disk and bind mounts to the volume path.
func (b *cinderVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error { func (b *cinderVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir) glog.V(5).Infof("Cinder SetUp %s to %s", b.pdName, dir)
b.plugin.volumeLocks.LockKey(b.pdName) b.plugin.volumeLocks.LockKey(b.pdName)
@ -314,19 +314,19 @@ func (cd *cinderVolume) GetPath() string {
return cd.plugin.host.GetPodVolumeDir(cd.podUID, strings.EscapeQualifiedNameForDisk(name), cd.volName) return cd.plugin.host.GetPodVolumeDir(cd.podUID, strings.EscapeQualifiedNameForDisk(name), cd.volName)
} }
type cinderVolumeCleaner struct { type cinderVolumeUnmounter struct {
*cinderVolume *cinderVolume
} }
var _ volume.Cleaner = &cinderVolumeCleaner{} var _ volume.Unmounter = &cinderVolumeUnmounter{}
func (c *cinderVolumeCleaner) TearDown() error { func (c *cinderVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath()) return c.TearDownAt(c.GetPath())
} }
// Unmounts the bind mount, and detaches the disk only if the PD // Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet. // resource was the last reference to that disk on the kubelet.
func (c *cinderVolumeCleaner) TearDownAt(dir string) error { func (c *cinderVolumeUnmounter) TearDownAt(dir string) error {
glog.V(5).Infof("Cinder TearDown of %s", dir) glog.V(5).Infof("Cinder TearDown of %s", dir)
notmnt, err := c.mounter.IsLikelyNotMountPoint(dir) notmnt, err := c.mounter.IsLikelyNotMountPoint(dir)
if err != nil { if err != nil {
@ -339,8 +339,8 @@ func (c *cinderVolumeCleaner) TearDownAt(dir string) error {
} }
// Find Cinder volumeID to lock the right volume // Find Cinder volumeID to lock the right volume
// TODO: refactor VolumePlugin.NewCleaner to get full volume.Spec just like // TODO: refactor VolumePlugin.NewUnmounter to get full volume.Spec just like
// NewBuilder. We could then find volumeID there without probing MountRefs. // NewMounter. We could then find volumeID there without probing MountRefs.
refs, err := mount.GetMountRefs(c.mounter, dir) refs, err := mount.GetMountRefs(c.mounter, dir)
if err != nil { if err != nil {
glog.V(4).Infof("GetMountRefs failed: %v", err) glog.V(4).Infof("GetMountRefs failed: %v", err)

View File

@ -73,7 +73,7 @@ func getFakeDeviceName(host volume.VolumeHost, pdName string) string {
// Real Cinder AttachDisk attaches a cinder volume. If it is not yet mounted, // Real Cinder AttachDisk attaches a cinder volume. If it is not yet mounted,
// it mounts it to globalPDPath. // it mounts it to globalPDPath.
// We create a dummy directory (="device") and bind-mount it to globalPDPath // We create a dummy directory (="device") and bind-mount it to globalPDPath
func (fake *fakePDManager) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error { func (fake *fakePDManager) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
globalPath := makeGlobalPDName(b.plugin.host, b.pdName) globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
fakeDeviceName := getFakeDeviceName(b.plugin.host, b.pdName) fakeDeviceName := getFakeDeviceName(b.plugin.host, b.pdName)
err := os.MkdirAll(fakeDeviceName, 0750) err := os.MkdirAll(fakeDeviceName, 0750)
@ -104,7 +104,7 @@ func (fake *fakePDManager) AttachDisk(b *cinderVolumeBuilder, globalPDPath strin
return nil return nil
} }
func (fake *fakePDManager) DetachDisk(c *cinderVolumeCleaner) error { func (fake *fakePDManager) DetachDisk(c *cinderVolumeUnmounter) error {
globalPath := makeGlobalPDName(c.plugin.host, c.pdName) globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
fakeDeviceName := getFakeDeviceName(c.plugin.host, c.pdName) fakeDeviceName := getFakeDeviceName(c.plugin.host, c.pdName)
// unmount the bind-mount - should be fast // unmount the bind-mount - should be fast
@ -154,20 +154,20 @@ func TestPlugin(t *testing.T) {
}, },
}, },
} }
builder, err := plug.(*cinderPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{}) mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cinder/vol1") volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~cinder/vol1")
path := builder.GetPath() path := mounter.GetPath()
if path != volPath { if path != volPath {
t.Errorf("Got unexpected path: %s", path) t.Errorf("Got unexpected path: %s", path)
} }
if err := builder.SetUp(nil); err != nil { if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(path); err != nil { if _, err := os.Stat(path); err != nil {
@ -185,15 +185,15 @@ func TestPlugin(t *testing.T) {
} }
} }
cleaner, err := plug.(*cinderPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{}) unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{0}, &mount.FakeMounter{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
if cleaner == nil { if unmounter == nil {
t.Errorf("Got a nil Cleaner") t.Errorf("Got a nil Unmounter")
} }
if err := cleaner.TearDown(); err != nil { if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(path); err == nil { if _, err := os.Stat(path); err == nil {
@ -270,54 +270,54 @@ func TestAttachDetachRace(t *testing.T) {
} }
fakeMounter := &mount.FakeMounter{} fakeMounter := &mount.FakeMounter{}
// SetUp the volume for 1st time // SetUp the volume for 1st time
builder, err := plug.(*cinderPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{time.Second}, fakeMounter) mounter, err := plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakePDManager{time.Second}, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
if err := builder.SetUp(nil); err != nil { if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
path := builder.GetPath() path := mounter.GetPath()
// TearDown the 1st volume and SetUp the 2nd volume (to different pod) at the same time // TearDown the 1st volume and SetUp the 2nd volume (to different pod) at the same time
builder, err = plug.(*cinderPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid2"), &fakePDManager{time.Second}, fakeMounter) mounter, err = plug.(*cinderPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid2"), &fakePDManager{time.Second}, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
cleaner, err := plug.(*cinderPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{time.Second}, fakeMounter) unmounter, err := plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &fakePDManager{time.Second}, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
var buildComplete uint32 = 0 var buildComplete uint32 = 0
go func() { go func() {
glog.Infof("Attaching volume") glog.Infof("Attaching volume")
if err := builder.SetUp(nil); err != nil { if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
glog.Infof("Volume attached") glog.Infof("Volume attached")
atomic.AddUint32(&buildComplete, 1) atomic.AddUint32(&buildComplete, 1)
}() }()
// builder is attaching the volume, which takes 1 second. Detach it in the middle of this interval // mounter is attaching the volume, which takes 1 second. Detach it in the middle of this interval
time.Sleep(time.Second / 2) time.Sleep(time.Second / 2)
glog.Infof("Detaching volume") glog.Infof("Detaching volume")
if err = cleaner.TearDown(); err != nil { if err = unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
glog.Infof("Volume detached") glog.Infof("Volume detached")
// wait for the builder to finish // wait for the mounter to finish
for atomic.LoadUint32(&buildComplete) == 0 { for atomic.LoadUint32(&buildComplete) == 0 {
time.Sleep(time.Millisecond * 100) time.Sleep(time.Millisecond * 100)
} }
@ -333,15 +333,15 @@ func TestAttachDetachRace(t *testing.T) {
} }
// TearDown the 2nd volume // TearDown the 2nd volume
cleaner, err = plug.(*cinderPlugin).newCleanerInternal("vol1", types.UID("poduid2"), &fakePDManager{0}, fakeMounter) unmounter, err = plug.(*cinderPlugin).newUnmounterInternal("vol1", types.UID("poduid2"), &fakePDManager{0}, fakeMounter)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
if cleaner == nil { if unmounter == nil {
t.Errorf("Got a nil Cleaner") t.Errorf("Got a nil Unmounter")
} }
if err := cleaner.TearDown(); err != nil { if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(path); err == nil { if _, err := os.Stat(path); err == nil {
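
The race test coordinates the slow SetUp goroutine with a polled atomic flag rather than a channel; the same pattern in isolation, with the attach stubbed out by a sleep (a minimal runnable sketch):

```go
package main

import (
	"sync/atomic"
	"time"
)

func main() {
	var setUpComplete uint32

	go func() {
		time.Sleep(time.Second) // stand-in for the slow, fake AttachDisk
		atomic.AddUint32(&setUpComplete, 1)
	}()

	// A real test triggers TearDown here, halfway through the attach.
	time.Sleep(time.Second / 2)

	// Poll until the goroutine signals completion.
	for atomic.LoadUint32(&setUpComplete) == 0 {
		time.Sleep(100 * time.Millisecond)
	}
}
```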

View File

@ -33,7 +33,7 @@ type CinderDiskUtil struct{}
// Attaches a disk specified by a volume.CinderPersistentDisk to the current kubelet. // Attaches a disk specified by a volume.CinderPersistentDisk to the current kubelet.
// Mounts the disk to its global path. // Mounts the disk to its global path.
func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeBuilder, globalPDPath string) error { func (util *CinderDiskUtil) AttachDisk(b *cinderVolumeMounter, globalPDPath string) error {
options := []string{} options := []string{}
if b.readOnly { if b.readOnly {
options = append(options, "ro") options = append(options, "ro")
@ -105,7 +105,7 @@ func makeDevicePath(diskid string) string {
} }
// Unmounts the device and detaches the disk from the kubelet's host machine. // Unmounts the device and detaches the disk from the kubelet's host machine.
func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeCleaner) error { func (util *CinderDiskUtil) DetachDisk(cd *cinderVolumeUnmounter) error {
globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName) globalPDPath := makeGlobalPDName(cd.plugin.host, cd.pdName)
if err := cd.mounter.Unmount(globalPDPath); err != nil { if err := cd.mounter.Unmount(globalPDPath); err != nil {
return err return err

View File

@ -58,16 +58,16 @@ func (plugin *configMapPlugin) CanSupport(spec *volume.Spec) bool {
return spec.Volume != nil && spec.Volume.ConfigMap != nil return spec.Volume != nil && spec.Volume.ConfigMap != nil
} }
func (plugin *configMapPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) { func (plugin *configMapPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return &configMapVolumeBuilder{ return &configMapVolumeMounter{
configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}}, configMapVolume: &configMapVolume{spec.Name(), pod.UID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}},
source: *spec.Volume.ConfigMap, source: *spec.Volume.ConfigMap,
pod: *pod, pod: *pod,
opts: &opts}, nil opts: &opts}, nil
} }
func (plugin *configMapPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) { func (plugin *configMapPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &configMapVolumeCleaner{&configMapVolume{volName, podUID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}}}, nil return &configMapVolumeUnmounter{&configMapVolume{volName, podUID, plugin, plugin.host.GetMounter(), plugin.host.GetWriter(), volume.MetricsNil{}}}, nil
} }
type configMapVolume struct { type configMapVolume struct {
@ -85,9 +85,9 @@ func (sv *configMapVolume) GetPath() string {
return sv.plugin.host.GetPodVolumeDir(sv.podUID, strings.EscapeQualifiedNameForDisk(configMapPluginName), sv.volName) return sv.plugin.host.GetPodVolumeDir(sv.podUID, strings.EscapeQualifiedNameForDisk(configMapPluginName), sv.volName)
} }
// configMapVolumeBuilder handles retrieving configMap data from the API server // configMapVolumeMounter handles retrieving configMap data from the API server
// and placing them into the volume on the host. // and placing them into the volume on the host.
type configMapVolumeBuilder struct { type configMapVolumeMounter struct {
*configMapVolume *configMapVolume
source api.ConfigMapVolumeSource source api.ConfigMapVolumeSource
@ -95,7 +95,7 @@ type configMapVolumeBuilder struct {
opts *volume.VolumeOptions opts *volume.VolumeOptions
} }
var _ volume.Builder = &configMapVolumeBuilder{} var _ volume.Mounter = &configMapVolumeMounter{}
func (sv *configMapVolume) GetAttributes() volume.Attributes { func (sv *configMapVolume) GetAttributes() volume.Attributes {
return volume.Attributes{ return volume.Attributes{
@ -110,15 +110,15 @@ var wrappedVolumeSpec = volume.Spec{
Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}}, Volume: &api.Volume{VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}}},
} }
func (b *configMapVolumeBuilder) SetUp(fsGroup *int64) error { func (b *configMapVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup) return b.SetUpAt(b.GetPath(), fsGroup)
} }
func (b *configMapVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error { func (b *configMapVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir) glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
// Wrap EmptyDir, let it do the setup. // Wrap EmptyDir, let it do the setup.
wrapped, err := b.plugin.host.NewWrapperBuilder(b.volName, wrappedVolumeSpec, &b.pod, *b.opts) wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
if err != nil { if err != nil {
return err return err
} }
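
configMap, like the downwardAPI and secret plugins, never mounts anything itself: it delegates the actual mount to a wrapped in-memory EmptyDir and then projects its payload on top. A hedged sketch of that delegation (setUpViaWrappedEmptyDir and writePayload are illustrative names, not code from this change):

```go
// setUpViaWrappedEmptyDir shows the wrapper pattern: the EmptyDir mounter
// is idempotent, so re-running this on every kubelet sync is safe.
func setUpViaWrappedEmptyDir(host volume.VolumeHost, volName string, pod *api.Pod,
	opts volume.VolumeOptions, dir string, fsGroup *int64) error {
	wrapped, err := host.NewWrapperMounter(volName, wrappedVolumeSpec, pod, opts)
	if err != nil {
		return err
	}
	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
		return err
	}
	// writePayload stands in for fetching the ConfigMap from the API
	// server and writing each key out as a file under dir.
	return writePayload(dir)
}

func writePayload(dir string) error { return nil } // stub
```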
@ -202,22 +202,22 @@ func totalBytes(configMap *api.ConfigMap) int {
return totalSize return totalSize
} }
// configMapVolumeCleaner handles cleaning up configMap volumes. // configMapVolumeUnmounter handles cleaning up configMap volumes.
type configMapVolumeCleaner struct { type configMapVolumeUnmounter struct {
*configMapVolume *configMapVolume
} }
var _ volume.Cleaner = &configMapVolumeCleaner{} var _ volume.Unmounter = &configMapVolumeUnmounter{}
func (c *configMapVolumeCleaner) TearDown() error { func (c *configMapVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath()) return c.TearDownAt(c.GetPath())
} }
func (c *configMapVolumeCleaner) TearDownAt(dir string) error { func (c *configMapVolumeUnmounter) TearDownAt(dir string) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
// Wrap EmptyDir, let it do the teardown. // Wrap EmptyDir, let it do the teardown.
wrapped, err := c.plugin.host.NewWrapperCleaner(c.volName, wrappedVolumeSpec, c.podUID) wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
if err != nil { if err != nil {
return err return err
} }

View File

@ -229,21 +229,21 @@ func TestPlugin(t *testing.T) {
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) { if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath) t.Errorf("Got unexpected path: %s", volumePath)
} }
fsGroup := int64(1001) fsGroup := int64(1001)
err = builder.SetUp(&fsGroup) err = mounter.SetUp(&fsGroup)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -284,23 +284,23 @@ func TestPluginReboot(t *testing.T) {
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir) podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~configmap/test_volume_name", rootDir)
util.SetReady(podMetadataDir) util.SetReady(podMetadataDir)
volumePath := builder.GetPath() volumePath := mounter.GetPath()
if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name")) { if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~configmap/test_volume_name")) {
t.Errorf("Got unexpected path: %s", volumePath) t.Errorf("Got unexpected path: %s", volumePath)
} }
fsGroup := int64(1001) fsGroup := int64(1001)
err = builder.SetUp(&fsGroup) err = mounter.SetUp(&fsGroup)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -362,15 +362,15 @@ func doTestConfigMapDataInVolume(volumePath string, configMap api.ConfigMap, t *
} }
func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) { func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) {
cleaner, err := plugin.NewCleaner(testVolumeName, podUID) unmounter, err := plugin.NewUnmounter(testVolumeName, podUID)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
if cleaner == nil { if unmounter == nil {
t.Errorf("Got a nil Cleaner") t.Errorf("Got a nil Unmounter")
} }
if err := cleaner.TearDown(); err != nil { if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(volumePath); err == nil { if _, err := os.Stat(volumePath); err == nil {

View File

@ -68,7 +68,7 @@ func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
return spec.Volume != nil && spec.Volume.DownwardAPI != nil return spec.Volume != nil && spec.Volume.DownwardAPI != nil
} }
func (plugin *downwardAPIPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) { func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
v := &downwardAPIVolume{ v := &downwardAPIVolume{
volName: spec.Name(), volName: spec.Name(),
pod: pod, pod: pod,
@ -79,14 +79,14 @@ func (plugin *downwardAPIPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opt
for _, fileInfo := range spec.Volume.DownwardAPI.Items { for _, fileInfo := range spec.Volume.DownwardAPI.Items {
v.fieldReferenceFileNames[fileInfo.FieldRef.FieldPath] = path.Clean(fileInfo.Path) v.fieldReferenceFileNames[fileInfo.FieldRef.FieldPath] = path.Clean(fileInfo.Path)
} }
return &downwardAPIVolumeBuilder{ return &downwardAPIVolumeMounter{
downwardAPIVolume: v, downwardAPIVolume: v,
opts: &opts, opts: &opts,
}, nil }, nil
} }
func (plugin *downwardAPIPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) { func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
return &downwardAPIVolumeCleaner{ return &downwardAPIVolumeUnmounter{
&downwardAPIVolume{ &downwardAPIVolume{
volName: volName, volName: volName,
podUID: podUID, podUID: podUID,
@ -100,20 +100,20 @@ type downwardAPIVolume struct {
volName string volName string
fieldReferenceFileNames map[string]string fieldReferenceFileNames map[string]string
pod *api.Pod pod *api.Pod
podUID types.UID // TODO: remove this redundancy as soon as the NewCleaner func has *api.Pod and not only types.UID podUID types.UID // TODO: remove this redundancy as soon as the NewUnmounter func has *api.Pod and not only types.UID
plugin *downwardAPIPlugin plugin *downwardAPIPlugin
volume.MetricsNil volume.MetricsNil
} }
// downwardAPIVolumeBuilder fetches downward API info from the pod // downwardAPIVolumeMounter fetches downward API info from the pod
// and dumps it into files // and dumps it into files
type downwardAPIVolumeBuilder struct { type downwardAPIVolumeMounter struct {
*downwardAPIVolume *downwardAPIVolume
opts *volume.VolumeOptions opts *volume.VolumeOptions
} }
// downwardAPIVolumeBuilder implements volume.Builder interface // downwardAPIVolumeMounter implements volume.Mounter interface
var _ volume.Builder = &downwardAPIVolumeBuilder{} var _ volume.Mounter = &downwardAPIVolumeMounter{}
// downward API volumes are always ReadOnlyManaged // downward API volumes are always ReadOnlyManaged
func (d *downwardAPIVolume) GetAttributes() volume.Attributes { func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
@ -128,14 +128,14 @@ func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
// This function is not idempotent by design. We want the data to be refreshed periodically. // This function is not idempotent by design. We want the data to be refreshed periodically.
// The internal sync interval of kubelet will drive the refresh of data. // The internal sync interval of kubelet will drive the refresh of data.
// TODO: Add volume specific ticker and refresh loop // TODO: Add volume specific ticker and refresh loop
func (b *downwardAPIVolumeBuilder) SetUp(fsGroup *int64) error { func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup) return b.SetUpAt(b.GetPath(), fsGroup)
} }
func (b *downwardAPIVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error { func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir) glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting // Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
wrapped, err := b.plugin.host.NewWrapperBuilder(b.volName, wrappedVolumeSpec, b.pod, *b.opts) wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, b.pod, *b.opts)
if err != nil { if err != nil {
glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error()) glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
return err return err
@ -357,28 +357,28 @@ func (d *downwardAPIVolume) GetPath() string {
} }
// downwardAPIVolumeCleaner handles cleaning up downwardAPI volumes // downwardAPIVolumeUnmounter handles cleaning up downwardAPI volumes
type downwardAPIVolumeCleaner struct { type downwardAPIVolumeUnmounter struct {
*downwardAPIVolume *downwardAPIVolume
} }
// downwardAPIVolumeCleaner implements volume.Cleaner interface // downwardAPIVolumeUnmounter implements volume.Unmounter interface
var _ volume.Cleaner = &downwardAPIVolumeCleaner{} var _ volume.Unmounter = &downwardAPIVolumeUnmounter{}
func (c *downwardAPIVolumeCleaner) TearDown() error { func (c *downwardAPIVolumeUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath()) return c.TearDownAt(c.GetPath())
} }
func (c *downwardAPIVolumeCleaner) TearDownAt(dir string) error { func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir) glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
// Wrap EmptyDir, let it do the teardown. // Wrap EmptyDir, let it do the teardown.
wrapped, err := c.plugin.host.NewWrapperCleaner(c.volName, wrappedVolumeSpec, c.podUID) wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
if err != nil { if err != nil {
return err return err
} }
return wrapped.TearDownAt(dir) return wrapped.TearDownAt(dir)
} }
func (b *downwardAPIVolumeBuilder) getMetaDir() string { func (b *downwardAPIVolumeMounter) getMetaDir() string {
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName) return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
} }
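
SetUp is deliberately non-idempotent so the kubelet sync loop refreshes the files, yet readers must never observe a half-written state; the tests below check for a `.current` symlink, which hints at the scheme: write each generation into its own directory, then atomically retarget the link. A hedged reconstruction (helper name and directory layout are illustrative):

```go
package downwardapi

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// atomicUpdate writes a new payload generation into a fresh directory and
// atomically flips the ".current" symlink to it. rename(2) over an
// existing link is atomic on POSIX filesystems, so a reader following
// .current sees either the old generation or the new one, never a mix.
func atomicUpdate(volDir string, writeFiles func(dir string) error) error {
	genDir := filepath.Join(volDir, fmt.Sprintf("..%d", time.Now().UnixNano()))
	if err := os.MkdirAll(genDir, 0750); err != nil {
		return err
	}
	if err := writeFiles(genDir); err != nil {
		return err
	}
	tmpLink := filepath.Join(volDir, ".current.tmp")
	_ = os.Remove(tmpLink) // best effort: clear leftovers from a crash
	if err := os.Symlink(genDir, tmpLink); err != nil {
		return err
	}
	return os.Rename(tmpLink, filepath.Join(volDir, ".current"))
}
```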

View File

@ -65,15 +65,15 @@ func TestCanSupport(t *testing.T) {
} }
func CleanEverything(plugin volume.VolumePlugin, testVolumeName, volumePath string, testPodUID types.UID, t *testing.T) { func CleanEverything(plugin volume.VolumePlugin, testVolumeName, volumePath string, testPodUID types.UID, t *testing.T) {
cleaner, err := plugin.NewCleaner(testVolumeName, testPodUID) unmounter, err := plugin.NewUnmounter(testVolumeName, testPodUID)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
if cleaner == nil { if unmounter == nil {
t.Errorf("Got a nil Cleaner") t.Errorf("Got a nil Unmounter")
} }
if err := cleaner.TearDown(); err != nil { if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(volumePath); err == nil { if _, err := os.Stat(volumePath); err == nil {
@ -121,18 +121,18 @@ func TestLabels(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -199,17 +199,17 @@ func TestAnnotations(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Annotations: annotations}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Annotations: annotations}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -261,17 +261,17 @@ func TestName(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Name: testName}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -324,17 +324,17 @@ func TestNamespace(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Namespace: testNamespace}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Namespace: testNamespace}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -389,17 +389,17 @@ func TestWriteTwiceNoUpdate(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -410,7 +410,7 @@ func TestWriteTwiceNoUpdate(t *testing.T) {
t.Errorf(".current should be a link... %s\n", err.Error()) t.Errorf(".current should be a link... %s\n", err.Error())
} }
err = builder.SetUp(nil) // now re-run Setup err = mounter.SetUp(nil) // now re-run Setup
if err != nil { if err != nil {
t.Errorf("Failed to re-setup volume: %v", err) t.Errorf("Failed to re-setup volume: %v", err)
} }
@ -475,17 +475,17 @@ func TestWriteTwiceWithUpdate(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -512,7 +512,7 @@ func TestWriteTwiceWithUpdate(t *testing.T) {
// Now update the labels // Now update the labels
pod.ObjectMeta.Labels = newLabels pod.ObjectMeta.Labels = newLabels
err = builder.SetUp(nil) // now re-run Setup err = mounter.SetUp(nil) // now re-run Setup
if err != nil { if err != nil {
t.Errorf("Failed to re-setup volume: %v", err) t.Errorf("Failed to re-setup volume: %v", err)
} }
@ -583,17 +583,17 @@ func TestWriteWithUnixPath(t *testing.T) {
t.Errorf("Can't find the plugin by name") t.Errorf("Can't find the plugin by name")
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels, Annotations: annotations}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels, Annotations: annotations}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Errorf("Failed to setup volume: %v", err) t.Errorf("Failed to setup volume: %v", err)
} }
@ -665,17 +665,17 @@ func TestWriteWithUnixPathBadPath(t *testing.T) {
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID, Labels: labels}}
builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{}) mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
if err != nil { if err != nil {
t.Fatalf("Failed to make a new Builder: %v", err) t.Fatalf("Failed to make a new Mounter: %v", err)
} else if builder == nil { } else if mounter == nil {
t.Fatalf("Got a nil Builder") t.Fatalf("Got a nil Mounter")
} }
volumePath := builder.GetPath() volumePath := mounter.GetPath()
defer CleanEverything(plugin, testVolumeName, volumePath, testPodUID, t) defer CleanEverything(plugin, testVolumeName, volumePath, testPodUID, t)
err = builder.SetUp(nil) err = mounter.SetUp(nil)
if err != nil { if err != nil {
t.Fatalf("Failed to setup volume: %v", err) t.Fatalf("Failed to setup volume: %v", err)
} }

View File

@ -71,11 +71,11 @@ func (plugin *emptyDirPlugin) CanSupport(spec *volume.Spec) bool {
return false return false
} }
func (plugin *emptyDirPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) { func (plugin *emptyDirPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return plugin.newBuilderInternal(spec, pod, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}, opts) return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}, opts)
} }
func (plugin *emptyDirPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Builder, error) { func (plugin *emptyDirPlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface, mountDetector mountDetector, opts volume.VolumeOptions) (volume.Mounter, error) {
medium := api.StorageMediumDefault medium := api.StorageMediumDefault
if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir. if spec.Volume.EmptyDir != nil { // Support a non-specified source as EmptyDir.
medium = spec.Volume.EmptyDir.Medium medium = spec.Volume.EmptyDir.Medium
@ -92,12 +92,12 @@ func (plugin *emptyDirPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod
}, nil }, nil
} }
func (plugin *emptyDirPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) { func (plugin *emptyDirPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function. // Inject real implementations here, test through the internal function.
return plugin.newCleanerInternal(volName, podUID, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()}) return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter(), &realMountDetector{plugin.host.GetMounter()})
} }
func (plugin *emptyDirPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Cleaner, error) { func (plugin *emptyDirPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface, mountDetector mountDetector) (volume.Unmounter, error) {
ed := &emptyDir{ ed := &emptyDir{
pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}}, pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
volName: volName, volName: volName,
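
The mounter built here behaves differently per medium: StorageMediumMemory is backed by a tmpfs mount (which the test below counts in the FakeMounter log), while the default medium is just a directory on the node. A condensed, hedged sketch of that switch (setUpByMedium is an illustrative name; imports of os and the api and mount packages are assumed):

```go
// Sketch only; the real SetUpAt also handles SELinux labels, readiness
// markers, and idempotency checks.
func setUpByMedium(mounter mount.Interface, dir string, medium api.StorageMedium) error {
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	if medium == api.StorageMediumMemory {
		// Memory-backed emptyDir: overlay the directory with a tmpfs mount.
		return mounter.Mount("tmpfs", dir, "tmpfs", nil)
	}
	// StorageMediumDefault: the directory itself is the volume.
	return nil
}
```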

View File

@ -137,9 +137,9 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}}, VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: config.medium}},
} }
mounter = mount.FakeMounter{} physicalMounter = mount.FakeMounter{}
mountDetector = fakeMountDetector{} mountDetector = fakeMountDetector{}
pod = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} pod = &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
) )
// Set up the SELinux options on the pod // Set up the SELinux options on the pod
@ -161,7 +161,7 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
} }
if config.idempotent { if config.idempotent {
mounter.MountPoints = []mount.MountPoint{ physicalMounter.MountPoints = []mount.MountPoint{
{ {
Path: volumePath, Path: volumePath,
}, },
@ -169,24 +169,24 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
util.SetReady(metadataDir) util.SetReady(metadataDir)
} }
builder, err := plug.(*emptyDirPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), mounter, err := plug.(*emptyDirPlugin).newMounterInternal(volume.NewSpecFromVolume(spec),
pod, pod,
&mounter, &physicalMounter,
&mountDetector, &mountDetector,
volume.VolumeOptions{RootContext: config.rootContext}) volume.VolumeOptions{RootContext: config.rootContext})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volPath := builder.GetPath() volPath := mounter.GetPath()
if volPath != volumePath { if volPath != volumePath {
t.Errorf("Got unexpected path: %s", volPath) t.Errorf("Got unexpected path: %s", volPath)
} }
if err := builder.SetUp(nil); err != nil { if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
@ -210,30 +210,30 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
} }
// Check the number of mounts performed during setup // Check the number of mounts performed during setup
if e, a := config.expectedSetupMounts, len(mounter.Log); e != a { if e, a := config.expectedSetupMounts, len(physicalMounter.Log); e != a {
t.Errorf("Expected %v mounter calls during setup, got %v", e, a) t.Errorf("Expected %v physicalMounter calls during setup, got %v", e, a)
} else if config.expectedSetupMounts == 1 && } else if config.expectedSetupMounts == 1 &&
(mounter.Log[0].Action != mount.FakeActionMount || mounter.Log[0].FSType != "tmpfs") { (physicalMounter.Log[0].Action != mount.FakeActionMount || physicalMounter.Log[0].FSType != "tmpfs") {
t.Errorf("Unexpected mounter action during setup: %#v", mounter.Log[0]) t.Errorf("Unexpected physicalMounter action during setup: %#v", physicalMounter.Log[0])
} }
mounter.ResetLog() physicalMounter.ResetLog()
// Make a cleaner for the volume // Make an unmounter for the volume
teardownMedium := mediumUnknown teardownMedium := mediumUnknown
if config.medium == api.StorageMediumMemory { if config.medium == api.StorageMediumMemory {
teardownMedium = mediumMemory teardownMedium = mediumMemory
} }
cleanerMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown} unmounterMountDetector := &fakeMountDetector{medium: teardownMedium, isMount: config.shouldBeMountedBeforeTeardown}
cleaner, err := plug.(*emptyDirPlugin).newCleanerInternal(volumeName, types.UID("poduid"), &mounter, cleanerMountDetector) unmounter, err := plug.(*emptyDirPlugin).newUnmounterInternal(volumeName, types.UID("poduid"), &physicalMounter, unmounterMountDetector)
if err != nil { if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err) t.Errorf("Failed to make a new Unmounter: %v", err)
} }
if cleaner == nil { if unmounter == nil {
t.Errorf("Got a nil Cleaner") t.Errorf("Got a nil Unmounter")
} }
// Tear down the volume // Tear down the volume
if err := cleaner.TearDown(); err != nil { if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err) t.Errorf("Expected success, got: %v", err)
} }
if _, err := os.Stat(volPath); err == nil { if _, err := os.Stat(volPath); err == nil {
@ -242,13 +242,13 @@ func doTestPlugin(t *testing.T, config pluginTestConfig) {
t.Errorf("SetUp() failed: %v", err) t.Errorf("SetUp() failed: %v", err)
} }
// Check the number of mounter calls during teardown // Check the number of physicalMounter calls during teardown
if e, a := config.expectedTeardownMounts, len(mounter.Log); e != a { if e, a := config.expectedTeardownMounts, len(physicalMounter.Log); e != a {
t.Errorf("Expected %v mounter calls during teardown, got %v", e, a) t.Errorf("Expected %v physicalMounter calls during teardown, got %v", e, a)
} else if config.expectedTeardownMounts == 1 && mounter.Log[0].Action != mount.FakeActionUnmount { } else if config.expectedTeardownMounts == 1 && physicalMounter.Log[0].Action != mount.FakeActionUnmount {
t.Errorf("Unexpected mounter action during teardown: %#v", mounter.Log[0]) t.Errorf("Unexpected physicalMounter action during teardown: %#v", physicalMounter.Log[0])
} }
mounter.ResetLog() physicalMounter.ResetLog()
} }
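
The setup and teardown assertions above lean on mount.FakeMounter recording every action in its Log slice; the same pattern in isolation, using only the fields and methods the test already exercises (Mount's signature follows mount.Interface; the usual testing and mount imports are assumed):

```go
func TestFakeMounterLog(t *testing.T) {
	fake := &mount.FakeMounter{}
	if err := fake.Mount("tmpfs", "/mnt/target", "tmpfs", nil); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	// Exactly one action should have been recorded, and it should be a
	// tmpfs mount, mirroring the setup assertion in doTestPlugin above.
	if len(fake.Log) != 1 || fake.Log[0].Action != mount.FakeActionMount || fake.Log[0].FSType != "tmpfs" {
		t.Fatalf("unexpected mount log: %#v", fake.Log)
	}
	fake.ResetLog()
}
```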
func TestPluginBackCompat(t *testing.T) { func TestPluginBackCompat(t *testing.T) {
@ -264,15 +264,15 @@ func TestPluginBackCompat(t *testing.T) {
Name: "vol1", Name: "vol1",
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""}) mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
if builder == nil { if mounter == nil {
t.Errorf("Got a nil Builder") t.Errorf("Got a nil Mounter")
} }
volPath := builder.GetPath() volPath := mounter.GetPath()
if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") { if volPath != path.Join(basePath, "pods/poduid/volumes/kubernetes.io~empty-dir/vol1") {
t.Errorf("Got unexpected path: %s", volPath) t.Errorf("Got unexpected path: %s", volPath)
} }
@ -293,13 +293,13 @@ func TestMetrics(t *testing.T) {
Name: "vol1", Name: "vol1",
} }
pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}} pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""}) mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{RootContext: ""})
if err != nil { if err != nil {
t.Errorf("Failed to make a new Builder: %v", err) t.Errorf("Failed to make a new Mounter: %v", err)
} }
// Need to create the subdirectory // Need to create the subdirectory
os.MkdirAll(builder.GetPath(), 0755) os.MkdirAll(mounter.GetPath(), 0755)
expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs() expectedEmptyDirUsage, err := volumetest.FindEmptyDirectoryUsageOnTmpfs()
if err != nil { if err != nil {
@ -307,7 +307,7 @@ func TestMetrics(t *testing.T) {
} }
// TODO(pwittroc): Move this into a reusable testing utility // TODO(pwittroc): Move this into a reusable testing utility
metrics, err := builder.GetMetrics() metrics, err := mounter.GetMetrics()
if err != nil { if err != nil {
t.Errorf("Unexpected error when calling GetMetrics %v", err) t.Errorf("Unexpected error when calling GetMetrics %v", err)
} }

View File

@ -28,13 +28,13 @@ import (
type diskManager interface { type diskManager interface {
MakeGlobalPDName(disk fcDisk) string MakeGlobalPDName(disk fcDisk) string
// Attaches the disk to the kubelet's host machine. // Attaches the disk to the kubelet's host machine.
AttachDisk(b fcDiskBuilder) error AttachDisk(b fcDiskMounter) error
// Detaches the disk from the kubelet's host machine. // Detaches the disk from the kubelet's host machine.
DetachDisk(disk fcDiskCleaner, mntPath string) error DetachDisk(disk fcDiskUnmounter, mntPath string) error
} }
// utility to mount a disk-based filesystem // utility to mount a disk-based filesystem
func diskSetUp(manager diskManager, b fcDiskBuilder, volPath string, mounter mount.Interface, fsGroup *int64) error { func diskSetUp(manager diskManager, b fcDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
globalPDPath := manager.MakeGlobalPDName(*b.fcDisk) globalPDPath := manager.MakeGlobalPDName(*b.fcDisk)
// TODO: handle failed mounts here. // TODO: handle failed mounts here.
noMnt, err := mounter.IsLikelyNotMountPoint(volPath) noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
@ -74,7 +74,7 @@ func diskSetUp(manager diskManager, b fcDiskBuilder, volPath string, mounter mou
} }
// utility to tear down a disk-based filesystem // utility to tear down a disk-based filesystem
func diskTearDown(manager diskManager, c fcDiskCleaner, volPath string, mounter mount.Interface) error { func diskTearDown(manager diskManager, c fcDiskUnmounter, volPath string, mounter mount.Interface) error {
noMnt, err := mounter.IsLikelyNotMountPoint(volPath) noMnt, err := mounter.IsLikelyNotMountPoint(volPath)
if err != nil { if err != nil {
glog.Errorf("cannot validate mountpoint %s", volPath) glog.Errorf("cannot validate mountpoint %s", volPath)
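These two helpers are shared by the mounter and unmounter sides of the fc plugin. A compressed sketch of how they pair up (illustrative only), assuming a populated diskManager `m`, mounter `b`, unmounter `c`, and mount.Interface `mnt`:

// diskSetUp attaches via the manager and bind mounts; diskTearDown reverses it.
func cycleFcDisk(m diskManager, b fcDiskMounter, c fcDiskUnmounter, volPath string, mnt mount.Interface) error {
    if err := diskSetUp(m, b, volPath, mnt, nil); err != nil { // nil fsGroup: no ownership management
        return err
    }
    return diskTearDown(m, c, volPath, mnt)
}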


@@ -70,12 +70,12 @@ func (plugin *fcPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
     }
 }
-func (plugin *fcPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *fcPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newBuilderInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter())
+    return plugin.newMounterInternal(spec, pod.UID, &FCUtil{}, plugin.host.GetMounter())
 }
-func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
+func (plugin *fcPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
     // fc volumes used directly in a pod have a ReadOnly flag set by the pod author.
     // fc volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
     var readOnly bool
@@ -94,7 +94,7 @@ func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
     lun := strconv.Itoa(*fc.Lun)
-    return &fcDiskBuilder{
+    return &fcDiskMounter{
         fcDisk: &fcDisk{
             podUID: podUID,
             volName: spec.Name(),
@@ -109,13 +109,13 @@ func (plugin *fcPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
     }, nil
 }
-func (plugin *fcPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+func (plugin *fcPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newCleanerInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter())
+    return plugin.newUnmounterInternal(volName, podUID, &FCUtil{}, plugin.host.GetMounter())
 }
-func (plugin *fcPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
-    return &fcDiskCleaner{
+func (plugin *fcPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
+    return &fcDiskUnmounter{
         fcDisk: &fcDisk{
             podUID: podUID,
             volName: volName,
@@ -152,27 +152,27 @@ func (fc *fcDisk) GetPath() string {
     return fc.plugin.host.GetPodVolumeDir(fc.podUID, strings.EscapeQualifiedNameForDisk(name), fc.volName)
 }
-type fcDiskBuilder struct {
+type fcDiskMounter struct {
     *fcDisk
     readOnly bool
     fsType string
     mounter *mount.SafeFormatAndMount
 }
-var _ volume.Builder = &fcDiskBuilder{}
-func (b *fcDiskBuilder) GetAttributes() volume.Attributes {
+var _ volume.Mounter = &fcDiskMounter{}
+func (b *fcDiskMounter) GetAttributes() volume.Attributes {
     return volume.Attributes{
         ReadOnly: b.readOnly,
         Managed: !b.readOnly,
         SupportsSELinux: true,
     }
 }
-func (b *fcDiskBuilder) SetUp(fsGroup *int64) error {
+func (b *fcDiskMounter) SetUp(fsGroup *int64) error {
     return b.SetUpAt(b.GetPath(), fsGroup)
 }
-func (b *fcDiskBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *fcDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
     // diskSetUp checks mountpoints and prevent repeated calls
     err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
     if err != nil {
@@ -181,19 +181,19 @@ func (b *fcDiskBuilder) SetUpAt(dir string, fsGroup *int64) error {
     return err
 }
-type fcDiskCleaner struct {
+type fcDiskUnmounter struct {
     *fcDisk
     mounter mount.Interface
 }
-var _ volume.Cleaner = &fcDiskCleaner{}
+var _ volume.Unmounter = &fcDiskUnmounter{}
 // Unmounts the bind mount, and detaches the disk only if the disk
 // resource was the last reference to that disk on the kubelet.
-func (c *fcDiskCleaner) TearDown() error {
+func (c *fcDiskUnmounter) TearDown() error {
     return c.TearDownAt(c.GetPath())
 }
-func (c *fcDiskCleaner) TearDownAt(dir string) error {
+func (c *fcDiskUnmounter) TearDownAt(dir string) error {
     return diskTearDown(c.manager, *c, dir, c.mounter)
 }
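Read together, these hunks show the interface shape the rename settles on for every volume plugin. A sketch of the renamed interfaces as implied by the call sites in this commit (the authoritative definitions live in pkg/volume and carry further methods, e.g. GetMetrics):

type Mounter interface {
    GetPath() string
    GetAttributes() Attributes
    SetUp(fsGroup *int64) error
    SetUpAt(dir string, fsGroup *int64) error
}
type Unmounter interface {
    GetPath() string
    TearDown() error
    TearDownAt(dir string) error
}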


@@ -99,7 +99,7 @@ func (fake *fakeDiskManager) Cleanup() {
 func (fake *fakeDiskManager) MakeGlobalPDName(disk fcDisk) string {
     return fake.tmpDir
 }
-func (fake *fakeDiskManager) AttachDisk(b fcDiskBuilder) error {
+func (fake *fakeDiskManager) AttachDisk(b fcDiskMounter) error {
     globalPath := b.manager.MakeGlobalPDName(*b.fcDisk)
     err := os.MkdirAll(globalPath, 0750)
     if err != nil {
@@ -113,7 +113,7 @@ func (fake *fakeDiskManager) AttachDisk(b fcDiskBuilder) error {
     return nil
 }
-func (fake *fakeDiskManager) DetachDisk(c fcDiskCleaner, mntPath string) error {
+func (fake *fakeDiskManager) DetachDisk(c fcDiskUnmounter, mntPath string) error {
     globalPath := c.manager.MakeGlobalPDName(*c.fcDisk)
     err := os.RemoveAll(globalPath)
     if err != nil {
@@ -140,21 +140,21 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
     fakeManager := NewFakeDiskManager()
     defer fakeManager.Cleanup()
     fakeMounter := &mount.FakeMounter{}
-    builder, err := plug.(*fcPlugin).newBuilderInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
+    mounter, err := plug.(*fcPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder: %v", err)
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter: %v", err)
     }
-    path := builder.GetPath()
+    path := mounter.GetPath()
     expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fc/vol1", tmpDir)
     if path != expectedPath {
         t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
     }
-    if err := builder.SetUp(nil); err != nil {
+    if err := mounter.SetUp(nil); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(path); err != nil {
@@ -177,15 +177,15 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
     fakeManager2 := NewFakeDiskManager()
     defer fakeManager2.Cleanup()
-    cleaner, err := plug.(*fcPlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
+    unmounter, err := plug.(*fcPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
     if err != nil {
-        t.Errorf("Failed to make a new Cleaner: %v", err)
+        t.Errorf("Failed to make a new Unmounter: %v", err)
     }
-    if cleaner == nil {
-        t.Errorf("Got a nil Cleaner: %v", err)
+    if unmounter == nil {
+        t.Errorf("Got a nil Unmounter: %v", err)
     }
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(path); err == nil {
@@ -277,12 +277,12 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
     plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
     plug, _ := plugMgr.FindPluginByName(fcPluginName)
-    // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
     spec := volume.NewSpecFromPersistentVolume(pv, true)
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-    if !builder.GetAttributes().ReadOnly {
-        t.Errorf("Expected true for builder.IsReadOnly")
+    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+    if !mounter.GetAttributes().ReadOnly {
+        t.Errorf("Expected true for mounter.IsReadOnly")
     }
 }


@@ -155,7 +155,7 @@ func searchDisk(wwns []string, lun string, io ioHandler) (string, string) {
     return disk, dm
 }
-func (util *FCUtil) AttachDisk(b fcDiskBuilder) error {
+func (util *FCUtil) AttachDisk(b fcDiskMounter) error {
     devicePath := ""
     wwns := b.wwns
     lun := b.lun
@@ -192,7 +192,7 @@ func (util *FCUtil) AttachDisk(b fcDiskBuilder) error {
     return err
 }
-func (util *FCUtil) DetachDisk(c fcDiskCleaner, mntPath string) error {
+func (util *FCUtil) DetachDisk(c fcDiskUnmounter, mntPath string) error {
     if err := c.mounter.Unmount(mntPath); err != nil {
         return fmt.Errorf("fc detach disk: failed to unmount: %s\nError: %v", mntPath, err)
     }


@@ -100,8 +100,8 @@ func (plugin *flexVolumePlugin) getVolumeSource(spec *volume.Spec) *api.FlexVolu
     return source
 }
-// NewBuilder is the builder routine to build the volume.
-func (plugin *flexVolumePlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+// NewMounter is the mounter routine to build the volume.
+func (plugin *flexVolumePlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     fv := plugin.getVolumeSource(spec)
     secret := ""
     if fv.SecretRef != nil {
@@ -120,13 +120,13 @@ func (plugin *flexVolumePlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ vo
             glog.V(1).Infof("found flex volume secret info: %s", name)
         }
     }
-    return plugin.newBuilderInternal(spec, pod, &flexVolumeUtil{}, plugin.host.GetMounter(), exec.New(), secret)
+    return plugin.newMounterInternal(spec, pod, &flexVolumeUtil{}, plugin.host.GetMounter(), exec.New(), secret)
 }
-// newBuilderInternal is the internal builder routine to build the volume.
-func (plugin *flexVolumePlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secret string) (volume.Builder, error) {
+// newMounterInternal is the internal mounter routine to build the volume.
+func (plugin *flexVolumePlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface, secret string) (volume.Mounter, error) {
     source := plugin.getVolumeSource(spec)
-    return &flexVolumeBuilder{
+    return &flexVolumeMounter{
         flexVolumeDisk: &flexVolumeDisk{
             podUID: pod.UID,
             podNamespace: pod.Namespace,
@@ -147,14 +147,14 @@ func (plugin *flexVolumePlugin) newBuilderInternal(spec *volume.Spec, pod *api.P
     }, nil
 }
-// NewCleaner is the cleaner routine to clean the volume.
-func (plugin *flexVolumePlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-    return plugin.newCleanerInternal(volName, podUID, &flexVolumeUtil{}, plugin.host.GetMounter(), exec.New())
+// NewUnmounter is the unmounter routine to clean the volume.
+func (plugin *flexVolumePlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+    return plugin.newUnmounterInternal(volName, podUID, &flexVolumeUtil{}, plugin.host.GetMounter(), exec.New())
 }
-// newCleanerInternal is the internal cleaner routine to clean the volume.
-func (plugin *flexVolumePlugin) newCleanerInternal(volName string, podUID types.UID, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface) (volume.Cleaner, error) {
-    return &flexVolumeCleaner{
+// newUnmounterInternal is the internal unmounter routine to clean the volume.
+func (plugin *flexVolumePlugin) newUnmounterInternal(volName string, podUID types.UID, manager flexVolumeManager, mounter mount.Interface, runner exec.Interface) (volume.Unmounter, error) {
+    return &flexVolumeUnmounter{
         flexVolumeDisk: &flexVolumeDisk{
             podUID: podUID,
             volName: volName,
@@ -190,8 +190,8 @@ type flexVolumeDisk struct {
     plugin *flexVolumePlugin
 }
-// FlexVolumeCleaner is the disk that will be cleaned by this plugin.
-type flexVolumeCleaner struct {
+// FlexVolumeUnmounter is the disk that will be cleaned by this plugin.
+type flexVolumeUnmounter struct {
     *flexVolumeDisk
     // Runner used to teardown the volume.
     runner exec.Interface
@@ -201,8 +201,8 @@ type flexVolumeCleaner struct {
     volume.MetricsNil
 }
-// FlexVolumeBuilder is the disk that will be exposed by this plugin.
-type flexVolumeBuilder struct {
+// FlexVolumeMounter is the disk that will be exposed by this plugin.
+type flexVolumeMounter struct {
     *flexVolumeDisk
     // fsType is the type of the filesystem to create on the volume.
     fsType string
@@ -223,13 +223,13 @@ type flexVolumeBuilder struct {
 }
 // SetUp creates new directory.
-func (f *flexVolumeBuilder) SetUp(fsGroup *int64) error {
+func (f *flexVolumeMounter) SetUp(fsGroup *int64) error {
     return f.SetUpAt(f.GetPath(), fsGroup)
 }
 // GetAttributes get the flex volume attributes. The attributes will be queried
 // using plugin callout after we finalize the callout syntax.
-func (f flexVolumeBuilder) GetAttributes() volume.Attributes {
+func (f flexVolumeMounter) GetAttributes() volume.Attributes {
     return volume.Attributes{
         ReadOnly: f.readOnly,
         Managed: false,
@@ -240,17 +240,17 @@ func (f flexVolumeBuilder) GetAttributes() volume.Attributes {
 // flexVolumeManager is the abstract interface to flex volume ops.
 type flexVolumeManager interface {
     // Attaches the disk to the kubelet's host machine.
-    attach(builder *flexVolumeBuilder) (string, error)
+    attach(mounter *flexVolumeMounter) (string, error)
     // Detaches the disk from the kubelet's host machine.
-    detach(cleaner *flexVolumeCleaner, dir string) error
+    detach(unmounter *flexVolumeUnmounter, dir string) error
     // Mounts the disk on the Kubelet's host machine.
-    mount(builder *flexVolumeBuilder, mnt, dir string) error
+    mount(mounter *flexVolumeMounter, mnt, dir string) error
     // Unmounts the disk from the Kubelet's host machine.
-    unmount(builder *flexVolumeCleaner, dir string) error
+    unmount(unounter *flexVolumeUnmounter, dir string) error
 }
 // SetUpAt creates new directory.
-func (f *flexVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (f *flexVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
     notmnt, err := f.blockDeviceMounter.IsLikelyNotMountPoint(dir)
     if err != nil && !os.IsNotExist(err) {
@@ -318,7 +318,7 @@ func (f *flexVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
 }
 // IsReadOnly returns true if the volume is read only.
-func (f *flexVolumeBuilder) IsReadOnly() bool {
+func (f *flexVolumeMounter) IsReadOnly() bool {
     return f.readOnly
 }
@@ -329,13 +329,13 @@ func (f *flexVolumeDisk) GetPath() string {
 }
 // TearDown simply deletes everything in the directory.
-func (f *flexVolumeCleaner) TearDown() error {
+func (f *flexVolumeUnmounter) TearDown() error {
     path := f.GetPath()
     return f.TearDownAt(path)
 }
 // TearDownAt simply deletes everything in the directory.
-func (f *flexVolumeCleaner) TearDownAt(dir string) error {
+func (f *flexVolumeUnmounter) TearDownAt(dir string) error {
     notmnt, err := f.mounter.IsLikelyNotMountPoint(dir)
     if err != nil {
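flexVolumeManager splits device-level attach from filesystem-level mount. A sketch of the sequence SetUpAt implies, with the error paths and mount-point checks elided (illustrative only, not the plugin's actual body):

func setUpFlexVolume(mgr flexVolumeManager, f *flexVolumeMounter, dir string) error {
    device, err := mgr.attach(f) // expose the backing device on the host
    if err != nil {
        return err
    }
    return mgr.mount(f, device, dir) // mount the device into the pod's volume dir
}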


@@ -239,20 +239,20 @@ func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
     }
     fake := &mount.FakeMounter{}
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    builder, err := plugin.(*flexVolumePlugin).newBuilderInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), "")
-    volumePath := builder.GetPath()
+    mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), "")
+    volumePath := mounter.GetPath()
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder")
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter")
     }
-    path := builder.GetPath()
+    path := mounter.GetPath()
     expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fakeAttacher/vol1", tmpDir)
     if path != expectedPath {
         t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
     }
-    if err := builder.SetUp(nil); err != nil {
+    if err := mounter.SetUp(nil); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(volumePath); err != nil {
@@ -263,7 +263,7 @@ func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
         }
     }
     t.Logf("Setup successful")
-    if builder.(*flexVolumeBuilder).readOnly {
+    if mounter.(*flexVolumeMounter).readOnly {
         t.Errorf("The volume source should not be read-only and it is.")
     }
@@ -276,14 +276,14 @@ func doTestPluginAttachDetach(t *testing.T, spec *volume.Spec, tmpDir string) {
     }
     fake.ResetLog()
-    cleaner, err := plugin.(*flexVolumePlugin).newCleanerInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
+    unmounter, err := plugin.(*flexVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
     if err != nil {
-        t.Errorf("Failed to make a new Cleaner: %v", err)
+        t.Errorf("Failed to make a new Unmounter: %v", err)
     }
-    if cleaner == nil {
-        t.Errorf("Got a nil Cleaner")
+    if unmounter == nil {
+        t.Errorf("Got a nil Unmounter")
     }
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(volumePath); err == nil {
@@ -318,20 +318,20 @@ func doTestPluginMountUnmount(t *testing.T, spec *volume.Spec, tmpDir string) {
     }
     fake := &mount.FakeMounter{}
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    builder, err := plugin.(*flexVolumePlugin).newBuilderInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), "")
-    volumePath := builder.GetPath()
+    mounter, err := plugin.(*flexVolumePlugin).newMounterInternal(spec, pod, &flexVolumeUtil{}, fake, exec.New(), "")
+    volumePath := mounter.GetPath()
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder")
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter")
     }
-    path := builder.GetPath()
+    path := mounter.GetPath()
     expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~fakeMounter/vol1", tmpDir)
     if path != expectedPath {
         t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
     }
-    if err := builder.SetUp(nil); err != nil {
+    if err := mounter.SetUp(nil); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(volumePath); err != nil {
@@ -342,18 +342,18 @@ func doTestPluginMountUnmount(t *testing.T, spec *volume.Spec, tmpDir string) {
         }
     }
     t.Logf("Setup successful")
-    if builder.(*flexVolumeBuilder).readOnly {
+    if mounter.(*flexVolumeMounter).readOnly {
         t.Errorf("The volume source should not be read-only and it is.")
     }
-    cleaner, err := plugin.(*flexVolumePlugin).newCleanerInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
+    unmounter, err := plugin.(*flexVolumePlugin).newUnmounterInternal("vol1", types.UID("poduid"), &flexVolumeUtil{}, fake, exec.New())
     if err != nil {
-        t.Errorf("Failed to make a new Cleaner: %v", err)
+        t.Errorf("Failed to make a new Unmounter: %v", err)
     }
-    if cleaner == nil {
-        t.Errorf("Got a nil Cleaner")
+    if unmounter == nil {
+        t.Errorf("Got a nil Unmounter")
     }
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(volumePath); err == nil {


@@ -105,7 +105,7 @@ func (u *flexVolumeUtil) init(plugin *flexVolumePlugin) error {
 }
 // Attach exposes a volume on the host.
-func (u *flexVolumeUtil) attach(f *flexVolumeBuilder) (string, error) {
+func (u *flexVolumeUtil) attach(f *flexVolumeMounter) (string, error) {
     execPath := f.execPath
     var options string
@@ -141,7 +141,7 @@ func (u *flexVolumeUtil) attach(f *flexVolumeBuilder) (string, error) {
 }
 // Detach detaches a volume from the host.
-func (u *flexVolumeUtil) detach(f *flexVolumeCleaner, mntDevice string) error {
+func (u *flexVolumeUtil) detach(f *flexVolumeUnmounter, mntDevice string) error {
     execPath := f.execPath
     // Executable provider command.
@@ -163,7 +163,7 @@ func (u *flexVolumeUtil) detach(f *flexVolumeCleaner, mntDevice string) error {
 }
 // Mount mounts the volume on the host.
-func (u *flexVolumeUtil) mount(f *flexVolumeBuilder, mntDevice, dir string) error {
+func (u *flexVolumeUtil) mount(f *flexVolumeMounter, mntDevice, dir string) error {
     execPath := f.execPath
     var options string
@@ -199,7 +199,7 @@ func (u *flexVolumeUtil) mount(f *flexVolumeBuilder, mntDevice, dir string) erro
 }
 // Unmount unmounts the volume on the host.
-func (u *flexVolumeUtil) unmount(f *flexVolumeCleaner, dir string) error {
+func (u *flexVolumeUtil) unmount(f *flexVolumeUnmounter, dir string) error {
     execPath := f.execPath
     // Executable provider command.
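All four helpers shell out to the vendor driver at f.execPath through the injected exec.Interface. A rough sketch of that callout; the operation-name-first argument convention here is an assumption about the flex driver contract, not something shown in these hunks:

// Hypothetical helper: run `<driver> <operation> <args...>` and return its combined output.
func callDriver(runner exec.Interface, execPath, operation string, args ...string) ([]byte, error) {
    cmd := runner.Command(execPath, append([]string{operation}, args...)...)
    return cmd.CombinedOutput()
}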


@@ -85,9 +85,9 @@ func (p *flockerPlugin) getFlockerVolumeSource(spec *volume.Spec) (*api.FlockerV
     return spec.PersistentVolume.Spec.Flocker, readOnly
 }
-func (p *flockerPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
+func (p *flockerPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
     source, readOnly := p.getFlockerVolumeSource(spec)
-    builder := flockerBuilder{
+    mounter := flockerMounter{
         flocker: &flocker{
             datasetName: source.DatasetName,
             pod: pod,
@@ -98,15 +98,15 @@ func (p *flockerPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.
         opts: opts,
         readOnly: readOnly,
     }
-    return &builder, nil
+    return &mounter, nil
 }
-func (p *flockerPlugin) NewCleaner(datasetName string, podUID types.UID) (volume.Cleaner, error) {
+func (p *flockerPlugin) NewUnmounter(datasetName string, podUID types.UID) (volume.Unmounter, error) {
     // Flocker agent will take care of this, there is nothing we can do here
     return nil, nil
 }
-type flockerBuilder struct {
+type flockerMounter struct {
     *flocker
     client flockerclient.Clientable
     exe exec.Interface
@@ -115,24 +115,24 @@ type flockerBuilder struct {
     volume.MetricsNil
 }
-func (b flockerBuilder) GetAttributes() volume.Attributes {
+func (b flockerMounter) GetAttributes() volume.Attributes {
     return volume.Attributes{
         ReadOnly: b.readOnly,
         Managed: false,
         SupportsSELinux: false,
     }
 }
-func (b flockerBuilder) GetPath() string {
+func (b flockerMounter) GetPath() string {
     return b.flocker.path
 }
-func (b flockerBuilder) SetUp(fsGroup *int64) error {
+func (b flockerMounter) SetUp(fsGroup *int64) error {
     return b.SetUpAt(b.flocker.datasetName, fsGroup)
 }
 // newFlockerClient uses environment variables and pod attributes to return a
 // flocker client capable of talking with the Flocker control service.
-func (b flockerBuilder) newFlockerClient() (*flockerclient.Client, error) {
+func (b flockerMounter) newFlockerClient() (*flockerclient.Client, error) {
     host := env.GetEnvAsStringOrFallback("FLOCKER_CONTROL_SERVICE_HOST", defaultHost)
     port, err := env.GetEnvAsIntOrFallback("FLOCKER_CONTROL_SERVICE_PORT", defaultPort)
@@ -147,7 +147,7 @@ func (b flockerBuilder) newFlockerClient() (*flockerclient.Client, error) {
     return c, err
 }
-func (b *flockerBuilder) getMetaDir() string {
+func (b *flockerMounter) getMetaDir() string {
     return path.Join(
         b.plugin.host.GetPodPluginDir(
             b.flocker.pod.UID, strings.EscapeQualifiedNameForDisk(flockerPluginName),
@@ -167,7 +167,7 @@ control service:
    need to update the Primary UUID for this volume.
 5. Wait until the Primary UUID was updated or timeout.
 */
-func (b flockerBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b flockerMounter) SetUpAt(dir string, fsGroup *int64) error {
     if volumeutil.IsReady(b.getMetaDir()) {
         return nil
     }
@@ -214,7 +214,7 @@ func (b flockerBuilder) SetUpAt(dir string, fsGroup *int64) error {
 // updateDatasetPrimary will update the primary in Flocker and wait for it to
 // be ready. If it never gets to ready state it will timeout and error.
-func (b flockerBuilder) updateDatasetPrimary(datasetID, primaryUUID string) error {
+func (b flockerMounter) updateDatasetPrimary(datasetID, primaryUUID string) error {
     // We need to update the primary and wait for it to be ready
     _, err := b.client.UpdatePrimaryForDataset(primaryUUID, datasetID)
     if err != nil {
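updateDatasetPrimary is documented as update-then-wait. A sketch of that wait written against a generic poll loop; GetDatasetState is a hypothetical accessor standing in for whatever the flocker client actually exposes, and the intervals are placeholders, not the plugin's real constants:

if _, err := b.client.UpdatePrimaryForDataset(primaryUUID, datasetID); err != nil {
    return err
}
return wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
    state, err := b.client.GetDatasetState(datasetID) // hypothetical accessor
    if err != nil {
        return false, nil // not ready yet, keep polling
    }
    return state.Primary == primaryUUID, nil
})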


@@ -115,7 +115,7 @@ func TestGetFlockerVolumeSource(t *testing.T) {
     assert.Equal(spec.PersistentVolume.Spec.Flocker, vs)
 }
-func TestNewBuilder(t *testing.T) {
+func TestNewMounter(t *testing.T) {
     assert := assert.New(t)
     plugMgr, _ := newInitializedVolumePlugMgr(t)
@@ -132,22 +132,22 @@ func TestNewBuilder(t *testing.T) {
         },
     }
-    _, err = plug.NewBuilder(spec, &api.Pod{}, volume.VolumeOptions{})
+    _, err = plug.NewMounter(spec, &api.Pod{}, volume.VolumeOptions{})
     assert.NoError(err)
 }
-func TestNewCleaner(t *testing.T) {
+func TestNewUnmounter(t *testing.T) {
     assert := assert.New(t)
     p := flockerPlugin{}
-    cleaner, err := p.NewCleaner("", types.UID(""))
-    assert.Nil(cleaner)
+    unmounter, err := p.NewUnmounter("", types.UID(""))
+    assert.Nil(unmounter)
     assert.NoError(err)
 }
 func TestIsReadOnly(t *testing.T) {
-    b := &flockerBuilder{readOnly: true}
+    b := &flockerMounter{readOnly: true}
     assert.True(t, b.GetAttributes().ReadOnly)
 }
@@ -156,7 +156,7 @@ func TestGetPath(t *testing.T) {
     assert := assert.New(t)
-    b := flockerBuilder{flocker: &flocker{path: expectedPath}}
+    b := flockerMounter{flocker: &flocker{path: expectedPath}}
     assert.Equal(expectedPath, b.GetPath())
 }
@@ -209,7 +209,7 @@ func TestSetUpAtInternal(t *testing.T) {
     assert.NoError(err)
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    b := flockerBuilder{flocker: &flocker{pod: pod, plugin: plug.(*flockerPlugin)}}
+    b := flockerMounter{flocker: &flocker{pod: pod, plugin: plug.(*flockerPlugin)}}
     b.client = newMockFlockerClient("dataset-id", "primary-uid", mockPath)
     assert.NoError(b.SetUpAt(dir, nil))


@@ -71,12 +71,12 @@ func (plugin *gcePersistentDiskPlugin) GetAccessModes() []api.PersistentVolumeAc
     }
 }
-func (plugin *gcePersistentDiskPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newBuilderInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter())
+    return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter())
 }
-func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
+func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
     // GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
     // GCEPDs used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
     var readOnly bool
@@ -97,7 +97,7 @@ func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, pod
         partition = strconv.Itoa(gce.Partition)
     }
-    return &gcePersistentDiskBuilder{
+    return &gcePersistentDiskMounter{
         gcePersistentDisk: &gcePersistentDisk{
             podUID: podUID,
             volName: spec.Name(),
@@ -112,13 +112,13 @@ func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *volume.Spec, pod
         diskMounter: &mount.SafeFormatAndMount{mounter, exec.New()}}, nil
 }
-func (plugin *gcePersistentDiskPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+func (plugin *gcePersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newCleanerInternal(volName, podUID, &GCEDiskUtil{}, plugin.host.GetMounter())
+    return plugin.newUnmounterInternal(volName, podUID, &GCEDiskUtil{}, plugin.host.GetMounter())
 }
-func (plugin *gcePersistentDiskPlugin) newCleanerInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Cleaner, error) {
-    return &gcePersistentDiskCleaner{&gcePersistentDisk{
+func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {
+    return &gcePersistentDiskUnmounter{&gcePersistentDisk{
         podUID: podUID,
         volName: volName,
         manager: manager,
@@ -164,9 +164,9 @@ func (plugin *gcePersistentDiskPlugin) newProvisionerInternal(options volume.Vol
 // Abstract interface to PD operations.
 type pdManager interface {
     // Attaches the disk to the kubelet's host machine.
-    AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error
+    AttachAndMountDisk(b *gcePersistentDiskMounter, globalPDPath string) error
     // Detaches the disk from the kubelet's host machine.
-    DetachDisk(c *gcePersistentDiskCleaner) error
+    DetachDisk(c *gcePersistentDiskUnmounter) error
     // Creates a volume
     CreateVolume(provisioner *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, err error)
     // Deletes a volume
@@ -191,13 +191,13 @@ type gcePersistentDisk struct {
 }
 func detachDiskLogError(pd *gcePersistentDisk) {
-    err := pd.manager.DetachDisk(&gcePersistentDiskCleaner{pd})
+    err := pd.manager.DetachDisk(&gcePersistentDiskUnmounter{pd})
     if err != nil {
         glog.Warningf("Failed to detach disk: %v (%v)", pd, err)
     }
 }
-type gcePersistentDiskBuilder struct {
+type gcePersistentDiskMounter struct {
     *gcePersistentDisk
     // Filesystem type, optional.
     fsType string
@@ -207,9 +207,9 @@ type gcePersistentDiskBuilder struct {
     diskMounter *mount.SafeFormatAndMount
 }
-var _ volume.Builder = &gcePersistentDiskBuilder{}
-func (b *gcePersistentDiskBuilder) GetAttributes() volume.Attributes {
+var _ volume.Mounter = &gcePersistentDiskMounter{}
+func (b *gcePersistentDiskMounter) GetAttributes() volume.Attributes {
     return volume.Attributes{
         ReadOnly: b.readOnly,
         Managed: !b.readOnly,
@@ -218,12 +218,12 @@ func (b *gcePersistentDiskBuilder) GetAttributes() volume.Attributes {
 }
 // SetUp attaches the disk and bind mounts to the volume path.
-func (b *gcePersistentDiskBuilder) SetUp(fsGroup *int64) error {
+func (b *gcePersistentDiskMounter) SetUp(fsGroup *int64) error {
     return b.SetUpAt(b.GetPath(), fsGroup)
 }
 // SetUpAt attaches the disk and bind mounts to the volume path.
-func (b *gcePersistentDiskBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
     // TODO: handle failed mounts here.
     notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
     glog.V(4).Infof("PersistentDisk set up: %s %v %v", dir, !notMnt, err)
@@ -295,21 +295,21 @@ func (pd *gcePersistentDisk) GetPath() string {
     return pd.plugin.host.GetPodVolumeDir(pd.podUID, strings.EscapeQualifiedNameForDisk(name), pd.volName)
 }
-type gcePersistentDiskCleaner struct {
+type gcePersistentDiskUnmounter struct {
     *gcePersistentDisk
 }
-var _ volume.Cleaner = &gcePersistentDiskCleaner{}
+var _ volume.Unmounter = &gcePersistentDiskUnmounter{}
 // Unmounts the bind mount, and detaches the disk only if the PD
 // resource was the last reference to that disk on the kubelet.
-func (c *gcePersistentDiskCleaner) TearDown() error {
+func (c *gcePersistentDiskUnmounter) TearDown() error {
     return c.TearDownAt(c.GetPath())
 }
 // Unmounts the bind mount, and detaches the disk only if the PD
 // resource was the last reference to that disk on the kubelet.
-func (c *gcePersistentDiskCleaner) TearDownAt(dir string) error {
+func (c *gcePersistentDiskUnmounter) TearDownAt(dir string) error {
     notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
     if err != nil {
         return err
@@ -329,7 +329,7 @@ func (c *gcePersistentDiskCleaner) TearDownAt(dir string) error {
     // If len(refs) is 1, then all bind mounts have been removed, and the
     // remaining reference is the global mount. It is safe to detach.
     if len(refs) == 1 {
-        // c.pdName is not initially set for volume-cleaners, so set it here.
+        // c.pdName is not initially set for volume-unmounters, so set it here.
         c.pdName = path.Base(refs[0])
         if err := c.manager.DetachDisk(c); err != nil {
             return err
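The TearDownAt logic above only detaches when the global mount is the last remaining reference to the disk. A condensed sketch of that decision; getRefs is a hypothetical stand-in for the mount-reference lookup the real code performs:

if err := c.mounter.Unmount(dir); err != nil {
    return err
}
refs, err := getRefs(c.mounter, dir) // hypothetical: list all mount references for the device
if err != nil {
    return err
}
if len(refs) == 1 { // only the global mount remains
    c.pdName = path.Base(refs[0]) // recover the PD name from the global mount path
    if err := c.manager.DetachDisk(c); err != nil {
        return err
    }
}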


@@ -90,7 +90,7 @@ type fakePDManager struct {
 // TODO(jonesdl) To fully test this, we could create a loopback device
 // and mount that instead.
-func (fake *fakePDManager) AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error {
+func (fake *fakePDManager) AttachAndMountDisk(b *gcePersistentDiskMounter, globalPDPath string) error {
     globalPath := makeGlobalPDName(b.plugin.host, b.pdName)
     err := os.MkdirAll(globalPath, 0750)
     if err != nil {
@@ -103,7 +103,7 @@ func (fake *fakePDManager) AttachAndMountDisk(b *gcePersistentDiskBuilder, globa
     return nil
 }
-func (fake *fakePDManager) DetachDisk(c *gcePersistentDiskCleaner) error {
+func (fake *fakePDManager) DetachDisk(c *gcePersistentDiskUnmounter) error {
     globalPath := makeGlobalPDName(c.plugin.host, c.pdName)
     err := os.RemoveAll(globalPath)
     if err != nil {
@@ -150,21 +150,21 @@ func TestPlugin(t *testing.T) {
     }
     fakeManager := &fakePDManager{}
     fakeMounter := &mount.FakeMounter{}
-    builder, err := plug.(*gcePersistentDiskPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
+    mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder")
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter")
     }
     volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~gce-pd/vol1")
-    path := builder.GetPath()
+    path := mounter.GetPath()
     if path != volPath {
         t.Errorf("Got unexpected path: %s", path)
     }
-    if err := builder.SetUp(nil); err != nil {
+    if err := mounter.SetUp(nil); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(path); err != nil {
@@ -186,15 +186,15 @@ func TestPlugin(t *testing.T) {
     }
     fakeManager = &fakePDManager{}
-    cleaner, err := plug.(*gcePersistentDiskPlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
+    unmounter, err := plug.(*gcePersistentDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
     if err != nil {
-        t.Errorf("Failed to make a new Cleaner: %v", err)
+        t.Errorf("Failed to make a new Unmounter: %v", err)
     }
-    if cleaner == nil {
-        t.Errorf("Got a nil Cleaner")
+    if unmounter == nil {
+        t.Errorf("Got a nil Unmounter")
     }
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(path); err == nil {
@@ -291,12 +291,12 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
     plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
     plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)
-    // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
     spec := volume.NewSpecFromPersistentVolume(pv, true)
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-    if !builder.GetAttributes().ReadOnly {
-        t.Errorf("Expected true for builder.IsReadOnly")
+    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+    if !mounter.GetAttributes().ReadOnly {
+        t.Errorf("Expected true for mounter.IsReadOnly")
     }
 }


@@ -54,7 +54,7 @@ type GCEDiskUtil struct{}
 // Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
 // Mounts the disk to it's global path.
-func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskBuilder, globalPDPath string) error {
+func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskMounter, globalPDPath string) error {
     glog.V(5).Infof("AttachAndMountDisk(...) called for PD %q. Will block for existing operations, if any. (globalPDPath=%q)\r\n", b.pdName, globalPDPath)
     // Block execution until any pending detach operations for this PD have completed
@@ -101,7 +101,7 @@ func (diskUtil *GCEDiskUtil) AttachAndMountDisk(b *gcePersistentDiskBuilder, glo
 }
 // Unmounts the device and detaches the disk from the kubelet's host machine.
-func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskCleaner) error {
+func (util *GCEDiskUtil) DetachDisk(c *gcePersistentDiskUnmounter) error {
     glog.V(5).Infof("DetachDisk(...) for PD %q\r\n", c.pdName)
     if err := unmountPDAndRemoveGlobalPath(c); err != nil {
@@ -165,7 +165,7 @@ func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (strin
 }
 // Attaches the specified persistent disk device to node, verifies that it is attached, and retries if it fails.
-func attachDiskAndVerify(b *gcePersistentDiskBuilder, sdBeforeSet sets.String) (string, error) {
+func attachDiskAndVerify(b *gcePersistentDiskMounter, sdBeforeSet sets.String) (string, error) {
     devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
     var gceCloud *gcecloud.GCECloud
     for numRetries := 0; numRetries < maxRetries; numRetries++ {
@@ -230,7 +230,7 @@ func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, er
 // Detaches the specified persistent disk device from node, verifies that it is detached, and retries if it fails.
 // This function is intended to be called asynchronously as a go routine.
-func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
+func detachDiskAndVerify(c *gcePersistentDiskUnmounter) {
     glog.V(5).Infof("detachDiskAndVerify(...) for pd %q. Will block for pending operations", c.pdName)
     defer runtime.HandleCrash()
@@ -287,7 +287,7 @@ func detachDiskAndVerify(c *gcePersistentDiskCleaner) {
 }
 // Unmount the global PD mount, which should be the only one, and delete it.
-func unmountPDAndRemoveGlobalPath(c *gcePersistentDiskCleaner) error {
+func unmountPDAndRemoveGlobalPath(c *gcePersistentDiskUnmounter) error {
     globalPDPath := makeGlobalPDName(c.plugin.host, c.pdName)
     err := c.mounter.Unmount(globalPDPath)
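attachDiskAndVerify and detachDiskAndVerify both follow an issue-the-operation-then-poll pattern. A compressed sketch of the attach side, reusing only names visible in the hunks above; checkSleepDuration is a placeholder for the real backoff constant:

devicePaths := getDiskByIdPaths(b.gcePersistentDisk)
for numRetries := 0; numRetries < maxRetries; numRetries++ {
    // (Re)issue the attach through the GCE cloud provider here, then see
    // whether one of the candidate /dev/disk/by-id paths has appeared.
    if devicePath, err := verifyDevicePath(devicePaths, sdBeforeSet); err == nil && devicePath != "" {
        return devicePath, nil
    }
    time.Sleep(checkSleepDuration) // placeholder backoff
}
return "", fmt.Errorf("could not attach PD %q", b.pdName)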


@@ -62,8 +62,8 @@ func (plugin *gitRepoPlugin) CanSupport(spec *volume.Spec) bool {
     return spec.Volume != nil && spec.Volume.GitRepo != nil
 }
-func (plugin *gitRepoPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
-    return &gitRepoVolumeBuilder{
+func (plugin *gitRepoPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+    return &gitRepoVolumeMounter{
         gitRepoVolume: &gitRepoVolume{
             volName: spec.Name(),
             podUID: pod.UID,
@@ -78,8 +78,8 @@ func (plugin *gitRepoPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts vo
     }, nil
 }
-func (plugin *gitRepoPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-    return &gitRepoVolumeCleaner{
+func (plugin *gitRepoPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+    return &gitRepoVolumeUnmounter{
         &gitRepoVolume{
             volName: volName,
             podUID: podUID,
@@ -104,8 +104,8 @@ func (gr *gitRepoVolume) GetPath() string {
     return gr.plugin.host.GetPodVolumeDir(gr.podUID, utilstrings.EscapeQualifiedNameForDisk(name), gr.volName)
 }
-// gitRepoVolumeBuilder builds git repo volumes.
-type gitRepoVolumeBuilder struct {
+// gitRepoVolumeMounter builds git repo volumes.
+type gitRepoVolumeMounter struct {
     *gitRepoVolume
     pod api.Pod
@@ -116,9 +116,9 @@ type gitRepoVolumeBuilder struct {
     opts volume.VolumeOptions
 }
-var _ volume.Builder = &gitRepoVolumeBuilder{}
-func (b *gitRepoVolumeBuilder) GetAttributes() volume.Attributes {
+var _ volume.Mounter = &gitRepoVolumeMounter{}
+func (b *gitRepoVolumeMounter) GetAttributes() volume.Attributes {
     return volume.Attributes{
         ReadOnly: false,
         Managed: true,
@@ -127,18 +127,18 @@ func (b *gitRepoVolumeBuilder) GetAttributes() volume.Attributes {
 }
 // SetUp creates new directory and clones a git repo.
-func (b *gitRepoVolumeBuilder) SetUp(fsGroup *int64) error {
+func (b *gitRepoVolumeMounter) SetUp(fsGroup *int64) error {
     return b.SetUpAt(b.GetPath(), fsGroup)
 }
 // SetUpAt creates new directory and clones a git repo.
-func (b *gitRepoVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *gitRepoVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
     if volumeutil.IsReady(b.getMetaDir()) {
         return nil
     }
     // Wrap EmptyDir, let it do the setup.
-    wrapped, err := b.plugin.host.NewWrapperBuilder(b.volName, wrappedVolumeSpec, &b.pod, b.opts)
+    wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, b.opts)
     if err != nil {
         return err
     }
@@ -192,33 +192,33 @@ func (b *gitRepoVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
     return nil
 }
-func (b *gitRepoVolumeBuilder) getMetaDir() string {
+func (b *gitRepoVolumeMounter) getMetaDir() string {
     return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(gitRepoPluginName)), b.volName)
 }
-func (b *gitRepoVolumeBuilder) execCommand(command string, args []string, dir string) ([]byte, error) {
+func (b *gitRepoVolumeMounter) execCommand(command string, args []string, dir string) ([]byte, error) {
     cmd := b.exec.Command(command, args...)
     cmd.SetDir(dir)
     return cmd.CombinedOutput()
 }
-// gitRepoVolumeCleaner cleans git repo volumes.
-type gitRepoVolumeCleaner struct {
+// gitRepoVolumeUnmounter cleans git repo volumes.
+type gitRepoVolumeUnmounter struct {
     *gitRepoVolume
 }
-var _ volume.Cleaner = &gitRepoVolumeCleaner{}
+var _ volume.Unmounter = &gitRepoVolumeUnmounter{}
 // TearDown simply deletes everything in the directory.
-func (c *gitRepoVolumeCleaner) TearDown() error {
+func (c *gitRepoVolumeUnmounter) TearDown() error {
     return c.TearDownAt(c.GetPath())
 }
 // TearDownAt simply deletes everything in the directory.
-func (c *gitRepoVolumeCleaner) TearDownAt(dir string) error {
+func (c *gitRepoVolumeUnmounter) TearDownAt(dir string) error {
     // Wrap EmptyDir, let it do the teardown.
-    wrapped, err := c.plugin.host.NewWrapperCleaner(c.volName, wrappedVolumeSpec, c.podUID)
+    wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
     if err != nil {
         return err
     }
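gitRepo is a wrapped volume: the directory lifecycle is delegated to EmptyDir through the host's NewWrapperMounter/NewWrapperUnmounter hooks, and this plugin only adds the clone step. A sketch of the setup half (illustrative; the clone itself is elided):

wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, b.opts)
if err != nil {
    return err
}
if err := wrapped.SetUpAt(dir, fsGroup); err != nil { // EmptyDir prepares dir
    return err
}
// ...then clone the repository into dir, e.g. via b.execCommand("git", args, dir).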


@@ -230,20 +230,20 @@ func doTestPlugin(scenario struct {
         return allErrs
     }
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    builder, err := plug.NewBuilder(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{RootContext: ""})
+    mounter, err := plug.NewMounter(volume.NewSpecFromVolume(scenario.vol), pod, volume.VolumeOptions{RootContext: ""})
     if err != nil {
         allErrs = append(allErrs,
-            fmt.Errorf("Failed to make a new Builder: %v", err))
+            fmt.Errorf("Failed to make a new Mounter: %v", err))
         return allErrs
     }
-    if builder == nil {
+    if mounter == nil {
         allErrs = append(allErrs,
-            fmt.Errorf("Got a nil Builder"))
+            fmt.Errorf("Got a nil Mounter"))
         return allErrs
     }
-    path := builder.GetPath()
+    path := mounter.GetPath()
     suffix := fmt.Sprintf("pods/poduid/volumes/kubernetes.io~git-repo/%v", scenario.vol.Name)
     if !strings.HasSuffix(path, suffix) {
         allErrs = append(allErrs,
@@ -252,7 +252,7 @@ func doTestPlugin(scenario struct {
     }
     // Test setUp()
-    setUpErrs := doTestSetUp(scenario, builder)
+    setUpErrs := doTestSetUp(scenario, mounter)
     allErrs = append(allErrs, setUpErrs...)
     if _, err := os.Stat(path); err != nil {
@@ -280,19 +280,19 @@ func doTestPlugin(scenario struct {
         }
     }
-    cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+    unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
     if err != nil {
         allErrs = append(allErrs,
-            fmt.Errorf("Failed to make a new Cleaner: %v", err))
+            fmt.Errorf("Failed to make a new Unmounter: %v", err))
         return allErrs
     }
-    if cleaner == nil {
+    if unmounter == nil {
         allErrs = append(allErrs,
-            fmt.Errorf("Got a nil Cleaner"))
+            fmt.Errorf("Got a nil Unmounter"))
         return allErrs
     }
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         allErrs = append(allErrs,
             fmt.Errorf("Expected success, got: %v", err))
         return allErrs
@@ -312,7 +312,7 @@ func doTestSetUp(scenario struct {
     vol *api.Volume
     expecteds []expectedCommand
     isExpectedFailure bool
-}, builder volume.Builder) []error {
+}, mounter volume.Mounter) []error {
     expecteds := scenario.expecteds
     allErrs := []error{}
@@ -349,7 +349,7 @@ func doTestSetUp(scenario struct {
         CommandScript: fakeAction,
     }
-    g := builder.(*gitRepoVolumeBuilder)
+    g := mounter.(*gitRepoVolumeMounter)
     g.exec = &fake
     g.SetUp(nil)

View File

@@ -74,7 +74,7 @@ func (plugin *glusterfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode
	}
}
-func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	source, _ := plugin.getGlusterVolumeSource(spec)
	ep_name := source.EndpointsName
	ns := pod.Namespace
@@ -84,7 +84,7 @@ func (plugin *glusterfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ vol
		return nil, err
	}
	glog.V(1).Infof("glusterfs: endpoints %v", ep)
-	return plugin.newBuilderInternal(spec, ep, pod, plugin.host.GetMounter(), exec.New())
+	return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(), exec.New())
}
func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*api.GlusterfsVolumeSource, bool) {
@@ -97,9 +97,9 @@ func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*api.G
	}
}
-func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.Endpoints, pod *api.Pod, mounter mount.Interface, exe exec.Interface) (volume.Builder, error) {
+func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *api.Endpoints, pod *api.Pod, mounter mount.Interface, exe exec.Interface) (volume.Mounter, error) {
	source, readOnly := plugin.getGlusterVolumeSource(spec)
-	return &glusterfsBuilder{
+	return &glusterfsMounter{
		glusterfs: &glusterfs{
			volName: spec.Name(),
			mounter: mounter,
@@ -112,12 +112,12 @@ func (plugin *glusterfsPlugin) newBuilderInternal(spec *volume.Spec, ep *api.End
		exe: exe}, nil
}
-func (plugin *glusterfsPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-	return plugin.newCleanerInternal(volName, podUID, plugin.host.GetMounter())
+func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}
-func (plugin *glusterfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
-	return &glusterfsCleaner{&glusterfs{
+func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
+	return &glusterfsUnmounter{&glusterfs{
		volName: volName,
		mounter: mounter,
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
@@ -139,7 +139,7 @@ type glusterfs struct {
	volume.MetricsNil
}
-type glusterfsBuilder struct {
+type glusterfsMounter struct {
	*glusterfs
	hosts *api.Endpoints
	path string
@@ -147,9 +147,9 @@ type glusterfsBuilder struct {
	exe exec.Interface
}
-var _ volume.Builder = &glusterfsBuilder{}
+var _ volume.Mounter = &glusterfsMounter{}
-func (b *glusterfsBuilder) GetAttributes() volume.Attributes {
+func (b *glusterfsMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly: b.readOnly,
		Managed: false,
@@ -158,11 +158,11 @@ func (b *glusterfsBuilder) GetAttributes() volume.Attributes {
}
// SetUp attaches the disk and bind mounts to the volume path.
-func (b *glusterfsBuilder) SetUp(fsGroup *int64) error {
+func (b *glusterfsMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}
-func (b *glusterfsBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("glusterfs: mount set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
@@ -179,7 +179,7 @@ func (b *glusterfsBuilder) SetUpAt(dir string, fsGroup *int64) error {
	}
	// Cleanup upon failure.
-	c := &glusterfsCleaner{b.glusterfs}
+	c := &glusterfsUnmounter{b.glusterfs}
	c.cleanup(dir)
	return err
}
@@ -189,21 +189,21 @@ func (glusterfsVolume *glusterfs) GetPath() string {
	return glusterfsVolume.plugin.host.GetPodVolumeDir(glusterfsVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), glusterfsVolume.volName)
}
-type glusterfsCleaner struct {
+type glusterfsUnmounter struct {
	*glusterfs
}
-var _ volume.Cleaner = &glusterfsCleaner{}
+var _ volume.Unmounter = &glusterfsUnmounter{}
-func (c *glusterfsCleaner) TearDown() error {
+func (c *glusterfsUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}
-func (c *glusterfsCleaner) TearDownAt(dir string) error {
+func (c *glusterfsUnmounter) TearDownAt(dir string) error {
	return c.cleanup(dir)
}
-func (c *glusterfsCleaner) cleanup(dir string) error {
+func (c *glusterfsUnmounter) cleanup(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		return fmt.Errorf("glusterfs: Error checking IsLikelyNotMountPoint: %v", err)
@@ -228,7 +228,7 @@ func (c *glusterfsCleaner) cleanup(dir string) error {
	return nil
}
-func (b *glusterfsBuilder) setUpAtInternal(dir string) error {
+func (b *glusterfsMounter) setUpAtInternal(dir string) error {
	var errs error
	options := []string{}
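The glusterfs `SetUpAt` above follows the pattern every mounter in this commit shares: check whether the directory is already a mount point, create it, mount, and fall back to the unmounter's cleanup on failure. A minimal sketch of that flow, assuming a simplified `mountPoint` interface in place of the real `mount.Interface`:

```go
package main

import (
	"fmt"
	"os"
)

// mountPoint is a trimmed stand-in for the three calls SetUpAt needs from
// mount.Interface; it is illustrative, not the real interface.
type mountPoint interface {
	IsLikelyNotMountPoint(dir string) (bool, error)
	Mount(source, target, fstype string, options []string) error
	Unmount(target string) error
}

// setUpAt mirrors the shape of glusterfsMounter.SetUpAt: skip if already
// mounted, create the directory, mount, and undo the directory on failure.
func setUpAt(m mountPoint, source, dir string) error {
	notMnt, err := m.IsLikelyNotMountPoint(dir)
	if err != nil && !os.IsNotExist(err) {
		return err
	}
	if !notMnt {
		return nil // already mounted; SetUpAt must be idempotent
	}
	if err := os.MkdirAll(dir, 0750); err != nil {
		return err
	}
	if err := m.Mount(source, dir, "glusterfs", nil); err != nil {
		// Cleanup upon failure, as the unmounter would.
		os.Remove(dir)
		return fmt.Errorf("mount of %s at %s failed: %v", source, dir, err)
	}
	return nil
}

// fakeMounter is a toy in-memory implementation for the usage example.
type fakeMounter struct{ mounted map[string]bool }

func (f *fakeMounter) IsLikelyNotMountPoint(dir string) (bool, error) { return !f.mounted[dir], nil }
func (f *fakeMounter) Mount(source, target, fstype string, options []string) error {
	f.mounted[target] = true
	return nil
}
func (f *fakeMounter) Unmount(target string) error { delete(f.mounted, target); return nil }

func main() {
	f := &fakeMounter{mounted: map[string]bool{}}
	fmt.Println(setUpAt(f, "host:/vol1", "/tmp/podvol"))
}
```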

View File

@@ -113,20 +113,20 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
		},
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, err := plug.(*glusterfsPlugin).newBuilderInternal(spec, ep, pod, &mount.FakeMounter{}, &fake)
-	volumePath := builder.GetPath()
+	mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{}, &fake)
+	volumePath := mounter.GetPath()
	if err != nil {
-		t.Errorf("Failed to make a new Builder: %v", err)
+		t.Errorf("Failed to make a new Mounter: %v", err)
	}
-	if builder == nil {
-		t.Error("Got a nil Builder")
+	if mounter == nil {
+		t.Error("Got a nil Mounter")
	}
-	path := builder.GetPath()
+	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~glusterfs/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
-	if err := builder.SetUp(nil); err != nil {
+	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
@@ -136,14 +136,14 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
-	cleaner, err := plug.(*glusterfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
+	unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
	if err != nil {
-		t.Errorf("Failed to make a new Cleaner: %v", err)
+		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
-	if cleaner == nil {
-		t.Error("Got a nil Cleaner")
+	if unmounter == nil {
+		t.Error("Got a nil Unmounter")
	}
-	if err := cleaner.TearDown(); err != nil {
+	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
@@ -226,12 +226,12 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)
-	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-	if !builder.GetAttributes().ReadOnly {
-		t.Errorf("Expected true for builder.IsReadOnly")
+	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+	if !mounter.GetAttributes().ReadOnly {
+		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

View File

@@ -93,24 +93,24 @@ func (plugin *hostPathPlugin) GetAccessModes() []api.PersistentVolumeAccessMode
	}
}
-func (plugin *hostPathPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *hostPathPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	if spec.Volume != nil && spec.Volume.HostPath != nil {
		path := spec.Volume.HostPath.Path
-		return &hostPathBuilder{
+		return &hostPathMounter{
			hostPath: &hostPath{path: path},
			readOnly: false,
		}, nil
	} else {
		path := spec.PersistentVolume.Spec.HostPath.Path
-		return &hostPathBuilder{
+		return &hostPathMounter{
			hostPath: &hostPath{path: path},
			readOnly: spec.ReadOnly,
		}, nil
	}
}
-func (plugin *hostPathPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-	return &hostPathCleaner{&hostPath{
+func (plugin *hostPathPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+	return &hostPathUnmounter{&hostPath{
		path: "",
	}}, nil
}
@@ -167,14 +167,14 @@ func (hp *hostPath) GetPath() string {
	return hp.path
}
-type hostPathBuilder struct {
+type hostPathMounter struct {
	*hostPath
	readOnly bool
}
-var _ volume.Builder = &hostPathBuilder{}
+var _ volume.Mounter = &hostPathMounter{}
-func (b *hostPathBuilder) GetAttributes() volume.Attributes {
+func (b *hostPathMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly: b.readOnly,
		Managed: false,
@@ -183,32 +183,32 @@ func (b *hostPathBuilder) GetAttributes() volume.Attributes {
}
// SetUp does nothing.
-func (b *hostPathBuilder) SetUp(fsGroup *int64) error {
+func (b *hostPathMounter) SetUp(fsGroup *int64) error {
	return nil
}
// SetUpAt does not make sense for host paths - probably programmer error.
-func (b *hostPathBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *hostPathMounter) SetUpAt(dir string, fsGroup *int64) error {
	return fmt.Errorf("SetUpAt() does not make sense for host paths")
}
-func (b *hostPathBuilder) GetPath() string {
+func (b *hostPathMounter) GetPath() string {
	return b.path
}
-type hostPathCleaner struct {
+type hostPathUnmounter struct {
	*hostPath
}
-var _ volume.Cleaner = &hostPathCleaner{}
+var _ volume.Unmounter = &hostPathUnmounter{}
// TearDown does nothing.
-func (c *hostPathCleaner) TearDown() error {
+func (c *hostPathUnmounter) TearDown() error {
	return nil
}
// TearDownAt does not make sense for host paths - probably programmer error.
-func (c *hostPathCleaner) TearDownAt(dir string) error {
+func (c *hostPathUnmounter) TearDownAt(dir string) error {
	return fmt.Errorf("TearDownAt() does not make sense for host paths")
}

View File

@@ -198,32 +198,32 @@ func TestPlugin(t *testing.T) {
		VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: "/vol1"}},
	}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, err := plug.NewBuilder(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
+	mounter, err := plug.NewMounter(volume.NewSpecFromVolume(spec), pod, volume.VolumeOptions{})
	if err != nil {
-		t.Errorf("Failed to make a new Builder: %v", err)
+		t.Errorf("Failed to make a new Mounter: %v", err)
	}
-	if builder == nil {
-		t.Errorf("Got a nil Builder")
+	if mounter == nil {
+		t.Errorf("Got a nil Mounter")
	}
-	path := builder.GetPath()
+	path := mounter.GetPath()
	if path != "/vol1" {
		t.Errorf("Got unexpected path: %s", path)
	}
-	if err := builder.SetUp(nil); err != nil {
+	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
-	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	unmounter, err := plug.NewUnmounter("vol1", types.UID("poduid"))
	if err != nil {
-		t.Errorf("Failed to make a new Cleaner: %v", err)
+		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
-	if cleaner == nil {
-		t.Errorf("Got a nil Cleaner")
+	if unmounter == nil {
+		t.Errorf("Got a nil Unmounter")
	}
-	if err := cleaner.TearDown(); err != nil {
+	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
}
@@ -262,12 +262,12 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost("/tmp/fake", client, nil))
	plug, _ := plugMgr.FindPluginByName(hostPathPluginName)
-	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-	if !builder.GetAttributes().ReadOnly {
-		t.Errorf("Expected true for builder.IsReadOnly")
+	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+	if !mounter.GetAttributes().ReadOnly {
+		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

View File

@@ -28,13 +28,13 @@ import (
type diskManager interface {
	MakeGlobalPDName(disk iscsiDisk) string
	// Attaches the disk to the kubelet's host machine.
-	AttachDisk(b iscsiDiskBuilder) error
+	AttachDisk(b iscsiDiskMounter) error
	// Detaches the disk from the kubelet's host machine.
-	DetachDisk(disk iscsiDiskCleaner, mntPath string) error
+	DetachDisk(disk iscsiDiskUnmounter, mntPath string) error
}
// utility to mount a disk based filesystem
-func diskSetUp(manager diskManager, b iscsiDiskBuilder, volPath string, mounter mount.Interface, fsGroup *int64) error {
+func diskSetUp(manager diskManager, b iscsiDiskMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
	globalPDPath := manager.MakeGlobalPDName(*b.iscsiDisk)
	// TODO: handle failed mounts here.
	notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
@@ -74,7 +74,7 @@ func diskSetUp(manager diskManager, b iscsiDiskBuilder, volPath string, mounter
}
// utility to tear down a disk based filesystem
-func diskTearDown(manager diskManager, c iscsiDiskCleaner, volPath string, mounter mount.Interface) error {
+func diskTearDown(manager diskManager, c iscsiDiskUnmounter, volPath string, mounter mount.Interface) error {
	notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
	if err != nil {
		glog.Errorf("cannot validate mountpoint %s", volPath)
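The diskManager split works the same way for iSCSI and RBD: the manager attaches the device once at a plugin-global path, and `diskSetUp` then exposes it to each pod (via a bind mount in the real code). A rough sketch under those assumptions; `disk`, `fakeManager`, and the paths below are illustrative stand-ins:

```go
package main

import "fmt"

// disk and diskManager are simplified stand-ins for the iscsiDisk/diskManager
// pair; the real types carry mounters and plugin references.
type disk struct{ iqn, lun string }

type diskManager interface {
	MakeGlobalPDName(d disk) string
	AttachDisk(d disk) error
}

// setUpDisk mirrors the diskSetUp flow: attach the device once at a global
// path, then each pod volume becomes a bind mount of that path (the bind
// mount itself is elided here).
func setUpDisk(m diskManager, d disk, podVolPath string) error {
	globalPDPath := m.MakeGlobalPDName(d)
	if err := m.AttachDisk(d); err != nil {
		return fmt.Errorf("attach at %s failed: %v", globalPDPath, err)
	}
	fmt.Printf("bind mount %s -> %s\n", globalPDPath, podVolPath)
	return nil
}

// fakeManager is a toy implementation for the usage example.
type fakeManager struct{}

func (fakeManager) MakeGlobalPDName(d disk) string { return "/plugins/iscsi/" + d.iqn + "-" + d.lun }
func (fakeManager) AttachDisk(d disk) error        { return nil }

func main() {
	_ = setUpDisk(fakeManager{}, disk{iqn: "iqn.2016-03.example:target", lun: "0"}, "/pods/poduid/volumes/vol1")
}
```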

View File

@@ -70,12 +70,12 @@ func (plugin *iscsiPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
	}
}
-func (plugin *iscsiPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *iscsiPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
	// Inject real implementations here, test through the internal function.
-	return plugin.newBuilderInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter())
+	return plugin.newMounterInternal(spec, pod.UID, &ISCSIUtil{}, plugin.host.GetMounter())
}
-func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Builder, error) {
+func (plugin *iscsiPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Mounter, error) {
	// iscsi volumes used directly in a pod have a ReadOnly flag set by the pod author.
	// iscsi volumes used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
	var readOnly bool
@@ -89,11 +89,11 @@ func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UI
	}
	lun := strconv.Itoa(iscsi.Lun)
-	portal := portalBuilder(iscsi.TargetPortal)
+	portal := portalMounter(iscsi.TargetPortal)
	iface := iscsi.ISCSIInterface
-	return &iscsiDiskBuilder{
+	return &iscsiDiskMounter{
		iscsiDisk: &iscsiDisk{
			podUID: podUID,
			volName: spec.Name(),
@@ -109,13 +109,13 @@ func (plugin *iscsiPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UI
	}, nil
}
-func (plugin *iscsiPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+func (plugin *iscsiPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
	// Inject real implementations here, test through the internal function.
-	return plugin.newCleanerInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter())
+	return plugin.newUnmounterInternal(volName, podUID, &ISCSIUtil{}, plugin.host.GetMounter())
}
-func (plugin *iscsiPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
-	return &iscsiDiskCleaner{
+func (plugin *iscsiPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
+	return &iscsiDiskUnmounter{
		iscsiDisk: &iscsiDisk{
			podUID: podUID,
			volName: volName,
@@ -150,16 +150,16 @@ func (iscsi *iscsiDisk) GetPath() string {
	return iscsi.plugin.host.GetPodVolumeDir(iscsi.podUID, utilstrings.EscapeQualifiedNameForDisk(name), iscsi.volName)
}
-type iscsiDiskBuilder struct {
+type iscsiDiskMounter struct {
	*iscsiDisk
	readOnly bool
	fsType string
	mounter *mount.SafeFormatAndMount
}
-var _ volume.Builder = &iscsiDiskBuilder{}
+var _ volume.Mounter = &iscsiDiskMounter{}
-func (b *iscsiDiskBuilder) GetAttributes() volume.Attributes {
+func (b *iscsiDiskMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly: b.readOnly,
		Managed: !b.readOnly,
@@ -167,11 +167,11 @@ func (b *iscsiDiskBuilder) GetAttributes() volume.Attributes {
	}
}
-func (b *iscsiDiskBuilder) SetUp(fsGroup *int64) error {
+func (b *iscsiDiskMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}
-func (b *iscsiDiskBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *iscsiDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
	// diskSetUp checks mountpoints and prevents repeated calls
	err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
	if err != nil {
@@ -180,24 +180,24 @@ func (b *iscsiDiskBuilder) SetUpAt(dir string, fsGroup *int64) error {
	return err
}
-type iscsiDiskCleaner struct {
+type iscsiDiskUnmounter struct {
	*iscsiDisk
	mounter mount.Interface
}
-var _ volume.Cleaner = &iscsiDiskCleaner{}
+var _ volume.Unmounter = &iscsiDiskUnmounter{}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
-func (c *iscsiDiskCleaner) TearDown() error {
+func (c *iscsiDiskUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}
-func (c *iscsiDiskCleaner) TearDownAt(dir string) error {
+func (c *iscsiDiskUnmounter) TearDownAt(dir string) error {
	return diskTearDown(c.manager, *c, dir, c.mounter)
}
-func portalBuilder(portal string) string {
+func portalMounter(portal string) string {
	if !strings.Contains(portal, ":") {
		portal = portal + ":3260"
	}

View File

@@ -99,7 +99,7 @@ func (fake *fakeDiskManager) Cleanup() {
func (fake *fakeDiskManager) MakeGlobalPDName(disk iscsiDisk) string {
	return fake.tmpDir
}
-func (fake *fakeDiskManager) AttachDisk(b iscsiDiskBuilder) error {
+func (fake *fakeDiskManager) AttachDisk(b iscsiDiskMounter) error {
	globalPath := b.manager.MakeGlobalPDName(*b.iscsiDisk)
	err := os.MkdirAll(globalPath, 0750)
	if err != nil {
@@ -113,7 +113,7 @@ func (fake *fakeDiskManager) AttachDisk(b iscsiDiskBuilder) error {
	return nil
}
-func (fake *fakeDiskManager) DetachDisk(c iscsiDiskCleaner, mntPath string) error {
+func (fake *fakeDiskManager) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
	globalPath := c.manager.MakeGlobalPDName(*c.iscsiDisk)
	err := os.RemoveAll(globalPath)
	if err != nil {
@@ -140,21 +140,21 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
	fakeManager := NewFakeDiskManager()
	defer fakeManager.Cleanup()
	fakeMounter := &mount.FakeMounter{}
-	builder, err := plug.(*iscsiPlugin).newBuilderInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
+	mounter, err := plug.(*iscsiPlugin).newMounterInternal(spec, types.UID("poduid"), fakeManager, fakeMounter)
	if err != nil {
-		t.Errorf("Failed to make a new Builder: %v", err)
+		t.Errorf("Failed to make a new Mounter: %v", err)
	}
-	if builder == nil {
-		t.Error("Got a nil Builder")
+	if mounter == nil {
+		t.Error("Got a nil Mounter")
	}
-	path := builder.GetPath()
+	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~iscsi/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
-	if err := builder.SetUp(nil); err != nil {
+	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err != nil {
@@ -177,15 +177,15 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
	fakeManager2 := NewFakeDiskManager()
	defer fakeManager2.Cleanup()
-	cleaner, err := plug.(*iscsiPlugin).newCleanerInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
+	unmounter, err := plug.(*iscsiPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager2, fakeMounter)
	if err != nil {
-		t.Errorf("Failed to make a new Cleaner: %v", err)
+		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
-	if cleaner == nil {
-		t.Error("Got a nil Cleaner")
+	if unmounter == nil {
		t.Error("Got a nil Unmounter")
	}
-	if err := cleaner.TearDown(); err != nil {
+	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(path); err == nil {
@@ -277,21 +277,21 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(iscsiPluginName)
-	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-	if !builder.GetAttributes().ReadOnly {
-		t.Errorf("Expected true for builder.IsReadOnly")
+	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+	if !mounter.GetAttributes().ReadOnly {
+		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}
-func TestPortalBuilder(t *testing.T) {
-	if portal := portalBuilder("127.0.0.1"); portal != "127.0.0.1:3260" {
+func TestPortalMounter(t *testing.T) {
+	if portal := portalMounter("127.0.0.1"); portal != "127.0.0.1:3260" {
		t.Errorf("wrong portal: %s", portal)
	}
-	if portal := portalBuilder("127.0.0.1:3260"); portal != "127.0.0.1:3260" {
+	if portal := portalMounter("127.0.0.1:3260"); portal != "127.0.0.1:3260" {
		t.Errorf("wrong portal: %s", portal)
	}
}

View File

@@ -97,7 +97,7 @@ func (util *ISCSIUtil) MakeGlobalPDName(iscsi iscsiDisk) string {
	return makePDNameInternal(iscsi.plugin.host, iscsi.portal, iscsi.iqn, iscsi.lun)
}
-func (util *ISCSIUtil) AttachDisk(b iscsiDiskBuilder) error {
+func (util *ISCSIUtil) AttachDisk(b iscsiDiskMounter) error {
	var devicePath string
	if b.iface == "default" {
		devicePath = strings.Join([]string{"/dev/disk/by-path/ip", b.portal, "iscsi", b.iqn, "lun", b.lun}, "-")
@@ -144,7 +144,7 @@ func (util *ISCSIUtil) AttachDisk(b iscsiDiskBuilder) error {
	return err
}
-func (util *ISCSIUtil) DetachDisk(c iscsiDiskCleaner, mntPath string) error {
+func (util *ISCSIUtil) DetachDisk(c iscsiDiskUnmounter, mntPath string) error {
	_, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
	if err != nil {
		glog.Errorf("iscsi detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)

View File

@@ -80,11 +80,11 @@ func (plugin *nfsPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
	}
}
-func (plugin *nfsPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
-	return plugin.newBuilderInternal(spec, pod, plugin.host.GetMounter())
+func (plugin *nfsPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
+	return plugin.newMounterInternal(spec, pod, plugin.host.GetMounter())
}
-func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Builder, error) {
+func (plugin *nfsPlugin) newMounterInternal(spec *volume.Spec, pod *api.Pod, mounter mount.Interface) (volume.Mounter, error) {
	var source *api.NFSVolumeSource
	var readOnly bool
	if spec.Volume != nil && spec.Volume.NFS != nil {
@@ -94,7 +94,7 @@ func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mou
		source = spec.PersistentVolume.Spec.NFS
		readOnly = spec.ReadOnly
	}
-	return &nfsBuilder{
+	return &nfsMounter{
		nfs: &nfs{
			volName: spec.Name(),
			mounter: mounter,
@@ -107,12 +107,12 @@ func (plugin *nfsPlugin) newBuilderInternal(spec *volume.Spec, pod *api.Pod, mou
	}, nil
}
-func (plugin *nfsPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-	return plugin.newCleanerInternal(volName, podUID, plugin.host.GetMounter())
+func (plugin *nfsPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+	return plugin.newUnmounterInternal(volName, podUID, plugin.host.GetMounter())
}
-func (plugin *nfsPlugin) newCleanerInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
-	return &nfsCleaner{&nfs{
+func (plugin *nfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
+	return &nfsUnmounter{&nfs{
		volName: volName,
		mounter: mounter,
		pod: &api.Pod{ObjectMeta: api.ObjectMeta{UID: podUID}},
@@ -140,16 +140,16 @@ func (nfsVolume *nfs) GetPath() string {
	return nfsVolume.plugin.host.GetPodVolumeDir(nfsVolume.pod.UID, strings.EscapeQualifiedNameForDisk(name), nfsVolume.volName)
}
-type nfsBuilder struct {
+type nfsMounter struct {
	*nfs
	server string
	exportPath string
	readOnly bool
}
-var _ volume.Builder = &nfsBuilder{}
+var _ volume.Mounter = &nfsMounter{}
-func (b *nfsBuilder) GetAttributes() volume.Attributes {
+func (b *nfsMounter) GetAttributes() volume.Attributes {
	return volume.Attributes{
		ReadOnly: b.readOnly,
		Managed: false,
@@ -158,11 +158,11 @@ func (b *nfsBuilder) GetAttributes() volume.Attributes {
}
// SetUp attaches the disk and bind mounts to the volume path.
-func (b *nfsBuilder) SetUp(fsGroup *int64) error {
+func (b *nfsMounter) SetUp(fsGroup *int64) error {
	return b.SetUpAt(b.GetPath(), fsGroup)
}
-func (b *nfsBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *nfsMounter) SetUpAt(dir string, fsGroup *int64) error {
	notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
	glog.V(4).Infof("NFS mount set up: %s %v %v", dir, !notMnt, err)
	if err != nil && !os.IsNotExist(err) {
@@ -207,22 +207,22 @@ func (b *nfsBuilder) SetUpAt(dir string, fsGroup *int64) error {
}
//
-//func (c *nfsCleaner) GetPath() string {
+//func (c *nfsUnmounter) GetPath() string {
//	name := nfsPluginName
//	return c.plugin.host.GetPodVolumeDir(c.pod.UID, strings.EscapeQualifiedNameForDisk(name), c.volName)
//}
-var _ volume.Cleaner = &nfsCleaner{}
+var _ volume.Unmounter = &nfsUnmounter{}
-type nfsCleaner struct {
+type nfsUnmounter struct {
	*nfs
}
-func (c *nfsCleaner) TearDown() error {
+func (c *nfsUnmounter) TearDown() error {
	return c.TearDownAt(c.GetPath())
}
-func (c *nfsCleaner) TearDownAt(dir string) error {
+func (c *nfsUnmounter) TearDownAt(dir string) error {
	notMnt, err := c.mounter.IsLikelyNotMountPoint(dir)
	if err != nil {
		glog.Errorf("Error checking IsLikelyNotMountPoint: %v", err)

View File

@@ -148,20 +148,20 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
	}
	fake := &mount.FakeMounter{}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, err := plug.(*nfsPlugin).newBuilderInternal(spec, pod, fake)
-	volumePath := builder.GetPath()
+	mounter, err := plug.(*nfsPlugin).newMounterInternal(spec, pod, fake)
+	volumePath := mounter.GetPath()
	if err != nil {
-		t.Errorf("Failed to make a new Builder: %v", err)
+		t.Errorf("Failed to make a new Mounter: %v", err)
	}
-	if builder == nil {
-		t.Errorf("Got a nil Builder")
+	if mounter == nil {
+		t.Errorf("Got a nil Mounter")
	}
-	path := builder.GetPath()
+	path := mounter.GetPath()
	expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~nfs/vol1", tmpDir)
	if path != expectedPath {
		t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
	}
-	if err := builder.SetUp(nil); err != nil {
+	if err := mounter.SetUp(nil); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err != nil {
@@ -171,7 +171,7 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
			t.Errorf("SetUp() failed: %v", err)
		}
	}
-	if builder.(*nfsBuilder).readOnly {
+	if mounter.(*nfsMounter).readOnly {
		t.Errorf("The volume source should not be read-only and it is.")
	}
	if len(fake.Log) != 1 {
@@ -183,14 +183,14 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
	}
	fake.ResetLog()
-	cleaner, err := plug.(*nfsPlugin).newCleanerInternal("vol1", types.UID("poduid"), fake)
+	unmounter, err := plug.(*nfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fake)
	if err != nil {
-		t.Errorf("Failed to make a new Cleaner: %v", err)
+		t.Errorf("Failed to make a new Unmounter: %v", err)
	}
-	if cleaner == nil {
-		t.Errorf("Got a nil Cleaner")
+	if unmounter == nil {
+		t.Errorf("Got a nil Unmounter")
	}
-	if err := cleaner.TearDown(); err != nil {
+	if err := unmounter.TearDown(); err != nil {
		t.Errorf("Expected success, got: %v", err)
	}
	if _, err := os.Stat(volumePath); err == nil {
@@ -272,12 +272,12 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
	plugMgr.InitPlugins(ProbeVolumePlugins(volume.VolumeConfig{}), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
	plug, _ := plugMgr.FindPluginByName(nfsPluginName)
-	// readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+	// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
	spec := volume.NewSpecFromPersistentVolume(pv, true)
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-	if !builder.GetAttributes().ReadOnly {
-		t.Errorf("Expected true for builder.IsReadOnly")
+	mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+	if !mounter.GetAttributes().ReadOnly {
+		t.Errorf("Expected true for mounter.IsReadOnly")
	}
}

View File

@@ -53,7 +53,7 @@ func (plugin *persistentClaimPlugin) CanSupport(spec *volume.Spec) bool {
	return spec.Volume != nil && spec.Volume.PersistentVolumeClaim != nil
}
-func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *persistentClaimPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
	claim, err := plugin.host.GetKubeClient().Core().PersistentVolumeClaims(pod.Namespace).Get(spec.Volume.PersistentVolumeClaim.ClaimName)
	if err != nil {
		glog.Errorf("Error finding claim: %+v\n", spec.Volume.PersistentVolumeClaim.ClaimName)
@@ -80,15 +80,15 @@ func (plugin *persistentClaimPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod,
		return nil, err
	}
-	builder, err := plugin.host.NewWrapperBuilder(claim.Spec.VolumeName, *volume.NewSpecFromPersistentVolume(pv, spec.ReadOnly), pod, opts)
+	mounter, err := plugin.host.NewWrapperMounter(claim.Spec.VolumeName, *volume.NewSpecFromPersistentVolume(pv, spec.ReadOnly), pod, opts)
	if err != nil {
-		glog.Errorf("Error creating builder for claim: %+v\n", claim.Name)
+		glog.Errorf("Error creating mounter for claim: %+v\n", claim.Name)
		return nil, err
	}
-	return builder, nil
+	return mounter, nil
}
-func (plugin *persistentClaimPlugin) NewCleaner(_ string, _ types.UID) (volume.Cleaner, error) {
-	return nil, fmt.Errorf("This will never be called directly. The PV backing this claim has a cleaner. Kubelet uses that cleaner, not this one, when removing orphaned volumes.")
+func (plugin *persistentClaimPlugin) NewUnmounter(_ string, _ types.UID) (volume.Unmounter, error) {
+	return nil, fmt.Errorf("This will never be called directly. The PV backing this claim has an unmounter. Kubelet uses that unmounter, not this one, when removing orphaned volumes.")
}
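The persistent-claim plugin's `NewMounter` above never mounts anything itself either: it resolves the claim to its bound PV and then asks the host for a wrapper mounter of the PV's actual backend. A minimal sketch of that resolution chain, with plain maps standing in for the API server and a string standing in for the returned `volume.Mounter`; all names here are illustrative:

```go
package main

import "fmt"

// pv is a toy persistent-volume record; the real object is api.PersistentVolume.
type pv struct{ name, backend string }

var claims = map[string]string{"myclaim": "pv0001"} // claim name -> bound PV name
var pvs = map[string]pv{"pv0001": {"pv0001", "nfs"}} // PV name -> spec

// newMounterForClaim resolves claim -> PV and then delegates to the "host"
// for a wrapper mounter of the real backend, mirroring NewMounter above.
// In the real code this last step is host.NewWrapperMounter(pvName, spec, pod, opts).
func newMounterForClaim(claimName string) (string, error) {
	pvName, ok := claims[claimName]
	if !ok || pvName == "" {
		return "", fmt.Errorf("claim %s is not bound", claimName)
	}
	volume, ok := pvs[pvName]
	if !ok {
		return "", fmt.Errorf("PV %s not found", pvName)
	}
	return fmt.Sprintf("%s mounter for %s", volume.backend, volume.name), nil
}

func main() {
	m, err := newMounterForClaim("myclaim")
	fmt.Println(m, err)
}
```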

View File

@@ -66,13 +66,13 @@ func TestCanSupport(t *testing.T) {
	}
}
-func TestNewBuilder(t *testing.T) {
+func TestNewMounter(t *testing.T) {
	tests := []struct {
		pv *api.PersistentVolume
		claim *api.PersistentVolumeClaim
		plugin volume.VolumePlugin
		podVolume api.VolumeSource
-		testFunc func(builder volume.Builder, plugin volume.VolumePlugin) error
+		testFunc func(mounter volume.Mounter, plugin volume.VolumePlugin) error
		expectedFailure bool
	}{
		{
@@ -108,9 +108,9 @@ func TestNewBuilder(t *testing.T) {
			},
		},
		plugin: gce_pd.ProbeVolumePlugins()[0],
-		testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
-			if !strings.Contains(builder.GetPath(), utilstrings.EscapeQualifiedNameForDisk(plugin.Name())) {
-				return fmt.Errorf("builder path expected to contain plugin name. Got: %s", builder.GetPath())
+		testFunc: func(mounter volume.Mounter, plugin volume.VolumePlugin) error {
+			if !strings.Contains(mounter.GetPath(), utilstrings.EscapeQualifiedNameForDisk(plugin.Name())) {
+				return fmt.Errorf("mounter path expected to contain plugin name. Got: %s", mounter.GetPath())
			}
			return nil
		},
@@ -146,9 +146,9 @@ func TestNewBuilder(t *testing.T) {
			},
		},
		plugin: host_path.ProbeVolumePlugins(volume.VolumeConfig{})[0],
-		testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
-			if builder.GetPath() != "/somepath" {
-				return fmt.Errorf("Expected HostPath.Path /somepath, got: %s", builder.GetPath())
+		testFunc: func(mounter volume.Mounter, plugin volume.VolumePlugin) error {
+			if mounter.GetPath() != "/somepath" {
+				return fmt.Errorf("Expected HostPath.Path /somepath, got: %s", mounter.GetPath())
			}
			return nil
		},
@@ -184,9 +184,9 @@ func TestNewBuilder(t *testing.T) {
			},
		},
		plugin: gce_pd.ProbeVolumePlugins()[0],
-		testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
-			if builder != nil {
-				return fmt.Errorf("Unexpected non-nil builder: %+v", builder)
+		testFunc: func(mounter volume.Mounter, plugin volume.VolumePlugin) error {
+			if mounter != nil {
+				return fmt.Errorf("Unexpected non-nil mounter: %+v", mounter)
			}
			return nil
		},
@@ -227,9 +227,9 @@ func TestNewBuilder(t *testing.T) {
			},
		},
		plugin: gce_pd.ProbeVolumePlugins()[0],
-		testFunc: func(builder volume.Builder, plugin volume.VolumePlugin) error {
-			if builder != nil {
-				return fmt.Errorf("Unexpected non-nil builder: %+v", builder)
+		testFunc: func(mounter volume.Mounter, plugin volume.VolumePlugin) error {
+			if mounter != nil {
+				return fmt.Errorf("Unexpected non-nil mounter: %+v", mounter)
			}
			return nil
		},
@@ -251,24 +251,24 @@ func TestNewBuilder(t *testing.T) {
		}
		spec := &volume.Spec{Volume: &api.Volume{VolumeSource: item.podVolume}}
		pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-		builder, err := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
+		mounter, err := plug.NewMounter(spec, pod, volume.VolumeOptions{})
		if !item.expectedFailure {
			if err != nil {
-				t.Errorf("Failed to make a new Builder: %v", err)
+				t.Errorf("Failed to make a new Mounter: %v", err)
			}
-			if builder == nil {
-				t.Errorf("Got a nil Builder: %v", builder)
+			if mounter == nil {
+				t.Errorf("Got a nil Mounter: %v", mounter)
			}
		}
-		if err := item.testFunc(builder, item.plugin); err != nil {
+		if err := item.testFunc(mounter, item.plugin); err != nil {
			t.Errorf("Unexpected error %+v", err)
		}
	}
}
-func TestNewBuilderClaimNotBound(t *testing.T) {
+func TestNewMounterClaimNotBound(t *testing.T) {
	pv := &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Name: "pvC",
@@ -304,9 +304,9 @@ func TestNewBuilderClaimNotBound(t *testing.T) {
	}
	spec := &volume.Spec{Volume: &api.Volume{VolumeSource: podVolume}}
	pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-	builder, err := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
-	if builder != nil {
-		t.Errorf("Expected a nil builder if the claim wasn't bound")
+	mounter, err := plug.NewMounter(spec, pod, volume.VolumeOptions{})
+	if mounter != nil {
+		t.Errorf("Expected a nil mounter if the claim wasn't bound")
	}
}

View File

@@ -75,16 +75,16 @@ type VolumePlugin interface {
	// const.
	CanSupport(spec *Spec) bool
-	// NewBuilder creates a new volume.Builder from an API specification.
+	// NewMounter creates a new volume.Mounter from an API specification.
	// Ownership of the spec pointer is *not* transferred.
	// - spec: The api.Volume spec
	// - pod: The enclosing pod
-	NewBuilder(spec *Spec, podRef *api.Pod, opts VolumeOptions) (Builder, error)
+	NewMounter(spec *Spec, podRef *api.Pod, opts VolumeOptions) (Mounter, error)
-	// NewCleaner creates a new volume.Cleaner from recoverable state.
+	// NewUnmounter creates a new volume.Unmounter from recoverable state.
	// - name: The volume name, as per the api.Volume spec.
	// - podUID: The UID of the enclosing pod
-	NewCleaner(name string, podUID types.UID) (Cleaner, error)
+	NewUnmounter(name string, podUID types.UID) (Unmounter, error)
}
// PersistentVolumePlugin is an extended interface of VolumePlugin and is used
@@ -158,16 +158,16 @@ type VolumeHost interface {
	// GetKubeClient returns a client interface
	GetKubeClient() clientset.Interface
-	// NewWrapperBuilder finds an appropriate plugin with which to handle
+	// NewWrapperMounter finds an appropriate plugin with which to handle
	// the provided spec. This is used to implement volume plugins which
	// "wrap" other plugins. For example, the "secret" volume is
	// implemented in terms of the "emptyDir" volume.
-	NewWrapperBuilder(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Builder, error)
+	NewWrapperMounter(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error)
-	// NewWrapperCleaner finds an appropriate plugin with which to handle
-	// the provided spec. See comments on NewWrapperBuilder for more
+	// NewWrapperUnmounter finds an appropriate plugin with which to handle
+	// the provided spec. See comments on NewWrapperMounter for more
	// context.
-	NewWrapperCleaner(volName string, spec Spec, podUID types.UID) (Cleaner, error)
+	NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error)
	// Get cloud provider from kubelet.
	GetCloudProvider() cloudprovider.Interface
@@ -403,7 +403,7 @@ func (pm *VolumePluginMgr) FindCreatablePluginBySpec(spec *Spec) (ProvisionableV
}
// FindAttachablePluginBySpec fetches a persistent volume plugin by name. Unlike the other "FindPlugin" methods, this
-// does not return error if no plugin is found. All volumes require a builder and cleaner, but not every volume will
+// does not return error if no plugin is found. All volumes require a mounter and unmounter, but not every volume will
// have an attacher/detacher.
func (pm *VolumePluginMgr) FindAttachablePluginBySpec(spec *Spec) (AttachableVolumePlugin, error) {
	volumePlugin, err := pm.FindPluginBySpec(spec)
@@ -417,7 +417,7 @@ func (pm *VolumePluginMgr) FindAttachablePluginBySpec(spec *Spec) (AttachableVol
}
// FindAttachablePluginByName fetches an attachable volume plugin by name. Unlike the other "FindPlugin" methods, this
-// does not return error if no plugin is found. All volumes require a builder and cleaner, but not every volume will
+// does not return error if no plugin is found. All volumes require a mounter and unmounter, but not every volume will
// have an attacher/detacher.
func (pm *VolumePluginMgr) FindAttachablePluginByName(name string) (AttachableVolumePlugin, error) {
	volumePlugin, err := pm.FindPluginByName(name)
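Taken together, the renamed `VolumePlugin` methods define the contract every plugin touched by this commit now satisfies. A trimmed-down sketch of a plugin against a simplified mirror of the interface (the real methods also take `*Spec`, `*api.Pod`, `VolumeOptions`, and `types.UID`; `demoPlugin` and friends are hypothetical):

```go
package main

import "fmt"

// Simplified mirrors of the renamed interfaces, for illustration only.
type Mounter interface{ SetUp() error }
type Unmounter interface{ TearDown() error }

type VolumePlugin interface {
	NewMounter(volName string) (Mounter, error)
	NewUnmounter(volName string) (Unmounter, error)
}

// demoPlugin shows the shape a plugin takes after this rename: the old
// NewBuilder/NewCleaner pair becomes NewMounter/NewUnmounter.
type demoPlugin struct{}

type demoMounter struct{ volName string }

func (m *demoMounter) SetUp() error { fmt.Println("set up", m.volName); return nil }

type demoUnmounter struct{ volName string }

func (u *demoUnmounter) TearDown() error { fmt.Println("tear down", u.volName); return nil }

func (p *demoPlugin) NewMounter(volName string) (Mounter, error) {
	return &demoMounter{volName: volName}, nil
}

func (p *demoPlugin) NewUnmounter(volName string) (Unmounter, error) {
	return &demoUnmounter{volName: volName}, nil
}

func main() {
	var plug VolumePlugin = &demoPlugin{}
	m, _ := plug.NewMounter("vol1")
	_ = m.SetUp()
	u, _ := plug.NewUnmounter("vol1")
	_ = u.TearDown()
}
```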

View File

@@ -16,7 +16,7 @@ limitations under the License.
//
// diskManager interface and diskSetup/TearDown functions abstract commonly used procedures to set up a block volume
-// rbd volume implements diskManager, calls diskSetup when creating a volume, and calls diskTearDown inside volume cleaner.
+// rbd volume implements diskManager, calls diskSetup when creating a volume, and calls diskTearDown inside volume unmounter.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
@@ -34,13 +34,13 @@ import (
type diskManager interface {
	MakeGlobalPDName(disk rbd) string
	// Attaches the disk to the kubelet's host machine.
-	AttachDisk(disk rbdBuilder) error
+	AttachDisk(disk rbdMounter) error
	// Detaches the disk from the kubelet's host machine.
-	DetachDisk(disk rbdCleaner, mntPath string) error
+	DetachDisk(disk rbdUnmounter, mntPath string) error
}
// utility to mount a disk based filesystem
-func diskSetUp(manager diskManager, b rbdBuilder, volPath string, mounter mount.Interface, fsGroup *int64) error {
+func diskSetUp(manager diskManager, b rbdMounter, volPath string, mounter mount.Interface, fsGroup *int64) error {
	globalPDPath := manager.MakeGlobalPDName(*b.rbd)
	// TODO: handle failed mounts here.
	notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
@@ -80,7 +80,7 @@ func diskSetUp(manager diskManager, b rbdBuilder, volPath string, mounter mount.
}
// utility to tear down a disk based filesystem
-func diskTearDown(manager diskManager, c rbdCleaner, volPath string, mounter mount.Interface) error {
+func diskTearDown(manager diskManager, c rbdUnmounter, volPath string, mounter mount.Interface) error {
	notMnt, err := mounter.IsLikelyNotMountPoint(volPath)
	if err != nil {
		glog.Errorf("cannot validate mountpoint %s", volPath)

View File

@@ -69,7 +69,7 @@ func (plugin *rbdPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
     }
 }
 
-func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Builder, error) {
+func (plugin *rbdPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
     secret := ""
     source, _ := plugin.getRBDVolumeSource(spec)
@@ -91,7 +91,7 @@ func (plugin *rbdPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.Vo
     }
     // Inject real implementations here, test through the internal function.
-    return plugin.newBuilderInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(), secret)
+    return plugin.newMounterInternal(spec, pod.UID, &RBDUtil{}, plugin.host.GetMounter(), secret)
 }
 
 func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*api.RBDVolumeSource, bool) {
@@ -104,7 +104,7 @@ func (plugin *rbdPlugin) getRBDVolumeSource(spec *volume.Spec) (*api.RBDVolumeSo
     }
 }
 
-func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
+func (plugin *rbdPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Mounter, error) {
     source, readOnly := plugin.getRBDVolumeSource(spec)
     pool := source.RBDPool
     if pool == "" {
@@ -119,7 +119,7 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
         keyring = "/etc/ceph/keyring"
     }
-    return &rbdBuilder{
+    return &rbdMounter{
         rbd: &rbd{
             podUID:  podUID,
             volName: spec.Name(),
@@ -138,14 +138,14 @@ func (plugin *rbdPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID,
     }, nil
 }
 
-func (plugin *rbdPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+func (plugin *rbdPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
     // Inject real implementations here, test through the internal function.
-    return plugin.newCleanerInternal(volName, podUID, &RBDUtil{}, plugin.host.GetMounter())
+    return plugin.newUnmounterInternal(volName, podUID, &RBDUtil{}, plugin.host.GetMounter())
 }
 
-func (plugin *rbdPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
-    return &rbdCleaner{
-        rbdBuilder: &rbdBuilder{
+func (plugin *rbdPlugin) newUnmounterInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Unmounter, error) {
+    return &rbdUnmounter{
+        rbdMounter: &rbdMounter{
             rbd: &rbd{
                 podUID:  podUID,
                 volName: volName,
@@ -177,7 +177,7 @@ func (rbd *rbd) GetPath() string {
     return rbd.plugin.host.GetPodVolumeDir(rbd.podUID, strings.EscapeQualifiedNameForDisk(name), rbd.volName)
 }
 
-type rbdBuilder struct {
+type rbdMounter struct {
     *rbd
     // capitalized so they can be exported in persistRBD()
     Mon []string
@@ -187,7 +187,7 @@ type rbdBuilder struct {
     fsType string
 }
 
-var _ volume.Builder = &rbdBuilder{}
+var _ volume.Mounter = &rbdMounter{}
 
 func (b *rbd) GetAttributes() volume.Attributes {
     return volume.Attributes{
@@ -197,11 +197,11 @@ func (b *rbd) GetAttributes() volume.Attributes {
     }
 }
 
-func (b *rbdBuilder) SetUp(fsGroup *int64) error {
+func (b *rbdMounter) SetUp(fsGroup *int64) error {
     return b.SetUpAt(b.GetPath(), fsGroup)
 }
 
-func (b *rbdBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *rbdMounter) SetUpAt(dir string, fsGroup *int64) error {
     // diskSetUp checks mountpoints and prevent repeated calls
     err := diskSetUp(b.manager, *b, dir, b.mounter, fsGroup)
     if err != nil {
@@ -210,19 +210,19 @@ func (b *rbdBuilder) SetUpAt(dir string, fsGroup *int64) error {
     return err
 }
 
-type rbdCleaner struct {
-    *rbdBuilder
+type rbdUnmounter struct {
+    *rbdMounter
 }
 
-var _ volume.Cleaner = &rbdCleaner{}
+var _ volume.Unmounter = &rbdUnmounter{}
 
 // Unmounts the bind mount, and detaches the disk only if the disk
 // resource was the last reference to that disk on the kubelet.
-func (c *rbdCleaner) TearDown() error {
+func (c *rbdUnmounter) TearDown() error {
     return c.TearDownAt(c.GetPath())
 }
 
-func (c *rbdCleaner) TearDownAt(dir string) error {
+func (c *rbdUnmounter) TearDownAt(dir string) error {
     return diskTearDown(c.manager, *c, dir, c.mounter)
 }
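
Taken together, the rbd changes keep the original lifecycle intact under the new names. The caller-side sketch below, with hypothetical plugin, spec, and pod values, walks that lifecycle end to end; the `NewMounter`/`SetUp`/`NewUnmounter`/`TearDown` names and signatures match the interfaces changed in this commit, while the surrounding harness and import paths are assumptions.

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/volume"
)

// mountThenUnmount exercises the renamed lifecycle: NewMounter/SetUp on the
// way up, NewUnmounter/TearDown on the way down. All inputs are stand-ins.
func mountThenUnmount(plug volume.VolumePlugin, spec *volume.Spec, pod *api.Pod) error {
	mounter, err := plug.NewMounter(spec, pod, volume.VolumeOptions{})
	if err != nil {
		return err
	}
	// SetUp mounts the volume at the mounter's self-determined path.
	if err := mounter.SetUp(nil); err != nil {
		return err
	}
	// Later, the kubelet asks the same plugin for an Unmounter by name and pod UID.
	unmounter, err := plug.NewUnmounter(spec.Name(), pod.UID)
	if err != nil {
		return err
	}
	// TearDown unmounts and removes traces of SetUp.
	return unmounter.TearDown()
}
```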

View File

@@ -69,7 +69,7 @@ func (fake *fakeDiskManager) Cleanup() {
 func (fake *fakeDiskManager) MakeGlobalPDName(disk rbd) string {
     return fake.tmpDir
 }
 
-func (fake *fakeDiskManager) AttachDisk(b rbdBuilder) error {
+func (fake *fakeDiskManager) AttachDisk(b rbdMounter) error {
     globalPath := b.manager.MakeGlobalPDName(*b.rbd)
     err := os.MkdirAll(globalPath, 0750)
     if err != nil {
@@ -78,7 +78,7 @@ func (fake *fakeDiskManager) AttachDisk(b rbdBuilder) error {
     return nil
 }
 
-func (fake *fakeDiskManager) DetachDisk(c rbdCleaner, mntPath string) error {
+func (fake *fakeDiskManager) DetachDisk(c rbdUnmounter, mntPath string) error {
     globalPath := c.manager.MakeGlobalPDName(*c.rbd)
     err := os.RemoveAll(globalPath)
     if err != nil {
@@ -103,21 +103,21 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
     }
     fdm := NewFakeDiskManager()
     defer fdm.Cleanup()
-    builder, err := plug.(*rbdPlugin).newBuilderInternal(spec, types.UID("poduid"), fdm, &mount.FakeMounter{}, "secrets")
+    mounter, err := plug.(*rbdPlugin).newMounterInternal(spec, types.UID("poduid"), fdm, &mount.FakeMounter{}, "secrets")
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Error("Got a nil Builder")
+    if mounter == nil {
+        t.Error("Got a nil Mounter")
     }
 
-    path := builder.GetPath()
+    path := mounter.GetPath()
     expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~rbd/vol1", tmpDir)
     if path != expectedPath {
         t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, path)
     }
 
-    if err := builder.SetUp(nil); err != nil {
+    if err := mounter.SetUp(nil); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(path); err != nil {
@@ -135,15 +135,15 @@ func doTestPlugin(t *testing.T, spec *volume.Spec) {
         }
     }
 
-    cleaner, err := plug.(*rbdPlugin).newCleanerInternal("vol1", types.UID("poduid"), fdm, &mount.FakeMounter{})
+    unmounter, err := plug.(*rbdPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fdm, &mount.FakeMounter{})
     if err != nil {
-        t.Errorf("Failed to make a new Cleaner: %v", err)
+        t.Errorf("Failed to make a new Unmounter: %v", err)
     }
-    if cleaner == nil {
-        t.Error("Got a nil Cleaner")
+    if unmounter == nil {
+        t.Error("Got a nil Unmounter")
     }
 
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(path); err == nil {
@@ -229,12 +229,12 @@ func TestPersistentClaimReadOnlyFlag(t *testing.T) {
     plugMgr.InitPlugins(ProbeVolumePlugins(), volumetest.NewFakeVolumeHost(tmpDir, client, nil))
     plug, _ := plugMgr.FindPluginByName(rbdPluginName)
 
-    // readOnly bool is supplied by persistent-claim volume source when its builder creates other volumes
+    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
     spec := volume.NewSpecFromPersistentVolume(pv, true)
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: types.UID("poduid")}}
-    builder, _ := plug.NewBuilder(spec, pod, volume.VolumeOptions{})
+    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
 
-    if !builder.GetAttributes().ReadOnly {
-        t.Errorf("Expected true for builder.IsReadOnly")
+    if !mounter.GetAttributes().ReadOnly {
+        t.Errorf("Expected true for mounter.IsReadOnly")
     }
 }

View File

@@ -95,7 +95,7 @@ func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
     return makePDNameInternal(rbd.plugin.host, rbd.Pool, rbd.Image)
 }
 
-func (util *RBDUtil) rbdLock(b rbdBuilder, lock bool) error {
+func (util *RBDUtil) rbdLock(b rbdMounter, lock bool) error {
     var err error
     var output, locker string
     var cmd []byte
@@ -159,7 +159,7 @@ func (util *RBDUtil) rbdLock(b rbdBuilder, lock bool) error {
     return err
 }
 
-func (util *RBDUtil) persistRBD(rbd rbdBuilder, mnt string) error {
+func (util *RBDUtil) persistRBD(rbd rbdMounter, mnt string) error {
     file := path.Join(mnt, "rbd.json")
     fp, err := os.Create(file)
     if err != nil {
@@ -175,7 +175,7 @@ func (util *RBDUtil) persistRBD(rbd rbdBuilder, mnt string) error {
     return nil
 }
 
-func (util *RBDUtil) loadRBD(builder *rbdBuilder, mnt string) error {
+func (util *RBDUtil) loadRBD(mounter *rbdMounter, mnt string) error {
     file := path.Join(mnt, "rbd.json")
     fp, err := os.Open(file)
     if err != nil {
@@ -184,14 +184,14 @@ func (util *RBDUtil) loadRBD(builder *rbdBuilder, mnt string) error {
     defer fp.Close()
     decoder := json.NewDecoder(fp)
-    if err = decoder.Decode(builder); err != nil {
+    if err = decoder.Decode(mounter); err != nil {
         return fmt.Errorf("rbd: decode err: %v.", err)
     }
     return nil
 }
 
-func (util *RBDUtil) fencing(b rbdBuilder) error {
+func (util *RBDUtil) fencing(b rbdMounter) error {
     // no need to fence readOnly
     if (&b).GetAttributes().ReadOnly {
         return nil
@@ -199,16 +199,16 @@ func (util *RBDUtil) fencing(b rbdBuilder) error {
     return util.rbdLock(b, true)
 }
 
-func (util *RBDUtil) defencing(c rbdCleaner) error {
+func (util *RBDUtil) defencing(c rbdUnmounter) error {
     // no need to fence readOnly
     if c.ReadOnly {
         return nil
     }
 
-    return util.rbdLock(*c.rbdBuilder, false)
+    return util.rbdLock(*c.rbdMounter, false)
 }
 
-func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
+func (util *RBDUtil) AttachDisk(b rbdMounter) error {
     var err error
     var output []byte
@@ -281,7 +281,7 @@ func (util *RBDUtil) AttachDisk(b rbdBuilder) error {
     return err
 }
 
-func (util *RBDUtil) DetachDisk(c rbdCleaner, mntPath string) error {
+func (util *RBDUtil) DetachDisk(c rbdUnmounter, mntPath string) error {
     device, cnt, err := mount.GetDeviceNameFromMount(c.mounter, mntPath)
     if err != nil {
         return fmt.Errorf("rbd detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
@@ -298,7 +298,7 @@ func (util *RBDUtil) DetachDisk(c rbdCleaner, mntPath string) error {
     }
     // load ceph and image/pool info to remove fencing
-    if err := util.loadRBD(c.rbdBuilder, mntPath); err == nil {
+    if err := util.loadRBD(c.rbdMounter, mntPath); err == nil {
         // remove rbd lock
         util.defencing(c)
     }
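
The persistRBD/loadRBD pair above is what lets defencing survive restarts: the mounter's exported fields are serialized to rbd.json at the mount point, and DetachDisk reloads them to release the advisory lock. A minimal sketch of that round trip, using an illustrative subset of the exported fields (Mon, plus the pool and image referenced by MakeGlobalPDName); the type and function names here are hypothetical.

```go
package example

import (
	"encoding/json"
	"os"
	"path"
)

// rbdInfo mirrors a subset of the exported fields persisted by persistRBD;
// they are capitalized precisely so encoding/json can round-trip them.
type rbdInfo struct {
	Mon   []string
	Pool  string
	Image string
}

// persist writes the info next to the mount, as persistRBD does.
func persist(info rbdInfo, mnt string) error {
	fp, err := os.Create(path.Join(mnt, "rbd.json"))
	if err != nil {
		return err
	}
	defer fp.Close()
	return json.NewEncoder(fp).Encode(info)
}

// load recovers the info at detach time, as loadRBD does.
func load(mnt string) (rbdInfo, error) {
	var info rbdInfo
	fp, err := os.Open(path.Join(mnt, "rbd.json"))
	if err != nil {
		return info, err
	}
	defer fp.Close()
	return info, json.NewDecoder(fp).Decode(&info)
}
```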

View File

@@ -64,8 +64,8 @@ func (plugin *secretPlugin) CanSupport(spec *volume.Spec) bool {
     return spec.Volume != nil && spec.Volume.Secret != nil
 }
 
-func (plugin *secretPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
-    return &secretVolumeBuilder{
+func (plugin *secretPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
+    return &secretVolumeMounter{
         secretVolume: &secretVolume{
             spec.Name(),
             pod.UID,
@@ -80,8 +80,8 @@ func (plugin *secretPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts vol
     }, nil
 }
 
-func (plugin *secretPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
-    return &secretVolumeCleaner{
+func (plugin *secretPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
+    return &secretVolumeUnmounter{
         &secretVolume{
             volName,
             podUID,
@@ -112,9 +112,9 @@ func getPathFromHost(host volume.VolumeHost, podUID types.UID, volName string) s
     return host.GetPodVolumeDir(podUID, strings.EscapeQualifiedNameForDisk(secretPluginName), volName)
 }
 
-// secretVolumeBuilder handles retrieving secrets from the API server
+// secretVolumeMounter handles retrieving secrets from the API server
 // and placing them into the volume on the host.
-type secretVolumeBuilder struct {
+type secretVolumeMounter struct {
     *secretVolume
 
     secretName string
@@ -122,7 +122,7 @@ type secretVolumeBuilder struct {
     opts *volume.VolumeOptions
 }
 
-var _ volume.Builder = &secretVolumeBuilder{}
+var _ volume.Mounter = &secretVolumeMounter{}
 
 func (sv *secretVolume) GetAttributes() volume.Attributes {
     return volume.Attributes{
@@ -131,15 +131,15 @@ func (sv *secretVolume) GetAttributes() volume.Attributes {
         SupportsSELinux: true,
     }
 }
 
-func (b *secretVolumeBuilder) SetUp(fsGroup *int64) error {
+func (b *secretVolumeMounter) SetUp(fsGroup *int64) error {
     return b.SetUpAt(b.GetPath(), fsGroup)
 }
 
-func (b *secretVolumeBuilder) getMetaDir() string {
+func (b *secretVolumeMounter) getMetaDir() string {
     return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, strings.EscapeQualifiedNameForDisk(secretPluginName)), b.volName)
 }
 
-func (b *secretVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
+func (b *secretVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
     notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
     // Getting an os.IsNotExist err from is a contingency; the directory
     // may not exist yet, in which case, setup should run.
@@ -156,7 +156,7 @@ func (b *secretVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
     glog.V(3).Infof("Setting up volume %v for pod %v at %v", b.volName, b.pod.UID, dir)
 
     // Wrap EmptyDir, let it do the setup.
-    wrapped, err := b.plugin.host.NewWrapperBuilder(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
+    wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, &b.pod, *b.opts)
     if err != nil {
         return err
     }
@@ -208,22 +208,22 @@ func totalSecretBytes(secret *api.Secret) int {
     return totalSize
 }
 
-// secretVolumeCleaner handles cleaning up secret volumes.
-type secretVolumeCleaner struct {
+// secretVolumeUnmounter handles cleaning up secret volumes.
+type secretVolumeUnmounter struct {
     *secretVolume
 }
 
-var _ volume.Cleaner = &secretVolumeCleaner{}
+var _ volume.Unmounter = &secretVolumeUnmounter{}
 
-func (c *secretVolumeCleaner) TearDown() error {
+func (c *secretVolumeUnmounter) TearDown() error {
     return c.TearDownAt(c.GetPath())
 }
 
-func (c *secretVolumeCleaner) TearDownAt(dir string) error {
+func (c *secretVolumeUnmounter) TearDownAt(dir string) error {
     glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
 
     // Wrap EmptyDir, let it do the teardown.
-    wrapped, err := c.plugin.host.NewWrapperCleaner(c.volName, wrappedVolumeSpec, c.podUID)
+    wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
     if err != nil {
         return err
     }
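
Both SetUpAt and TearDownAt above lean on the wrapped-volume pattern: ask the host for a Mounter (or Unmounter) of an inner EmptyDir, let it manage the directory, then layer the secret payload on top. A condensed sketch of the setup half, assuming a host that implements the renamed wrapper methods; `setUpWrapped` and `wrappedSpec` are placeholders, and the payload write is elided.

```go
package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/volume"
)

// setUpWrapped delegates directory setup to the wrapped plugin, then leaves
// room for the outer plugin to write its own data into the result.
func setUpWrapped(host volume.VolumeHost, volName string, wrappedSpec volume.Spec,
	pod *api.Pod, opts volume.VolumeOptions, dir string, fsGroup *int64) error {
	// Ask the host for a Mounter of the wrapped volume type (EmptyDir here).
	wrapped, err := host.NewWrapperMounter(volName, wrappedSpec, pod, opts)
	if err != nil {
		return err
	}
	// The wrapped plugin creates and mounts the directory.
	if err := wrapped.SetUpAt(dir, fsGroup); err != nil {
		return err
	}
	// The outer plugin would now fetch the secret and write its keys
	// into dir before returning.
	return nil
}
```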

View File

@@ -89,20 +89,20 @@ func TestPlugin(t *testing.T) {
     }
 
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
-    builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
+    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder")
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter")
     }
 
-    volumePath := builder.GetPath()
+    volumePath := mounter.GetPath()
     if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid/volumes/kubernetes.io~secret/test_volume_name")) {
         t.Errorf("Got unexpected path: %s", volumePath)
     }
 
-    err = builder.SetUp(nil)
+    err = mounter.SetUp(nil)
     if err != nil {
         t.Errorf("Failed to setup volume: %v", err)
     }
@@ -128,7 +128,7 @@ func TestPlugin(t *testing.T) {
     defer doTestCleanAndTeardown(plugin, testPodUID, testVolumeName, volumePath, t)
 
     // Metrics only supported on linux
-    metrics, err := builder.GetMetrics()
+    metrics, err := mounter.GetMetrics()
     if runtime.GOOS == "linux" {
         assert.NotEmpty(t, metrics)
         assert.NoError(t, err)
@@ -163,29 +163,29 @@ func TestPluginIdempotent(t *testing.T) {
     podVolumeDir := fmt.Sprintf("%v/pods/test_pod_uid2/volumes/kubernetes.io~secret/test_volume_name", rootDir)
     podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid2/plugins/kubernetes.io~secret/test_volume_name", rootDir)
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
-    mounter := host.GetMounter().(*mount.FakeMounter)
-    mounter.MountPoints = []mount.MountPoint{
+    physicalMounter := host.GetMounter().(*mount.FakeMounter)
+    physicalMounter.MountPoints = []mount.MountPoint{
         {
             Path: podVolumeDir,
         },
     }
     util.SetReady(podMetadataDir)
-    builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
+    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder")
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter")
     }
 
-    volumePath := builder.GetPath()
-    err = builder.SetUp(nil)
+    volumePath := mounter.GetPath()
+    err = mounter.SetUp(nil)
     if err != nil {
         t.Errorf("Failed to setup volume: %v", err)
     }
-    if len(mounter.Log) != 0 {
-        t.Errorf("Unexpected calls made to mounter: %v", mounter.Log)
+    if len(physicalMounter.Log) != 0 {
+        t.Errorf("Unexpected calls made to physicalMounter: %v", physicalMounter.Log)
     }
     if _, err := os.Stat(volumePath); err != nil {
@@ -222,22 +222,22 @@ func TestPluginReboot(t *testing.T) {
     }
 
     pod := &api.Pod{ObjectMeta: api.ObjectMeta{UID: testPodUID}}
-    builder, err := plugin.NewBuilder(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
+    mounter, err := plugin.NewMounter(volume.NewSpecFromVolume(volumeSpec), pod, volume.VolumeOptions{})
     if err != nil {
-        t.Errorf("Failed to make a new Builder: %v", err)
+        t.Errorf("Failed to make a new Mounter: %v", err)
     }
-    if builder == nil {
-        t.Errorf("Got a nil Builder")
+    if mounter == nil {
+        t.Errorf("Got a nil Mounter")
     }
 
     podMetadataDir := fmt.Sprintf("%v/pods/test_pod_uid3/plugins/kubernetes.io~secret/test_volume_name", rootDir)
     util.SetReady(podMetadataDir)
-    volumePath := builder.GetPath()
+    volumePath := mounter.GetPath()
     if !strings.HasSuffix(volumePath, fmt.Sprintf("pods/test_pod_uid3/volumes/kubernetes.io~secret/test_volume_name")) {
         t.Errorf("Got unexpected path: %s", volumePath)
     }
 
-    err = builder.SetUp(nil)
+    err = mounter.SetUp(nil)
     if err != nil {
         t.Errorf("Failed to setup volume: %v", err)
     }
@@ -298,15 +298,15 @@ func doTestSecretDataInVolume(volumePath string, secret api.Secret, t *testing.T
 }
 
 func doTestCleanAndTeardown(plugin volume.VolumePlugin, podUID types.UID, testVolumeName, volumePath string, t *testing.T) {
-    cleaner, err := plugin.NewCleaner(testVolumeName, podUID)
+    unmounter, err := plugin.NewUnmounter(testVolumeName, podUID)
     if err != nil {
-        t.Errorf("Failed to make a new Cleaner: %v", err)
+        t.Errorf("Failed to make a new Unmounter: %v", err)
     }
-    if cleaner == nil {
-        t.Errorf("Got a nil Cleaner")
+    if unmounter == nil {
+        t.Errorf("Got a nil Unmounter")
     }
 
-    if err := cleaner.TearDown(); err != nil {
+    if err := unmounter.TearDown(); err != nil {
         t.Errorf("Expected success, got: %v", err)
     }
     if _, err := os.Stat(volumePath); err == nil {

View File

@@ -82,7 +82,7 @@ func (f *fakeVolumeHost) GetWriter() io.Writer {
     return f.writer
 }
 
-func (f *fakeVolumeHost) NewWrapperBuilder(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Builder, error) {
+func (f *fakeVolumeHost) NewWrapperMounter(volName string, spec Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) {
     // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
     wrapperVolumeName := "wrapped_" + volName
     if spec.Volume != nil {
@@ -92,10 +92,10 @@ func (f *fakeVolumeHost) NewWrapperBuilder(volName string, spec Spec, pod *api.P
     if err != nil {
         return nil, err
     }
-    return plug.NewBuilder(&spec, pod, opts)
+    return plug.NewMounter(&spec, pod, opts)
 }
 
-func (f *fakeVolumeHost) NewWrapperCleaner(volName string, spec Spec, podUID types.UID) (Cleaner, error) {
+func (f *fakeVolumeHost) NewWrapperUnmounter(volName string, spec Spec, podUID types.UID) (Unmounter, error) {
     // The name of wrapper volume is set to "wrapped_{wrapped_volume_name}"
     wrapperVolumeName := "wrapped_" + volName
     if spec.Volume != nil {
@@ -105,7 +105,7 @@ func (f *fakeVolumeHost) NewWrapperCleaner(volName string, spec Spec, podUID typ
     if err != nil {
         return nil, err
     }
-    return plug.NewCleaner(spec.Name(), podUID)
+    return plug.NewUnmounter(spec.Name(), podUID)
 }
 
 // Returns the hostname of the host kubelet is running on
@@ -159,11 +159,11 @@ func (plugin *FakeVolumePlugin) CanSupport(spec *Spec) bool {
     return true
 }
 
-func (plugin *FakeVolumePlugin) NewBuilder(spec *Spec, pod *api.Pod, opts VolumeOptions) (Builder, error) {
+func (plugin *FakeVolumePlugin) NewMounter(spec *Spec, pod *api.Pod, opts VolumeOptions) (Mounter, error) {
     return &FakeVolume{pod.UID, spec.Name(), plugin, MetricsNil{}}, nil
 }
 
-func (plugin *FakeVolumePlugin) NewCleaner(volName string, podUID types.UID) (Cleaner, error) {
+func (plugin *FakeVolumePlugin) NewUnmounter(volName string, podUID types.UID) (Unmounter, error) {
     return &FakeVolume{podUID, volName, plugin, MetricsNil{}}, nil
 }

View File

@@ -58,15 +58,15 @@ type Metrics struct {
     Available *resource.Quantity
 }
 
-// Attributes represents the attributes of this builder.
+// Attributes represents the attributes of this mounter.
 type Attributes struct {
     ReadOnly        bool
     Managed         bool
     SupportsSELinux bool
 }
 
-// Builder interface provides methods to set up/mount the volume.
-type Builder interface {
+// Mounter interface provides methods to set up/mount the volume.
+type Mounter interface {
     // Uses Interface to provide the path for Docker binds.
     Volume
     // SetUp prepares and mounts/unpacks the volume to a
@@ -82,12 +82,12 @@ type Builder interface {
     // be called more than once, so implementations must be
     // idempotent.
     SetUpAt(dir string, fsGroup *int64) error
-    // GetAttributes returns the attributes of the builder.
+    // GetAttributes returns the attributes of the mounter.
     GetAttributes() Attributes
 }
 
-// Cleaner interface provides methods to cleanup/unmount the volumes.
-type Cleaner interface {
+// Unmounter interface provides methods to cleanup/unmount the volumes.
+type Unmounter interface {
     Volume
     // TearDown unmounts the volume from a self-determined directory and
     // removes traces of the SetUp procedure.
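
For plugin authors, the rename is mechanical: the method sets are unchanged. A minimal no-op pair satisfying the renamed interfaces, assuming the embedded Volume interface consists of GetPath and GetMetrics as used elsewhere in this commit; the noop types are illustrative only.

```go
package volume

// noopVolume stubs the embedded Volume interface (GetPath/GetMetrics assumed).
type noopVolume struct{ path string }

func (v *noopVolume) GetPath() string               { return v.path }
func (v *noopVolume) GetMetrics() (*Metrics, error) { return &Metrics{}, nil }

// noopMounter satisfies Mounter: Volume plus SetUp/SetUpAt/GetAttributes.
type noopMounter struct{ noopVolume }

func (m *noopMounter) SetUp(fsGroup *int64) error               { return m.SetUpAt(m.GetPath(), fsGroup) }
func (m *noopMounter) SetUpAt(dir string, fsGroup *int64) error { return nil }
func (m *noopMounter) GetAttributes() Attributes                { return Attributes{} }

// noopUnmounter satisfies Unmounter: Volume plus TearDown/TearDownAt.
type noopUnmounter struct{ noopVolume }

func (u *noopUnmounter) TearDown() error             { return u.TearDownAt(u.GetPath()) }
func (u *noopUnmounter) TearDownAt(dir string) error { return nil }

// Compile-time checks mirroring the var _ assertions used by the real plugins.
var _ Mounter = &noopMounter{}
var _ Unmounter = &noopUnmounter{}
```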

View File

@@ -38,7 +38,7 @@ const (
 // SetVolumeOwnership modifies the given volume to be owned by
 // fsGroup, and sets SetGid so that newly created files are owned by
 // fsGroup. If fsGroup is nil nothing is done.
-func SetVolumeOwnership(builder Builder, fsGroup *int64) error {
+func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
     if fsGroup == nil {
         return nil
@@ -46,7 +46,7 @@ func SetVolumeOwnership(builder Builder, fsGroup *int64) error {
     chownRunner := chown.New()
     chmodRunner := chmod.New()
-    return filepath.Walk(builder.GetPath(), func(path string, info os.FileInfo, err error) error {
+    return filepath.Walk(mounter.GetPath(), func(path string, info os.FileInfo, err error) error {
         if err != nil {
             return err
         }
@@ -67,7 +67,7 @@ func SetVolumeOwnership(builder Builder, fsGroup *int64) error {
         }
 
         mask := rwMask
-        if builder.GetAttributes().ReadOnly {
+        if mounter.GetAttributes().ReadOnly {
             mask = roMask
         }
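
A typical call site, sketched: a plugin invokes SetVolumeOwnership from its SetUpAt once the mount succeeds, so the fsGroup chown/chmod walk runs over the freshly mounted tree. The `finishSetUp` helper and the elided mount step are hypothetical.

```go
// finishSetUp is a hypothetical tail of a plugin's SetUpAt.
func finishSetUp(m Mounter, fsGroup *int64) error {
	// ... the volume has just been mounted at m.GetPath() ...

	// Walk the tree, chowning to fsGroup and applying rwMask or roMask
	// according to m.GetAttributes().ReadOnly; no-op when fsGroup is nil.
	return SetVolumeOwnership(m, fsGroup)
}
```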

View File

@@ -18,6 +18,6 @@ limitations under the License.
 package volume
 
-func SetVolumeOwnership(builder Builder, fsGroup *int64) error {
+func SetVolumeOwnership(mounter Mounter, fsGroup *int64) error {
     return nil
 }