Rename volume.Builder to Mounter and volume.Cleaner to Unmounter
This commit is contained in:
@@ -68,7 +68,7 @@ func (plugin *downwardAPIPlugin) CanSupport(spec *volume.Spec) bool {
|
||||
return spec.Volume != nil && spec.Volume.DownwardAPI != nil
|
||||
}
|
||||
|
||||
func (plugin *downwardAPIPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
|
||||
func (plugin *downwardAPIPlugin) NewMounter(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
||||
v := &downwardAPIVolume{
|
||||
volName: spec.Name(),
|
||||
pod: pod,
|
||||
@@ -79,14 +79,14 @@ func (plugin *downwardAPIPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, opt
|
||||
for _, fileInfo := range spec.Volume.DownwardAPI.Items {
|
||||
v.fieldReferenceFileNames[fileInfo.FieldRef.FieldPath] = path.Clean(fileInfo.Path)
|
||||
}
|
||||
return &downwardAPIVolumeBuilder{
|
||||
return &downwardAPIVolumeMounter{
|
||||
downwardAPIVolume: v,
|
||||
opts: &opts,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (plugin *downwardAPIPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
|
||||
return &downwardAPIVolumeCleaner{
|
||||
func (plugin *downwardAPIPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
|
||||
return &downwardAPIVolumeUnmounter{
|
||||
&downwardAPIVolume{
|
||||
volName: volName,
|
||||
podUID: podUID,
|
||||
@@ -100,20 +100,20 @@ type downwardAPIVolume struct {
|
||||
volName string
|
||||
fieldReferenceFileNames map[string]string
|
||||
pod *api.Pod
|
||||
podUID types.UID // TODO: remove this redundancy as soon NewCleaner func will have *api.POD and not only types.UID
|
||||
podUID types.UID // TODO: remove this redundancy as soon as the NewUnmounter func has *api.POD and not only types.UID
|
||||
plugin *downwardAPIPlugin
|
||||
volume.MetricsNil
|
||||
}
|
||||
|
||||
// downwardAPIVolumeBuilder fetches info from downward API from the pod
|
||||
// downwardAPIVolumeMounter fetches info from downward API from the pod
|
||||
// and dumps it in files
|
||||
type downwardAPIVolumeBuilder struct {
|
||||
type downwardAPIVolumeMounter struct {
|
||||
*downwardAPIVolume
|
||||
opts *volume.VolumeOptions
|
||||
}
|
||||
|
||||
// downwardAPIVolumeBuilder implements volume.Builder interface
|
||||
var _ volume.Builder = &downwardAPIVolumeBuilder{}
|
||||
// downwardAPIVolumeMounter implements volume.Mounter interface
|
||||
var _ volume.Mounter = &downwardAPIVolumeMounter{}
|
||||
|
||||
// downward API volumes are always ReadOnlyManaged
|
||||
func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
|
||||
@@ -128,14 +128,14 @@ func (d *downwardAPIVolume) GetAttributes() volume.Attributes {
|
||||
// This function is not idempotent by design. We want the data to be refreshed periodically.
|
||||
// The internal sync interval of kubelet will drive the refresh of data.
|
||||
// TODO: Add volume specific ticker and refresh loop
|
||||
func (b *downwardAPIVolumeBuilder) SetUp(fsGroup *int64) error {
|
||||
func (b *downwardAPIVolumeMounter) SetUp(fsGroup *int64) error {
|
||||
return b.SetUpAt(b.GetPath(), fsGroup)
|
||||
}
|
||||
|
||||
func (b *downwardAPIVolumeBuilder) SetUpAt(dir string, fsGroup *int64) error {
|
||||
func (b *downwardAPIVolumeMounter) SetUpAt(dir string, fsGroup *int64) error {
|
||||
glog.V(3).Infof("Setting up a downwardAPI volume %v for pod %v/%v at %v", b.volName, b.pod.Namespace, b.pod.Name, dir)
|
||||
// Wrap EmptyDir. Here we rely on the idempotency of the wrapped plugin to avoid repeatedly mounting
|
||||
wrapped, err := b.plugin.host.NewWrapperBuilder(b.volName, wrappedVolumeSpec, b.pod, *b.opts)
|
||||
wrapped, err := b.plugin.host.NewWrapperMounter(b.volName, wrappedVolumeSpec, b.pod, *b.opts)
|
||||
if err != nil {
|
||||
glog.Errorf("Couldn't setup downwardAPI volume %v for pod %v/%v: %s", b.volName, b.pod.Namespace, b.pod.Name, err.Error())
|
||||
return err
|
||||
@@ -357,28 +357,28 @@ func (d *downwardAPIVolume) GetPath() string {
|
||||
}
|
||||
|
||||
// downwardAPIVolumeUnmounter handles cleaning up downwardAPI volumes
|
||||
type downwardAPIVolumeCleaner struct {
|
||||
type downwardAPIVolumeUnmounter struct {
|
||||
*downwardAPIVolume
|
||||
}
|
||||
|
||||
// downwardAPIVolumeCleaner implements volume.Cleaner interface
|
||||
var _ volume.Cleaner = &downwardAPIVolumeCleaner{}
|
||||
// downwardAPIVolumeUnmounter implements volume.Unmounter interface
|
||||
var _ volume.Unmounter = &downwardAPIVolumeUnmounter{}
|
||||
|
||||
func (c *downwardAPIVolumeCleaner) TearDown() error {
|
||||
func (c *downwardAPIVolumeUnmounter) TearDown() error {
|
||||
return c.TearDownAt(c.GetPath())
|
||||
}
|
||||
|
||||
func (c *downwardAPIVolumeCleaner) TearDownAt(dir string) error {
|
||||
func (c *downwardAPIVolumeUnmounter) TearDownAt(dir string) error {
|
||||
glog.V(3).Infof("Tearing down volume %v for pod %v at %v", c.volName, c.podUID, dir)
|
||||
|
||||
// Wrap EmptyDir, let it do the teardown.
|
||||
wrapped, err := c.plugin.host.NewWrapperCleaner(c.volName, wrappedVolumeSpec, c.podUID)
|
||||
wrapped, err := c.plugin.host.NewWrapperUnmounter(c.volName, wrappedVolumeSpec, c.podUID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return wrapped.TearDownAt(dir)
|
||||
}
|
||||
|
||||
func (b *downwardAPIVolumeBuilder) getMetaDir() string {
|
||||
func (b *downwardAPIVolumeMounter) getMetaDir() string {
|
||||
return path.Join(b.plugin.host.GetPodPluginDir(b.podUID, utilstrings.EscapeQualifiedNameForDisk(downwardAPIPluginName)), b.volName)
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user