add rados block device (RBD) volume plugin

Signed-off-by: Huamin Chen <hchen@redhat.com>
Author: Huamin Chen <hchen@redhat.com>
Date:   2015-04-07 13:22:23 -04:00
Parent: 9b1fb6dca1
Commit: 4a800fd10e
22 changed files with 1099 additions and 1 deletion


@@ -0,0 +1,118 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// diskManager interface and the diskSetUp/diskTearDown functions abstract the commonly used procedures to set up a block volume
// rbd volume implements diskManager, calls diskSetUp when creating a volume, and calls diskTearDown inside the volume cleaner.
// TODO: consolidate, refactor, and share diskManager among iSCSI, GCE PD, and RBD
//
package rbd
import (
"os"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/golang/glog"
)
// Abstract interface to disk operations.
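// RBDUtil in rbd_util.go provides the production implementation; rbd_test.go substitutes a fakeDiskManager.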
type diskManager interface {
MakeGlobalPDName(disk rbd) string
// Attaches the disk to the kubelet's host machine.
AttachDisk(disk rbd) error
// Detaches the disk from the kubelet's host machine.
DetachDisk(disk rbd, mntPath string) error
}
// utility to mount a disk-based filesystem
func diskSetUp(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
globalPDPath := manager.MakeGlobalPDName(disk)
// TODO: handle failed mounts here.
mountpoint, err := mounter.IsMountPoint(volPath)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mountpoint: %s", volPath)
return err
}
if mountpoint {
return nil
}
if err := manager.AttachDisk(disk); err != nil {
glog.Errorf("failed to attach disk")
return err
}
if err := os.MkdirAll(volPath, 0750); err != nil {
glog.Errorf("failed to mkdir:%s", volPath)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same disk.
options := []string{"bind"}
if disk.readOnly {
options = append(options, "ro")
}
err = mounter.Mount(globalPDPath, volPath, "", options)
if err != nil {
glog.Errorf("failed to bind mount:%s", globalPDPath)
return err
}
return nil
}
// utility to tear down a disk-based filesystem
func diskTearDown(manager diskManager, disk rbd, volPath string, mounter mount.Interface) error {
mountpoint, err := mounter.IsMountPoint(volPath)
if err != nil {
glog.Errorf("cannot validate mountpoint %s", volPath)
return err
}
if !mountpoint {
return os.Remove(volPath)
}
refs, err := mount.GetMountRefs(mounter, volPath)
if err != nil {
glog.Errorf("failed to get reference count %s", volPath)
return err
}
if err := mounter.Unmount(volPath); err != nil {
glog.Errorf("failed to umount %s", volPath)
return err
}
// If len(refs) is 1, then all bind mounts have been removed, and the
// remaining reference is the global mount. It is safe to detach.
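// Illustrative layout (assuming the default kubelet root dir):
//   global mount: /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/<pool>-image-<image>
//   bind mounts:  /var/lib/kubelet/pods/<pod uid>/volumes/kubernetes.io~rbd/<volume name>
// GetMountRefs returns one entry per remaining mount of the underlying device.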
if len(refs) == 1 {
mntPath := refs[0]
if err := manager.DetachDisk(disk, mntPath); err != nil {
glog.Errorf("failed to detach disk from %s", mntPath)
return err
}
}
mountpoint, mntErr := mounter.IsMountPoint(volPath)
if mntErr != nil {
glog.Errorf("isMountpoint check failed: %v", mntErr)
return mntErr
}
if !mountpoint {
if err := os.Remove(volPath); err != nil {
return err
}
}
return nil
}

pkg/volume/rbd/rbd.go (new file, 203 lines)

@@ -0,0 +1,203 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&RBDPlugin{nil, exec.New()}}
}
type RBDPlugin struct {
host volume.VolumeHost
exe exec.Interface
}
var _ volume.VolumePlugin = &RBDPlugin{}
const (
RBDPluginName = "kubernetes.io/rbd"
)
func (plugin *RBDPlugin) Init(host volume.VolumeHost) {
plugin.host = host
}
func (plugin *RBDPlugin) Name() string {
return RBDPluginName
}
func (plugin *RBDPlugin) CanSupport(spec *volume.Spec) bool {
if spec.VolumeSource.RBD == nil {
return false
}
// see if rbd is there
_, err := plugin.execCommand("rbd", []string{"-h"})
if err == nil {
return true
}
return false
}
func (plugin *RBDPlugin) GetAccessModes() []api.PersistentVolumeAccessMode {
return []api.PersistentVolumeAccessMode{
api.ReadWriteOnce,
api.ReadOnlyMany,
}
}
func (plugin *RBDPlugin) NewBuilder(spec *volume.Spec, pod *api.Pod, _ volume.VolumeOptions, mounter mount.Interface) (volume.Builder, error) {
secret := ""
if spec.VolumeSource.RBD.SecretRef != nil {
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
return nil, fmt.Errorf("Cannot get kube client")
}
secretName, err := kubeClient.Secrets(pod.Namespace).Get(spec.VolumeSource.RBD.SecretRef.Name)
if err != nil {
glog.Errorf("Couldn't get secret %v/%v", pod.Namespace, spec.VolumeSource.RBD.SecretRef)
return nil, err
}
for name, data := range secretName.Data {
secret = string(data)
glog.V(1).Infof("ceph secret info: %s/%s", name, secret)
}
}
// Inject real implementations here, test through the internal function.
return plugin.newBuilderInternal(spec, pod.UID, &RBDUtil{}, mounter, secret)
}
func (plugin *RBDPlugin) newBuilderInternal(spec *volume.Spec, podUID types.UID, manager diskManager, mounter mount.Interface, secret string) (volume.Builder, error) {
pool := spec.VolumeSource.RBD.RBDPool
if pool == "" {
pool = "rbd"
}
id := spec.VolumeSource.RBD.RadosUser
if id == "" {
id = "admin"
}
keyring := spec.VolumeSource.RBD.Keyring
if keyring == "" {
keyring = "/etc/ceph/keyring"
}
return &rbd{
podUID: podUID,
volName: spec.Name,
mon: spec.VolumeSource.RBD.CephMonitors,
image: spec.VolumeSource.RBD.RBDImage,
pool: pool,
id: id,
keyring: keyring,
secret: secret,
fsType: spec.VolumeSource.RBD.FSType,
readOnly: spec.VolumeSource.RBD.ReadOnly,
manager: manager,
mounter: mounter,
plugin: plugin,
}, nil
}
func (plugin *RBDPlugin) NewCleaner(volName string, podUID types.UID, mounter mount.Interface) (volume.Cleaner, error) {
// Inject real implementations here, test through the internal function.
return plugin.newCleanerInternal(volName, podUID, &RBDUtil{}, mounter)
}
func (plugin *RBDPlugin) newCleanerInternal(volName string, podUID types.UID, manager diskManager, mounter mount.Interface) (volume.Cleaner, error) {
return &rbd{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
}, nil
}
type rbd struct {
volName string
podUID types.UID
mon []string
pool string
id string
image string
keyring string
secret string
fsType string
readOnly bool
plugin *RBDPlugin
mounter mount.Interface
// Utility interface that provides API calls to the provider to attach/detach disks.
manager diskManager
}
func (rbd *rbd) GetPath() string {
name := RBDPluginName
// safe to use PodVolumeDir now: volume teardown occurs before pod is cleaned up
return rbd.plugin.host.GetPodVolumeDir(rbd.podUID, util.EscapeQualifiedNameForDisk(name), rbd.volName)
}
func (rbd *rbd) SetUp() error {
return rbd.SetUpAt(rbd.GetPath())
}
func (rbd *rbd) SetUpAt(dir string) error {
// diskSetUp checks mountpoints and prevents repeated calls
err := diskSetUp(rbd.manager, *rbd, dir, rbd.mounter)
if err != nil {
glog.Errorf("rbd: failed to setup")
return err
}
globalPDPath := rbd.manager.MakeGlobalPDName(*rbd)
// make mountpoint rw/ro work as expected
//FIXME revisit pkg/util/mount and ensure rw/ro is implemented as expected
mode := "rw"
if rbd.readOnly {
mode = "ro"
}
rbd.plugin.execCommand("mount", []string{"-o", "remount," + mode, globalPDPath, dir})
return nil
}
// Unmounts the bind mount, and detaches the disk only if the disk
// resource was the last reference to that disk on the kubelet.
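// The reference counting itself happens in diskTearDown, which calls mount.GetMountRefs on the bind-mount path.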
func (rbd *rbd) TearDown() error {
return rbd.TearDownAt(rbd.GetPath())
}
func (rbd *rbd) TearDownAt(dir string) error {
return diskTearDown(rbd.manager, *rbd, dir, rbd.mounter)
}
func (plugin *RBDPlugin) execCommand(command string, args []string) ([]byte, error) {
cmd := plugin.exe.Command(command, args...)
return cmd.CombinedOutput()
}
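The remaining files in the commit are not shown here; registration with the kubelet presumably happens in one of them. Below is a minimal sketch of what that wiring typically looks like for in-tree volume plugins; the package, file, and aggregator name are assumptions, not part of this diff, and only rbd.ProbeVolumePlugins() comes from the code above.

// Sketch only: location and surrounding code are assumed.
package app

import (
	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
	"github.com/GoogleCloudPlatform/kubernetes/pkg/volume/rbd"
)

// probeVolumePlugins aggregates the in-tree volume plugins the kubelet loads at startup.
func probeVolumePlugins() []volume.VolumePlugin {
	allPlugins := []volume.VolumePlugin{}
	allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
	return allPlugins
}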

pkg/volume/rbd/rbd_test.go (new file, 130 lines)

@@ -0,0 +1,130 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package rbd
import (
"os"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
)
func TestCanSupport(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.Name() != "kubernetes.io/rbd" {
t.Errorf("Wrong name: %s", plug.Name())
}
}
type fakeDiskManager struct{}
func (fake *fakeDiskManager) MakeGlobalPDName(disk rbd) string {
return "/tmp/fake_rbd_path"
}
func (fake *fakeDiskManager) AttachDisk(disk rbd) error {
globalPath := disk.manager.MakeGlobalPDName(disk)
err := os.MkdirAll(globalPath, 0750)
if err != nil {
return err
}
return nil
}
func (fake *fakeDiskManager) DetachDisk(disk rbd, mntPath string) error {
globalPath := disk.manager.MakeGlobalPDName(disk)
err := os.RemoveAll(globalPath)
if err != nil {
return err
}
return nil
}
func TestPlugin(t *testing.T) {
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), volume.NewFakeVolumeHost("/tmp/fake", nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/rbd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &api.Volume{
Name: "vol1",
VolumeSource: api.VolumeSource{
RBD: &api.RBDVolumeSource{
CephMonitors: []string{"a", "b"},
RBDImage: "bar",
FSType: "ext4",
},
},
}
builder, err := plug.(*RBDPlugin).newBuilderInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{}, "secrets")
if err != nil {
t.Errorf("Failed to make a new Builder: %v", err)
}
if builder == nil {
t.Errorf("Got a nil Builder: %v")
}
path := builder.GetPath()
if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~rbd/vol1" {
t.Errorf("Got unexpected path: %s", path)
}
if err := builder.SetUp(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
cleaner, err := plug.(*RBDPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakeDiskManager{}, &mount.FakeMounter{})
if err != nil {
t.Errorf("Failed to make a new Cleaner: %v", err)
}
if cleaner == nil {
t.Errorf("Got a nil Cleaner: %v")
}
if err := cleaner.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("SetUp() failed: %v", err)
}
}
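For reference, here is a spec that exercises the optional fields newBuilderInternal reads (pool, Rados user, keyring, read-only). Only the field names come from the code above; the monitor address, pool, and image values are invented, and the snippet assumes the imports already used in this test file.

// Illustrative only; values are made up.
func exampleRBDVolumeSpec() *volume.Spec {
	return volume.NewSpecFromVolume(&api.Volume{
		Name: "ceph-vol",
		VolumeSource: api.VolumeSource{
			RBD: &api.RBDVolumeSource{
				CephMonitors: []string{"10.16.154.78:6789"},
				RBDImage:     "foo",
				RBDPool:      "kube",              // defaults to "rbd" when empty
				RadosUser:    "admin",             // defaults to "admin" when empty
				Keyring:      "/etc/ceph/keyring", // unused when a SecretRef supplies a key
				FSType:       "ext4",
				ReadOnly:     true, // bind-mounted and remounted read-only
			},
		},
	})
}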

pkg/volume/rbd/rbd_util.go (new file, 140 lines)

@@ -0,0 +1,140 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// utility functions to set up rbd volumes
// mainly implements the diskManager interface
//
package rbd
import (
"errors"
"fmt"
"math/rand"
"os"
"path"
"strings"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
"github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/golang/glog"
)
// stat a path; if it does not exist, retry up to maxRetries times, sleeping one second between attempts
func waitForPathToExist(devicePath string, maxRetries int) bool {
for i := 0; i < maxRetries; i++ {
_, err := os.Stat(devicePath)
if err == nil {
return true
}
if err != nil && !os.IsNotExist(err) {
return false
}
time.Sleep(time.Second)
}
return false
}
// make a directory like /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/pool-image-image
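// e.g. pool "kube" and image "foo" map to /var/lib/kubelet/plugins/kubernetes.io/rbd/rbd/kube-image-foo (assuming the default kubelet root dir)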
func makePDNameInternal(host volume.VolumeHost, pool string, image string) string {
return path.Join(host.GetPluginDir(RBDPluginName), "rbd", pool+"-image-"+image)
}
type RBDUtil struct{}
func (util *RBDUtil) MakeGlobalPDName(rbd rbd) string {
return makePDNameInternal(rbd.plugin.host, rbd.pool, rbd.image)
}
func (util *RBDUtil) AttachDisk(rbd rbd) error {
var err error
devicePath := strings.Join([]string{"/dev/rbd", rbd.pool, rbd.image}, "/")
exist := waitForPathToExist(devicePath, 1)
if !exist {
// modprobe
_, err = rbd.plugin.execCommand("modprobe", []string{"rbd"})
if err != nil {
return fmt.Errorf("rbd: failed to modprobe rbd error:%v", err)
}
// rbd map
l := len(rbd.mon)
// avoid a mount storm: pick a monitor to start from at random
start := rand.Int() % l
// iterate over all monitors until rbd map succeeds.
for i := start; i < start+l; i++ {
mon := rbd.mon[i%l]
glog.V(1).Infof("rbd: map mon %s", mon)
if rbd.secret != "" {
_, err = rbd.plugin.execCommand("rbd",
[]string{"map", rbd.image, "--pool", rbd.pool, "--id", rbd.id, "-m", mon, "--key=" + rbd.secret})
} else {
_, err = rbd.plugin.execCommand("rbd",
[]string{"map", rbd.image, "--pool", rbd.pool, "--id", rbd.id, "-m", mon, "-k", rbd.keyring})
}
if err == nil {
break
}
}
}
if err != nil {
return err
}
exist = waitForPathToExist(devicePath, 10)
if !exist {
return errors.New("Could not map image: Timeout after 10s")
}
// mount it
globalPDPath := rbd.manager.MakeGlobalPDName(rbd)
mountpoint, err := rbd.mounter.IsMountPoint(globalPDPath)
if err != nil {
return fmt.Errorf("rbd: %s failed to check mountpoint", globalPDPath)
}
if mountpoint {
return nil
}
if err := os.MkdirAll(globalPDPath, 0750); err != nil {
return fmt.Errorf("rbd: failed to mkdir %s, error", globalPDPath)
}
if err = rbd.mounter.Mount(devicePath, globalPDPath, rbd.fsType, nil); err != nil {
err = fmt.Errorf("rbd: failed to mount rbd volume %s [%s] to %s, error %v", devicePath, rbd.fsType, globalPDPath, err)
}
return err
}
func (util *RBDUtil) DetachDisk(rbd rbd, mntPath string) error {
device, cnt, err := mount.GetDeviceNameFromMount(rbd.mounter, mntPath)
if err != nil {
return fmt.Errorf("rbd detach disk: failed to get device from mnt: %s\nError: %v", mntPath, err)
}
if err = rbd.mounter.Unmount(mntPath); err != nil {
return fmt.Errorf("rbd detach disk: failed to umount: %s\nError: %v", mntPath, err)
}
// if the device is no longer used, try to unmap it
if cnt <= 1 {
// rbd unmap
_, err = rbd.plugin.execCommand("rbd", []string{"unmap", device})
if err != nil {
return fmt.Errorf("rbd: failed to unmap device %s:Error: %v", device, err)
}
glog.Infof("rbd: successfully unmap device %s", device)
}
return nil
}