Added PersistentVolumeController
This commit is contained in:
@@ -86,8 +86,7 @@ func NewPersistentVolumeClaimBinder(kubeClient client.Interface, syncPeriod time
|
||||
framework.ResourceEventHandlerFuncs{
|
||||
AddFunc: binder.addClaim,
|
||||
UpdateFunc: binder.updateClaim,
|
||||
// no DeleteFunc needed. a claim requires no clean-up.
|
||||
// syncVolume handles the missing claim
|
||||
DeleteFunc: binder.deleteClaim,
|
||||
},
|
||||
)
|
||||
|
||||
@@ -145,6 +144,33 @@ func (binder *PersistentVolumeClaimBinder) updateClaim(oldObj, newObj interface{
|
||||
}
|
||||
}
|
||||
|
||||
// deleteClaim responds to a PersistentVolumeClaim deletion event by immediately
// syncing the volume that was bound to the claim, instead of letting the volume
// wait for the next periodic resync before being Released.
func (binder *PersistentVolumeClaimBinder) deleteClaim(obj interface{}) {
	binder.lock.Lock()
	defer binder.lock.Unlock()
	var volume *api.PersistentVolume
	// Normal case: the deleted object is a claim; resolve its bound volume by
	// name through the local volume index.
	if pvc, ok := obj.(*api.PersistentVolumeClaim); ok {
		if pvObj, exists, _ := binder.volumeIndex.GetByKey(pvc.Spec.VolumeName); exists {
			if pv, ok := pvObj.(*api.PersistentVolume); ok {
				volume = pv
			}
		}
	}
	// Tombstone case: the watch delivered a DeletedFinalStateUnknown wrapper.
	// NOTE(review): since this handler receives deleted claims, unk.Obj would be
	// the claim's final state — casting it to *api.PersistentVolume looks
	// suspicious; confirm whether it should be unwrapped as a claim and resolved
	// via the volume index like the branch above.
	if unk, ok := obj.(cache.DeletedFinalStateUnknown); ok && unk.Obj != nil {
		if pv, ok := unk.Obj.(*api.PersistentVolume); ok {
			volume = pv
		}
	}

	// sync the volume when its claim is deleted. Explicitly sync'ing the volume here in response to
	// claim deletion prevents the volume from waiting until the next sync period for its Release.
	if volume != nil {
		err := syncVolume(binder.volumeIndex, binder.client, volume)
		if err != nil {
			glog.Errorf("PVClaimBinder could not update volume %s from deleteClaim handler: %+v", volume.Name, err)
		}
	}
}
|
||||
|
||||
func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderClient, volume *api.PersistentVolume) (err error) {
|
||||
glog.V(5).Infof("Synchronizing PersistentVolume[%s], current phase: %s\n", volume.Name, volume.Status.Phase)
|
||||
|
||||
@@ -166,6 +192,11 @@ func syncVolume(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCl
|
||||
volumeIndex.Add(volume)
|
||||
}
|
||||
|
||||
if isBeingProvisioned(volume) {
|
||||
glog.V(4).Infof("Skipping PersistentVolume[%s], waiting for provisioning to finish", volume.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
switch currentPhase {
|
||||
case api.VolumePending:
|
||||
|
||||
@@ -275,38 +306,46 @@ func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCli
|
||||
|
||||
switch claim.Status.Phase {
|
||||
case api.ClaimPending:
|
||||
// claims w/ a storage-class annotation for provisioning with *only* match volumes with a ClaimRef of the claim.
|
||||
volume, err := volumeIndex.findBestMatchForClaim(claim)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if volume == nil {
|
||||
glog.V(5).Infof("A volume match does not exist for persistent claim: %s", claim.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// create a reference to the claim and assign it to the volume being bound.
|
||||
// the volume is a pointer and assigning the reference fixes a race condition where another
|
||||
// claim might match this volume but before the claimRef is persistent in the next case statement
|
||||
if isBeingProvisioned(volume) {
|
||||
glog.V(5).Infof("PersistentVolume[%s] for PersistentVolumeClaim[%s/%s] is still being provisioned.", volume.Name, claim.Namespace, claim.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
claimRef, err := api.GetReference(claim)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unexpected error getting claim reference: %v\n", err)
|
||||
}
|
||||
|
||||
// make a binding reference to the claim and ensure to update the local index to prevent dupe bindings
|
||||
clone, err := conversion.NewCloner().DeepCopy(volume)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error cloning pv: %v", err)
|
||||
}
|
||||
volumeClone, ok := clone.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
|
||||
}
|
||||
volumeClone.Spec.ClaimRef = claimRef
|
||||
if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
|
||||
return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err)
|
||||
} else {
|
||||
volume = updatedVolume
|
||||
volumeIndex.Update(updatedVolume)
|
||||
// Make a binding reference to the claim by persisting claimRef on the volume.
|
||||
// The local cache must be updated with the new bind to prevent subsequent
|
||||
// claims from binding to the volume.
|
||||
if volume.Spec.ClaimRef == nil {
|
||||
clone, err := conversion.NewCloner().DeepCopy(volume)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error cloning pv: %v", err)
|
||||
}
|
||||
volumeClone, ok := clone.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
|
||||
}
|
||||
volumeClone.Spec.ClaimRef = claimRef
|
||||
if updatedVolume, err := binderClient.UpdatePersistentVolume(volumeClone); err != nil {
|
||||
return fmt.Errorf("Unexpected error saving PersistentVolume.Status: %+v", err)
|
||||
} else {
|
||||
volume = updatedVolume
|
||||
volumeIndex.Update(updatedVolume)
|
||||
}
|
||||
}
|
||||
|
||||
// the bind is persisted on the volume above and will always match the claim in a search.
|
||||
@@ -341,6 +380,14 @@ func syncClaim(volumeIndex *persistentVolumeOrderedIndex, binderClient binderCli
|
||||
return nil
|
||||
}
|
||||
|
||||
func isBeingProvisioned(volume *api.PersistentVolume) bool {
|
||||
value, found := volume.Annotations[pvProvisioningRequiredAnnotationKey]
|
||||
if found && value != pvProvisioningCompletedAnnotationValue {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Run starts all of this binder's control loops
|
||||
func (controller *PersistentVolumeClaimBinder) Run() {
|
||||
glog.V(5).Infof("Starting PersistentVolumeClaimBinder\n")
|
||||
|
@@ -0,0 +1,498 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
"k8s.io/kubernetes/pkg/cloudprovider"
|
||||
"k8s.io/kubernetes/pkg/controller/framework"
|
||||
"k8s.io/kubernetes/pkg/conversion"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
"k8s.io/kubernetes/pkg/util/io"
|
||||
"k8s.io/kubernetes/pkg/util/mount"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// PersistentVolumeProvisionerController reconciles the state of all PersistentVolumes and PersistentVolumeClaims.
type PersistentVolumeProvisionerController struct {
	// informer controller and local cache for PersistentVolumes
	volumeController *framework.Controller
	volumeStore      cache.Store
	// informer controller and local cache for PersistentVolumeClaims
	claimController *framework.Controller
	claimStore      cache.Store
	// client abstracts PV/PVC API access (mockable for tests)
	client controllerClient
	// cloud provider handed to volume plugins via the VolumeHost interface
	cloud cloudprovider.Interface
	// plugin used to provision new volumes for claims that request it
	provisioner volume.ProvisionableVolumePlugin
	pluginMgr   volume.VolumePluginMgr
	// stopChannels holds one stop channel per running informer, keyed by
	// volumesStopChannel / claimsStopChannel; populated by Run, drained by Stop
	stopChannels map[string]chan struct{}
	// mutex serializes the reconcile handlers
	mutex sync.RWMutex
}
||||
|
||||
// constant name values for the controllers stopChannels map.
// the controller uses these for graceful shutdown
const volumesStopChannel = "volumes" // key for the volume informer's stop channel
const claimsStopChannel = "claims"   // key for the claim informer's stop channel
||||
|
||||
// NewPersistentVolumeProvisionerController creates a new PersistentVolumeProvisionerController.
// It initializes the given volume plugins (using the controller itself as their VolumeHost),
// initializes the provisioner plugin, and wires up informers for PersistentVolumes and
// PersistentVolumeClaims that relist every syncPeriod. Returns an error only when plugin
// initialization fails.
func NewPersistentVolumeProvisionerController(client controllerClient, syncPeriod time.Duration, plugins []volume.VolumePlugin, provisioner volume.ProvisionableVolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeProvisionerController, error) {
	controller := &PersistentVolumeProvisionerController{
		client:      client,
		cloud:       cloud,
		provisioner: provisioner,
	}

	// the controller implements the VolumeHost interface, so it is passed as host.
	if err := controller.pluginMgr.InitPlugins(plugins, controller); err != nil {
		return nil, fmt.Errorf("Could not initialize volume plugins for PersistentVolumeProvisionerController: %+v", err)
	}

	glog.V(5).Infof("Initializing provisioner: %s", controller.provisioner.Name())
	controller.provisioner.Init(controller)

	// informer over all PersistentVolumes, feeding the volume reconcile handlers.
	controller.volumeStore, controller.volumeController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options unversioned.ListOptions) (runtime.Object, error) {
				return client.ListPersistentVolumes(options)
			},
			WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
				return client.WatchPersistentVolumes(options)
			},
		},
		&api.PersistentVolume{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.handleAddVolume,
			UpdateFunc: controller.handleUpdateVolume,
			// delete handler not needed in this controller.
			// volume deletion is handled by the recycler controller
		},
	)
	// informer over PersistentVolumeClaims across all namespaces.
	controller.claimStore, controller.claimController = framework.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options unversioned.ListOptions) (runtime.Object, error) {
				return client.ListPersistentVolumeClaims(api.NamespaceAll, options)
			},
			WatchFunc: func(options unversioned.ListOptions) (watch.Interface, error) {
				return client.WatchPersistentVolumeClaims(api.NamespaceAll, options)
			},
		},
		&api.PersistentVolumeClaim{},
		syncPeriod,
		framework.ResourceEventHandlerFuncs{
			AddFunc:    controller.handleAddClaim,
			UpdateFunc: controller.handleUpdateClaim,
			// delete handler not needed.
			// normal recycling applies when a claim is deleted.
			// recycling is handled by the binding controller.
		},
	)

	return controller, nil
}
|
||||
|
||||
func (controller *PersistentVolumeProvisionerController) handleAddVolume(obj interface{}) {
|
||||
controller.mutex.Lock()
|
||||
defer controller.mutex.Unlock()
|
||||
cachedPv, _, _ := controller.volumeStore.Get(obj)
|
||||
if pv, ok := cachedPv.(*api.PersistentVolume); ok {
|
||||
err := controller.reconcileVolume(pv)
|
||||
if err != nil {
|
||||
glog.Errorf("Error reconciling volume %s: %+v", pv.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleUpdateVolume is the informer Update callback for PersistentVolumes.
// oldObj is intentionally ignored.
func (controller *PersistentVolumeProvisionerController) handleUpdateVolume(oldObj, newObj interface{}) {
	// The flow for Update is the same as Add.
	// A volume is only provisioned if not done so already.
	controller.handleAddVolume(newObj)
}
|
||||
|
||||
func (controller *PersistentVolumeProvisionerController) handleAddClaim(obj interface{}) {
|
||||
controller.mutex.Lock()
|
||||
defer controller.mutex.Unlock()
|
||||
cachedPvc, exists, _ := controller.claimStore.Get(obj)
|
||||
if !exists {
|
||||
glog.Errorf("PersistentVolumeClaim does not exist in the local cache: %+v", obj)
|
||||
return
|
||||
}
|
||||
if pvc, ok := cachedPvc.(*api.PersistentVolumeClaim); ok {
|
||||
err := controller.reconcileClaim(pvc)
|
||||
if err != nil {
|
||||
glog.Errorf("Error encoutered reconciling claim %s: %+v", pvc.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleUpdateClaim is the informer Update callback for PersistentVolumeClaims.
// oldObj is intentionally ignored.
func (controller *PersistentVolumeProvisionerController) handleUpdateClaim(oldObj, newObj interface{}) {
	// The flow for Update is the same as Add.
	// A volume is only provisioned for a claim if not done so already.
	controller.handleAddClaim(newObj)
}
|
||||
|
||||
func (controller *PersistentVolumeProvisionerController) reconcileClaim(claim *api.PersistentVolumeClaim) error {
|
||||
if controller.provisioner == nil {
|
||||
return fmt.Errorf("No provisioner configured for controller")
|
||||
}
|
||||
|
||||
// no provisioning requested, return Pending. Claim may be pending indefinitely without a match.
|
||||
if !keyExists(qosProvisioningKey, claim.Annotations) {
|
||||
glog.V(5).Infof("PersistentVolumeClaim[%s] no provisioning required", claim.Name)
|
||||
return nil
|
||||
}
|
||||
if len(claim.Spec.VolumeName) != 0 {
|
||||
glog.V(5).Infof("PersistentVolumeClaim[%s] already bound. No provisioning required", claim.Name)
|
||||
return nil
|
||||
}
|
||||
if isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, claim.Annotations) {
|
||||
glog.V(5).Infof("PersistentVolumeClaim[%s] is already provisioned.", claim.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
glog.V(5).Infof("PersistentVolumeClaim[%s] provisioning", claim.Name)
|
||||
provisioner, err := newProvisioner(controller.provisioner, claim)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unexpected error getting new provisioner for claim %s: %v\n", claim.Name, err)
|
||||
}
|
||||
newVolume, err := provisioner.NewPersistentVolumeTemplate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unexpected error getting new volume template for claim %s: %v\n", claim.Name, err)
|
||||
}
|
||||
|
||||
claimRef, err := api.GetReference(claim)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unexpected error getting claim reference for %s: %v\n", claim.Name, err)
|
||||
}
|
||||
|
||||
storageClass, _ := claim.Annotations[qosProvisioningKey]
|
||||
|
||||
// the creation of this volume is the bind to the claim.
|
||||
// The claim will match the volume during the next sync period when the volume is in the local cache
|
||||
newVolume.Spec.ClaimRef = claimRef
|
||||
newVolume.Annotations[pvProvisioningRequiredAnnotationKey] = "true"
|
||||
newVolume.Annotations[qosProvisioningKey] = storageClass
|
||||
newVolume, err = controller.client.CreatePersistentVolume(newVolume)
|
||||
glog.V(5).Infof("Unprovisioned PersistentVolume[%s] created for PVC[%s], which will be fulfilled in the storage provider", newVolume.Name, claim.Name)
|
||||
if err != nil {
|
||||
return fmt.Errorf("PersistentVolumeClaim[%s] failed provisioning: %+v", claim.Name, err)
|
||||
}
|
||||
|
||||
claim.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
|
||||
_, err = controller.client.UpdatePersistentVolumeClaim(claim)
|
||||
if err != nil {
|
||||
glog.Error("error updating persistent volume claim: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (controller *PersistentVolumeProvisionerController) reconcileVolume(pv *api.PersistentVolume) error {
|
||||
glog.V(5).Infof("PersistentVolume[%s] reconciling", pv.Name)
|
||||
|
||||
if pv.Spec.ClaimRef == nil {
|
||||
glog.V(5).Infof("PersistentVolume[%s] is not bound to a claim. No provisioning required", pv.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: fix this leaky abstraction. Had to make our own store key because ClaimRef fails the default keyfunc (no Meta on object).
|
||||
obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name))
|
||||
if !exists {
|
||||
return fmt.Errorf("PersistentVolumeClaim[%s/%s] not found in local cache", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
|
||||
}
|
||||
|
||||
claim, ok := obj.(*api.PersistentVolumeClaim)
|
||||
if !ok {
|
||||
return fmt.Errorf("PersistentVolumeClaim expected, but got %v", obj)
|
||||
}
|
||||
|
||||
// no provisioning required, volume is ready and Bound
|
||||
if !keyExists(pvProvisioningRequiredAnnotationKey, pv.Annotations) {
|
||||
glog.V(5).Infof("PersistentVolume[%s] does not require provisioning", pv.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// provisioning is completed, volume is ready.
|
||||
if isProvisioningComplete(pv) {
|
||||
glog.V(5).Infof("PersistentVolume[%s] is bound and provisioning is complete", pv.Name)
|
||||
if pv.Spec.ClaimRef.Namespace != claim.Namespace || pv.Spec.ClaimRef.Name != claim.Name {
|
||||
return fmt.Errorf("pre-bind mismatch - expected %s but found %s/%s", claimToClaimKey(claim), pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// provisioning is incomplete. Attempt to provision the volume.
|
||||
glog.V(5).Infof("PersistentVolume[%s] provisioning in progress", pv.Name)
|
||||
err := provisionVolume(pv, controller)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error provisioning PersistentVolume[%s]: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// provisionVolume provisions a volume that has been created in the cluster but not yet fulfilled by
|
||||
// the storage provider.
|
||||
func provisionVolume(pv *api.PersistentVolume, controller *PersistentVolumeProvisionerController) error {
|
||||
if isProvisioningComplete(pv) {
|
||||
return fmt.Errorf("PersistentVolume[%s] is already provisioned", pv.Name)
|
||||
}
|
||||
|
||||
if _, exists := pv.Annotations[qosProvisioningKey]; !exists {
|
||||
return fmt.Errorf("PersistentVolume[%s] does not contain a provisioning request. Provisioning not required.", pv.Name)
|
||||
}
|
||||
|
||||
if controller.provisioner == nil {
|
||||
return fmt.Errorf("No provisioner found for volume: %s", pv.Name)
|
||||
}
|
||||
|
||||
// Find the claim in local cache
|
||||
obj, exists, _ := controller.claimStore.GetByKey(fmt.Sprintf("%s/%s", pv.Spec.ClaimRef.Namespace, pv.Spec.ClaimRef.Name))
|
||||
if !exists {
|
||||
return fmt.Errorf("Could not find PersistentVolumeClaim[%s/%s] in local cache", pv.Spec.ClaimRef.Name, pv.Name)
|
||||
}
|
||||
claim := obj.(*api.PersistentVolumeClaim)
|
||||
|
||||
provisioner, _ := newProvisioner(controller.provisioner, claim)
|
||||
err := provisioner.Provision(pv)
|
||||
if err != nil {
|
||||
glog.Errorf("Could not provision %s", pv.Name)
|
||||
pv.Status.Phase = api.VolumeFailed
|
||||
pv.Status.Message = err.Error()
|
||||
if pv, apiErr := controller.client.UpdatePersistentVolumeStatus(pv); apiErr != nil {
|
||||
return fmt.Errorf("PersistentVolume[%s] failed provisioning and also failed status update: %v - %v", pv.Name, err, apiErr)
|
||||
}
|
||||
return fmt.Errorf("PersistentVolume[%s] failed provisioning : %v", pv.Name, err, err)
|
||||
}
|
||||
|
||||
clone, err := conversion.NewCloner().DeepCopy(pv)
|
||||
volumeClone, ok := clone.(*api.PersistentVolume)
|
||||
if !ok {
|
||||
return fmt.Errorf("Unexpected pv cast error : %v\n", volumeClone)
|
||||
}
|
||||
volumeClone.Annotations[pvProvisioningRequiredAnnotationKey] = pvProvisioningCompletedAnnotationValue
|
||||
|
||||
pv, err = controller.client.UpdatePersistentVolume(volumeClone)
|
||||
if err != nil {
|
||||
// TODO: https://github.com/kubernetes/kubernetes/issues/14443
|
||||
// the volume was created in the infrastructure and likely has a PV name on it,
|
||||
// but we failed to save the annotation that marks the volume as provisioned.
|
||||
return fmt.Errorf("Error updating PersistentVolume[%s] with provisioning completed annotation. There is a potential for dupes and orphans.", volumeClone.Name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run starts all of this controller's control loops
|
||||
func (controller *PersistentVolumeProvisionerController) Run() {
|
||||
glog.V(5).Infof("Starting PersistentVolumeProvisionerController\n")
|
||||
if controller.stopChannels == nil {
|
||||
controller.stopChannels = make(map[string]chan struct{})
|
||||
}
|
||||
|
||||
if _, exists := controller.stopChannels[volumesStopChannel]; !exists {
|
||||
controller.stopChannels[volumesStopChannel] = make(chan struct{})
|
||||
go controller.volumeController.Run(controller.stopChannels[volumesStopChannel])
|
||||
}
|
||||
|
||||
if _, exists := controller.stopChannels[claimsStopChannel]; !exists {
|
||||
controller.stopChannels[claimsStopChannel] = make(chan struct{})
|
||||
go controller.claimController.Run(controller.stopChannels[claimsStopChannel])
|
||||
}
|
||||
}
|
||||
|
||||
// Stop gracefully shuts down this controller
|
||||
func (controller *PersistentVolumeProvisionerController) Stop() {
|
||||
glog.V(5).Infof("Stopping PersistentVolumeProvisionerController\n")
|
||||
for name, stopChan := range controller.stopChannels {
|
||||
close(stopChan)
|
||||
delete(controller.stopChannels, name)
|
||||
}
|
||||
}
|
||||
|
||||
func newProvisioner(plugin volume.ProvisionableVolumePlugin, claim *api.PersistentVolumeClaim) (volume.Provisioner, error) {
|
||||
volumeOptions := volume.VolumeOptions{
|
||||
Capacity: claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
|
||||
AccessModes: claim.Spec.AccessModes,
|
||||
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
|
||||
}
|
||||
|
||||
provisioner, err := plugin.NewProvisioner(volumeOptions)
|
||||
return provisioner, err
|
||||
}
|
||||
|
||||
// controllerClient abstracts access to PVs and PVCs. Easy to mock for testing and wrap for real client.
|
||||
// controllerClient abstracts access to PVs and PVCs. Easy to mock for testing and wrap for real client.
type controllerClient interface {
	// PersistentVolume operations
	CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error)
	ListPersistentVolumes(options unversioned.ListOptions) (*api.PersistentVolumeList, error)
	WatchPersistentVolumes(options unversioned.ListOptions) (watch.Interface, error)
	GetPersistentVolume(name string) (*api.PersistentVolume, error)
	UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error)
	DeletePersistentVolume(volume *api.PersistentVolume) error
	UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error)

	// PersistentVolumeClaim operations (namespaced)
	GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error)
	ListPersistentVolumeClaims(namespace string, options unversioned.ListOptions) (*api.PersistentVolumeClaimList, error)
	WatchPersistentVolumeClaims(namespace string, options unversioned.ListOptions) (watch.Interface, error)
	UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)
	UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error)

	// provided to give VolumeHost and plugins access to the kube client
	GetKubeClient() client.Interface
}
|
||||
|
||||
// NewControllerClient wraps a real Kubernetes client in the controllerClient
// abstraction used by the provisioner controller.
func NewControllerClient(c client.Interface) controllerClient {
	return &realControllerClient{c}
}

// compile-time check that realControllerClient satisfies controllerClient.
var _ controllerClient = &realControllerClient{}

// realControllerClient is the production controllerClient backed by the
// standard Kubernetes API client.
type realControllerClient struct {
	client client.Interface
}
|
||||
|
||||
// realControllerClient methods: each one delegates directly to the
// corresponding typed API call on the underlying kube client; no extra logic.

func (c *realControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
	return c.client.PersistentVolumes().Get(name)
}

func (c *realControllerClient) ListPersistentVolumes(options unversioned.ListOptions) (*api.PersistentVolumeList, error) {
	return c.client.PersistentVolumes().List(options)
}

func (c *realControllerClient) WatchPersistentVolumes(options unversioned.ListOptions) (watch.Interface, error) {
	return c.client.PersistentVolumes().Watch(options)
}

func (c *realControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
	return c.client.PersistentVolumes().Create(pv)
}

func (c *realControllerClient) UpdatePersistentVolume(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	return c.client.PersistentVolumes().Update(volume)
}

// DeletePersistentVolume deletes by name; the passed volume is only used for its Name.
func (c *realControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
	return c.client.PersistentVolumes().Delete(volume.Name)
}

func (c *realControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	return c.client.PersistentVolumes().UpdateStatus(volume)
}

func (c *realControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
	return c.client.PersistentVolumeClaims(namespace).Get(name)
}

func (c *realControllerClient) ListPersistentVolumeClaims(namespace string, options unversioned.ListOptions) (*api.PersistentVolumeClaimList, error) {
	return c.client.PersistentVolumeClaims(namespace).List(options)
}

func (c *realControllerClient) WatchPersistentVolumeClaims(namespace string, options unversioned.ListOptions) (watch.Interface, error) {
	return c.client.PersistentVolumeClaims(namespace).Watch(options)
}

func (c *realControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
	return c.client.PersistentVolumeClaims(claim.Namespace).Update(claim)
}

func (c *realControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
	return c.client.PersistentVolumeClaims(claim.Namespace).UpdateStatus(claim)
}

func (c *realControllerClient) GetKubeClient() client.Interface {
	return c.client
}
|
||||
|
||||
// keyExists reports whether key is present in haystack.
// A nil map is safely treated as empty.
func keyExists(key string, haystack map[string]string) bool {
	if _, found := haystack[key]; found {
		return true
	}
	return false
}
|
||||
|
||||
// isProvisioningComplete reports whether the volume's provisioning-required
// annotation carries the completed sentinel value.
func isProvisioningComplete(pv *api.PersistentVolume) bool {
	return isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, pv.Annotations)
}
|
||||
|
||||
// isAnnotationMatch reports whether haystack maps key to exactly needle.
// A missing key (including a nil map) never matches.
func isAnnotationMatch(key, needle string, haystack map[string]string) bool {
	value, found := haystack[key]
	return found && value == needle
}
|
||||
|
||||
// isRecyclable reports whether the reclaim policy allows the volume to be
// reclaimed automatically (either deleted or recycled) after release.
func isRecyclable(policy api.PersistentVolumeReclaimPolicy) bool {
	return policy == api.PersistentVolumeReclaimDelete || policy == api.PersistentVolumeReclaimRecycle
}
|
||||
|
||||
// VolumeHost implementation
// PersistentVolumeRecycler is host to the volume plugins, but does not actually mount any volumes.
// Because no mounting is performed, most of the VolumeHost methods are not implemented.
func (c *PersistentVolumeProvisionerController) GetPluginDir(podUID string) string {
	return ""
}

func (c *PersistentVolumeProvisionerController) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
	return ""
}

func (c *PersistentVolumeProvisionerController) GetPodPluginDir(podUID types.UID, pluginName string) string {
	return ""
}

// GetKubeClient exposes the underlying kube client to plugins.
func (c *PersistentVolumeProvisionerController) GetKubeClient() client.Interface {
	return c.client.GetKubeClient()
}

// Wrapper builders/cleaners require mounting, which this host does not do.
func (c *PersistentVolumeProvisionerController) NewWrapperBuilder(spec *volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Builder, error) {
	return nil, fmt.Errorf("NewWrapperBuilder not supported by PVClaimBinder's VolumeHost implementation")
}

func (c *PersistentVolumeProvisionerController) NewWrapperCleaner(spec *volume.Spec, podUID types.UID) (volume.Cleaner, error) {
	return nil, fmt.Errorf("NewWrapperCleaner not supported by PVClaimBinder's VolumeHost implementation")
}

// GetCloudProvider exposes the configured cloud provider to plugins.
func (c *PersistentVolumeProvisionerController) GetCloudProvider() cloudprovider.Interface {
	return c.cloud
}

func (c *PersistentVolumeProvisionerController) GetMounter() mount.Interface {
	return nil
}

func (c *PersistentVolumeProvisionerController) GetWriter() io.Writer {
	return nil
}

func (c *PersistentVolumeProvisionerController) GetHostName() string {
	return ""
}
|
||||
|
||||
const (
	// these pair of constants are used by the provisioner.
	// The key is a kube namespaced key that denotes a volume requires provisioning.
	// The value is set only when provisioning is completed. Any other value will tell the provisioner
	// that provisioning has not yet occurred.
	// NOTE(review): the completed *value* is itself spelled like an annotation
	// key ("…/provisioning-completed"); confirm this sentinel is intentional.
	pvProvisioningRequiredAnnotationKey    = "volume.experimental.kubernetes.io/provisioning-required"
	pvProvisioningCompletedAnnotationValue = "volume.experimental.kubernetes.io/provisioning-completed"
)
|
@@ -0,0 +1,240 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package persistentvolume
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
client "k8s.io/kubernetes/pkg/client/unversioned"
|
||||
fake_cloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
|
||||
"k8s.io/kubernetes/pkg/util"
|
||||
"k8s.io/kubernetes/pkg/volume"
|
||||
"k8s.io/kubernetes/pkg/watch"
|
||||
)
|
||||
|
||||
// TestProvisionerRunStop verifies the Run/Stop lifecycle bookkeeping:
// no stop channels before Run, exactly two (volumes + claims) while running,
// and none again after Stop.
func TestProvisionerRunStop(t *testing.T) {
	controller, _ := makeTestController()

	if len(controller.stopChannels) != 0 {
		t.Errorf("Non-running provisioner should not have any stopChannels. Got %v", len(controller.stopChannels))
	}

	controller.Run()

	if len(controller.stopChannels) != 2 {
		t.Errorf("Running provisioner should have exactly 2 stopChannels. Got %v", len(controller.stopChannels))
	}

	controller.Stop()

	if len(controller.stopChannels) != 0 {
		t.Errorf("Non-running provisioner should not have any stopChannels. Got %v", len(controller.stopChannels))
	}
}
|
||||
|
||||
// makeTestVolume returns a minimal 10Gi ReadWriteOnce HostPath volume fixture
// with an empty (non-nil) annotation map and a Delete reclaim policy.
func makeTestVolume() *api.PersistentVolume {
	return &api.PersistentVolume{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{},
			Name:        "pv01",
		},
		Spec: api.PersistentVolumeSpec{
			PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimDelete,
			AccessModes:                   []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Capacity: api.ResourceList{
				api.ResourceName(api.ResourceStorage): resource.MustParse("10Gi"),
			},
			PersistentVolumeSource: api.PersistentVolumeSource{
				HostPath: &api.HostPathVolumeSource{
					Path: "/tmp/data01",
				},
			},
		},
	}
}
|
||||
|
||||
// makeTestClaim returns a minimal 8G ReadWriteOnce claim fixture in namespace
// "ns" with an empty (non-nil) annotation map, ready for tests to add
// provisioning annotations.
func makeTestClaim() *api.PersistentVolumeClaim {
	return &api.PersistentVolumeClaim{
		ObjectMeta: api.ObjectMeta{
			Annotations: map[string]string{},
			Name:        "claim01",
			Namespace:   "ns",
			// SelfLink is needed so api.GetReference succeeds on the fixture.
			SelfLink: testapi.Default.SelfLink("pvc", ""),
		},
		Spec: api.PersistentVolumeClaimSpec{
			AccessModes: []api.PersistentVolumeAccessMode{api.ReadWriteOnce},
			Resources: api.ResourceRequirements{
				Requests: api.ResourceList{
					api.ResourceName(api.ResourceStorage): resource.MustParse("8G"),
				},
			},
		},
	}
}
|
||||
|
||||
func makeTestController() (*PersistentVolumeProvisionerController, *mockControllerClient) {
|
||||
mockClient := &mockControllerClient{}
|
||||
mockVolumePlugin := &volume.FakeVolumePlugin{}
|
||||
controller, _ := NewPersistentVolumeProvisionerController(mockClient, 1*time.Second, nil, mockVolumePlugin, &fake_cloud.FakeCloud{})
|
||||
return controller, mockClient
|
||||
}
|
||||
|
||||
func TestReconcileClaim(t *testing.T) {
|
||||
controller, mockClient := makeTestController()
|
||||
pvc := makeTestClaim()
|
||||
|
||||
// watch would have added the claim to the store
|
||||
controller.claimStore.Add(pvc)
|
||||
err := controller.reconcileClaim(pvc)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// non-provisionable PVC should not have created a volume on reconciliation
|
||||
if mockClient.volume != nil {
|
||||
t.Error("Unexpected volume found in mock client. Expected nil")
|
||||
}
|
||||
|
||||
pvc.Annotations[qosProvisioningKey] = "foo"
|
||||
|
||||
err = controller.reconcileClaim(pvc)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// PVC requesting provisioning should have a PV created for it
|
||||
if mockClient.volume == nil {
|
||||
t.Error("Expected to find bound volume but got nil")
|
||||
}
|
||||
|
||||
if mockClient.volume.Spec.ClaimRef.Name != pvc.Name {
|
||||
t.Errorf("Expected PV to be bound to %s but got %s", mockClient.volume.Spec.ClaimRef.Name, pvc.Name)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReconcileVolume(t *testing.T) {
|
||||
|
||||
controller, mockClient := makeTestController()
|
||||
pv := makeTestVolume()
|
||||
pvc := makeTestClaim()
|
||||
|
||||
err := controller.reconcileVolume(pv)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
|
||||
// watch adds claim to the store.
|
||||
// we need to add it to our mock client to mimic normal Get call
|
||||
controller.claimStore.Add(pvc)
|
||||
mockClient.claim = pvc
|
||||
|
||||
// pretend the claim and volume are bound, no provisioning required
|
||||
claimRef, _ := api.GetReference(pvc)
|
||||
pv.Spec.ClaimRef = claimRef
|
||||
err = controller.reconcileVolume(pv)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error %v", err)
|
||||
}
|
||||
|
||||
pv.Annotations[pvProvisioningRequiredAnnotationKey] = "!pvProvisioningCompleted"
|
||||
pv.Annotations[qosProvisioningKey] = "foo"
|
||||
err = controller.reconcileVolume(pv)
|
||||
|
||||
if !isAnnotationMatch(pvProvisioningRequiredAnnotationKey, pvProvisioningCompletedAnnotationValue, mockClient.volume.Annotations) {
|
||||
t.Errorf("Expected %s but got %s", pvProvisioningRequiredAnnotationKey, mockClient.volume.Annotations[pvProvisioningRequiredAnnotationKey])
|
||||
}
|
||||
}
|
||||
|
||||
// compile-time check that the mock satisfies the controllerClient interface
var _ controllerClient = &mockControllerClient{}

// mockControllerClient is an in-memory controllerClient that stores at most
// one volume and one claim, letting tests observe what the controller persists.
type mockControllerClient struct {
	// volume holds the last PV created/updated through the mock; nil when none.
	volume *api.PersistentVolume
	// claim holds the claim assigned to the mock; nil when none.
	claim *api.PersistentVolumeClaim
}
|
||||
|
||||
// GetPersistentVolume returns the single stored volume regardless of the
// requested name; returns nil (with a nil error) when nothing has been stored.
func (c *mockControllerClient) GetPersistentVolume(name string) (*api.PersistentVolume, error) {
	return c.volume, nil
}
|
||||
|
||||
func (c *mockControllerClient) CreatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
|
||||
if pv.GenerateName != "" && pv.Name == "" {
|
||||
pv.Name = fmt.Sprintf(pv.GenerateName, util.NewUUID())
|
||||
}
|
||||
c.volume = pv
|
||||
return c.volume, nil
|
||||
}
|
||||
|
||||
func (c *mockControllerClient) ListPersistentVolumes(options unversioned.ListOptions) (*api.PersistentVolumeList, error) {
|
||||
return &api.PersistentVolumeList{
|
||||
Items: []api.PersistentVolume{*c.volume},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WatchPersistentVolumes returns a fresh fake watch that delivers no events.
func (c *mockControllerClient) WatchPersistentVolumes(options unversioned.ListOptions) (watch.Interface, error) {
	return watch.NewFake(), nil
}
|
||||
|
||||
// UpdatePersistentVolume overwrites the mock's stored volume by delegating to
// CreatePersistentVolume (which also applies generate-name handling).
func (c *mockControllerClient) UpdatePersistentVolume(pv *api.PersistentVolume) (*api.PersistentVolume, error) {
	return c.CreatePersistentVolume(pv)
}
|
||||
|
||||
// DeletePersistentVolume clears the mock's stored volume; it always succeeds,
// ignoring which volume was requested.
func (c *mockControllerClient) DeletePersistentVolume(volume *api.PersistentVolume) error {
	c.volume = nil
	return nil
}
|
||||
|
||||
// UpdatePersistentVolumeStatus is a no-op: it echoes the volume back without
// touching the mock's stored state.
func (c *mockControllerClient) UpdatePersistentVolumeStatus(volume *api.PersistentVolume) (*api.PersistentVolume, error) {
	return volume, nil
}
|
||||
|
||||
func (c *mockControllerClient) GetPersistentVolumeClaim(namespace, name string) (*api.PersistentVolumeClaim, error) {
|
||||
if c.claim != nil {
|
||||
return c.claim, nil
|
||||
} else {
|
||||
return nil, errors.NewNotFound("persistentVolume", name)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *mockControllerClient) ListPersistentVolumeClaims(namespace string, options unversioned.ListOptions) (*api.PersistentVolumeClaimList, error) {
|
||||
return &api.PersistentVolumeClaimList{
|
||||
Items: []api.PersistentVolumeClaim{*c.claim},
|
||||
}, nil
|
||||
}
|
||||
|
||||
// WatchPersistentVolumeClaims returns a fresh fake watch that delivers no events.
func (c *mockControllerClient) WatchPersistentVolumeClaims(namespace string, options unversioned.ListOptions) (watch.Interface, error) {
	return watch.NewFake(), nil
}
|
||||
|
||||
// UpdatePersistentVolumeClaim stores claim as the mock's single claim and
// returns it; it never fails.
func (c *mockControllerClient) UpdatePersistentVolumeClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
	c.claim = claim
	return c.claim, nil
}
|
||||
|
||||
// UpdatePersistentVolumeClaimStatus is a no-op: it echoes the claim back
// without touching the mock's stored state.
func (c *mockControllerClient) UpdatePersistentVolumeClaimStatus(claim *api.PersistentVolumeClaim) (*api.PersistentVolumeClaim, error) {
	return claim, nil
}
|
||||
|
||||
// GetKubeClient satisfies the controllerClient interface; the mock has no
// real kube client, so this always returns nil.
func (c *mockControllerClient) GetKubeClient() client.Interface {
	return nil
}
|
@@ -46,14 +46,16 @@ type PersistentVolumeRecycler struct {
|
||||
client recyclerClient
|
||||
kubeClient client.Interface
|
||||
pluginMgr volume.VolumePluginMgr
|
||||
cloud cloudprovider.Interface
|
||||
}
|
||||
|
||||
// PersistentVolumeRecycler creates a new PersistentVolumeRecycler
|
||||
func NewPersistentVolumeRecycler(kubeClient client.Interface, syncPeriod time.Duration, plugins []volume.VolumePlugin) (*PersistentVolumeRecycler, error) {
|
||||
func NewPersistentVolumeRecycler(kubeClient client.Interface, syncPeriod time.Duration, plugins []volume.VolumePlugin, cloud cloudprovider.Interface) (*PersistentVolumeRecycler, error) {
|
||||
recyclerClient := NewRecyclerClient(kubeClient)
|
||||
recycler := &PersistentVolumeRecycler{
|
||||
client: recyclerClient,
|
||||
kubeClient: kubeClient,
|
||||
cloud: cloud,
|
||||
}
|
||||
|
||||
if err := recycler.pluginMgr.InitPlugins(plugins, recycler); err != nil {
|
||||
@@ -283,7 +285,7 @@ func (f *PersistentVolumeRecycler) NewWrapperCleaner(spec *volume.Spec, podUID t
|
||||
}
|
||||
|
||||
func (f *PersistentVolumeRecycler) GetCloudProvider() cloudprovider.Interface {
|
||||
return nil
|
||||
return f.cloud
|
||||
}
|
||||
|
||||
func (f *PersistentVolumeRecycler) GetMounter() mount.Interface {
|
||||
|
@@ -21,15 +21,15 @@ import (
|
||||
"sort"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
)
|
||||
|
||||
const (
|
||||
// A PV created specifically for one claim must contain this annotation in order to bind to the claim.
|
||||
// The value must be the namespace and name of the claim being bound to (i.e, claim.Namespace/claim.Name)
|
||||
// This is an experimental feature and likely to change in the future.
|
||||
createdForKey = "volume.extensions.kubernetes.io/provisioned-for"
|
||||
// A PVClaim can request a quality of service tier by adding this annotation. The value of the annotation
|
||||
// is arbitrary. The values are pre-defined by a cluster admin and known to users when requesting a QoS.
|
||||
// For example tiers might be gold, silver, and tin and the admin configures what that means for each volume plugin that can provision a volume.
|
||||
// Values in the alpha version of this feature are not meaningful, but will be in the full version of this feature.
|
||||
qosProvisioningKey = "volume.alpha.kubernetes.io/storage-class"
|
||||
)
|
||||
|
||||
// persistentVolumeOrderedIndex is a cache.Store that keeps persistent volumes indexed by AccessModes and ordered by storage capacity.
|
||||
@@ -80,10 +80,7 @@ func (pvIndex *persistentVolumeOrderedIndex) ListByAccessModes(modes []api.Persi
|
||||
type matchPredicate func(compareThis, toThis *api.PersistentVolume) bool
|
||||
|
||||
// find returns the nearest PV from the ordered list or nil if a match is not found
|
||||
func (pvIndex *persistentVolumeOrderedIndex) find(searchPV *api.PersistentVolume, matchPredicate matchPredicate) (*api.PersistentVolume, error) {
|
||||
// the 'searchPV' argument is a synthetic PV with capacity and accessmodes set according to the user's PersistentVolumeClaim.
|
||||
// the synthetic pv arg is, therefore, a request for a storage resource.
|
||||
//
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findByClaim(claim *api.PersistentVolumeClaim, matchPredicate matchPredicate) (*api.PersistentVolume, error) {
|
||||
// PVs are indexed by their access modes to allow easier searching. Each index is the string representation of a set of access modes.
|
||||
// There is a finite number of possible sets and PVs will only be indexed in one of them (whichever index matches the PV's modes).
|
||||
//
|
||||
@@ -92,17 +89,7 @@ func (pvIndex *persistentVolumeOrderedIndex) find(searchPV *api.PersistentVolume
|
||||
//
|
||||
// Searches are performed against a set of access modes, so we can attempt not only the exact matching modes but also
|
||||
// potential matches (the GCEPD example above).
|
||||
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(searchPV.Spec.AccessModes)
|
||||
|
||||
// the searchPV should contain an annotation that allows pre-binding to a claim.
|
||||
// we can use the same annotation value (pvc's namespace/name) and check against
|
||||
// existing volumes to find an exact match. It is possible that a bind is made (ClaimRef persisted to PV)
|
||||
// but the fail to update claim.Spec.VolumeName fails. This check allows the claim to find the volume
|
||||
// that's already bound to the claim.
|
||||
preboundClaim := ""
|
||||
if createdFor, ok := searchPV.Annotations[createdForKey]; ok {
|
||||
preboundClaim = createdFor
|
||||
}
|
||||
allPossibleModes := pvIndex.allPossibleMatchingAccessModes(claim.Spec.AccessModes)
|
||||
|
||||
for _, modes := range allPossibleModes {
|
||||
volumes, err := pvIndex.ListByAccessModes(modes)
|
||||
@@ -115,19 +102,34 @@ func (pvIndex *persistentVolumeOrderedIndex) find(searchPV *api.PersistentVolume
|
||||
// return the exact pre-binding match, if found
|
||||
unboundVolumes := []*api.PersistentVolume{}
|
||||
for _, volume := range volumes {
|
||||
// volume isn't currently bound or pre-bound.
|
||||
if volume.Spec.ClaimRef == nil {
|
||||
// volume isn't currently bound or pre-bound.
|
||||
unboundVolumes = append(unboundVolumes, volume)
|
||||
continue
|
||||
}
|
||||
|
||||
boundClaim := fmt.Sprintf("%s/%s", volume.Spec.ClaimRef.Namespace, volume.Spec.ClaimRef.Name)
|
||||
if boundClaim == preboundClaim {
|
||||
if claim.Name == volume.Spec.ClaimRef.Name && claim.Namespace == volume.Spec.ClaimRef.Namespace {
|
||||
// exact match! No search required.
|
||||
return volume, nil
|
||||
}
|
||||
}
|
||||
|
||||
// a claim requesting provisioning will have an exact match pre-bound to the claim.
|
||||
// no need to search through unbound volumes. The matching volume will be created by the provisioner
|
||||
// and will match above when the claim is re-processed by the binder.
|
||||
if keyExists(qosProvisioningKey, claim.Annotations) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
searchPV := &api.PersistentVolume{
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
AccessModes: claim.Spec.AccessModes,
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
i := sort.Search(len(unboundVolumes), func(i int) bool { return matchPredicate(searchPV, unboundVolumes[i]) })
|
||||
if i < len(unboundVolumes) {
|
||||
return unboundVolumes[i], nil
|
||||
@@ -136,27 +138,9 @@ func (pvIndex *persistentVolumeOrderedIndex) find(searchPV *api.PersistentVolume
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// findByAccessModesAndStorageCapacity is a convenience method that calls Find w/ requisite matchPredicate for storage
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findByAccessModesAndStorageCapacity(prebindKey string, modes []api.PersistentVolumeAccessMode, qty resource.Quantity) (*api.PersistentVolume, error) {
|
||||
pv := &api.PersistentVolume{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
createdForKey: prebindKey,
|
||||
},
|
||||
},
|
||||
Spec: api.PersistentVolumeSpec{
|
||||
AccessModes: modes,
|
||||
Capacity: api.ResourceList{
|
||||
api.ResourceName(api.ResourceStorage): qty,
|
||||
},
|
||||
},
|
||||
}
|
||||
return pvIndex.find(pv, matchStorageCapacity)
|
||||
}
|
||||
|
||||
// findBestMatchForClaim is a convenience method that finds a volume by the claim's AccessModes and requests for Storage
|
||||
func (pvIndex *persistentVolumeOrderedIndex) findBestMatchForClaim(claim *api.PersistentVolumeClaim) (*api.PersistentVolume, error) {
|
||||
return pvIndex.findByAccessModesAndStorageCapacity(fmt.Sprintf("%s/%s", claim.Namespace, claim.Name), claim.Spec.AccessModes, claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)])
|
||||
return pvIndex.findByClaim(claim, matchStorageCapacity)
|
||||
}
|
||||
|
||||
// byCapacity is used to order volumes by ascending storage size
|
||||
@@ -268,3 +252,7 @@ func (c byAccessModes) Swap(i, j int) {
|
||||
func (c byAccessModes) Len() int {
|
||||
return len(c.modes)
|
||||
}
|
||||
|
||||
func claimToClaimKey(claim *api.PersistentVolumeClaim) string {
|
||||
return fmt.Sprintf("%s/%s", claim.Namespace, claim.Name)
|
||||
}
|
||||
|
Reference in New Issue
Block a user