combine feature gates VolumeScheduling and DynamicProvisioningScheduling into one
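This commit merges the DynamicProvisioningScheduling feature gate into VolumeScheduling: gate checks that guarded topology-aware dynamic provisioning are removed or folded into the surviving gate, and the scheduler's selected-node annotation loses its alpha prefix.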
@@ -140,7 +140,7 @@ const annStorageProvisioner = "volume.beta.kubernetes.io/storage-provisioner"
 
 // This annotation is added to a PVC that has been triggered by scheduler to
 // be dynamically provisioned. Its value is the name of the selected node.
-const annSelectedNode = "volume.alpha.kubernetes.io/selected-node"
+const annSelectedNode = "volume.kubernetes.io/selected-node"
 
 // If the provisioner name in a storage class is set to "kubernetes.io/no-provisioner",
 // then dynamic provisioning is not supported by the storage.
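The annotation key drops its alpha prefix. As a minimal sketch (not part of this commit) of how a consumer could read the annotation; getSelectedNode is a hypothetical helper, not a real Kubernetes function:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const annSelectedNode = "volume.kubernetes.io/selected-node"

// getSelectedNode reports the node name the scheduler stored on the claim, if any.
func getSelectedNode(claim *v1.PersistentVolumeClaim) (string, bool) {
    nodeName, ok := claim.Annotations[annSelectedNode]
    return nodeName, ok
}

func main() {
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:        "data",
            Annotations: map[string]string{annSelectedNode: "node-1"},
        },
    }
    if node, ok := getSelectedNode(claim); ok {
        fmt.Printf("claim %s was scheduled to node %s\n", claim.Name, node)
    }
}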
@@ -290,14 +290,12 @@ func (ctrl *PersistentVolumeController) shouldDelayBinding(claim *v1.PersistentV
         return false, nil
     }
 
-    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-        // When feature DynamicProvisioningScheduling enabled,
-        // Scheduler signal to the PV controller to start dynamic
-        // provisioning by setting the "annSelectedNode" annotation
-        // in the PVC
-        if _, ok := claim.Annotations[annSelectedNode]; ok {
-            return false, nil
-        }
-    }
+    // When the VolumeScheduling feature is enabled, the
+    // scheduler signals the PV controller to start dynamic
+    // provisioning by setting the annSelectedNode annotation
+    // on the PVC.
+    if _, ok := claim.Annotations[annSelectedNode]; ok {
+        return false, nil
+    }
 
     className := v1helper.GetPersistentVolumeClaimClass(claim)
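With the gate gone, the early-return logic stands on its own. A simplified sketch of the resulting decision, assuming the storage/v1 API types; delayBinding is a hypothetical stand-in for shouldDelayBinding with the feature-gate and lister plumbing stripped out:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const annSelectedNode = "volume.kubernetes.io/selected-node"

// delayBinding mirrors the new control flow: a selected node short-circuits
// the delay; otherwise only WaitForFirstConsumer classes delay binding.
func delayBinding(claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass) bool {
    // The scheduler already picked a node: start provisioning immediately.
    if _, ok := claim.Annotations[annSelectedNode]; ok {
        return false
    }
    return class.VolumeBindingMode != nil &&
        *class.VolumeBindingMode == storagev1.VolumeBindingWaitForFirstConsumer
}

func main() {
    mode := storagev1.VolumeBindingWaitForFirstConsumer
    class := &storagev1.StorageClass{VolumeBindingMode: &mode}
    claim := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
        Annotations: map[string]string{annSelectedNode: "node-1"},
    }}
    fmt.Println(delayBinding(claim, class)) // false: node already selected
}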
@@ -1477,25 +1475,22 @@ func (ctrl *PersistentVolumeController) provisionClaimOperation(claim *v1.Persis
     }
 
     var selectedNode *v1.Node = nil
-    var allowedTopologies []v1.TopologySelectorTerm = nil
-    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-        if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
-            selectedNode, err = ctrl.NodeLister.Get(nodeName)
-            if err != nil {
-                strerr := fmt.Sprintf("Failed to get target node: %v", err)
-                glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
-                ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
-                return pluginName, err
-            }
-        }
-        allowedTopologies = storageClass.AllowedTopologies
-    }
+    if nodeName, ok := claim.Annotations[annSelectedNode]; ok {
+        selectedNode, err = ctrl.NodeLister.Get(nodeName)
+        if err != nil {
+            strerr := fmt.Sprintf("Failed to get target node: %v", err)
+            glog.V(3).Infof("unexpected error getting target node %q for claim %q: %v", nodeName, claimToClaimKey(claim), err)
+            ctrl.eventRecorder.Event(claim, v1.EventTypeWarning, events.ProvisioningFailed, strerr)
+            return pluginName, err
+        }
+    }
+    allowedTopologies := storageClass.AllowedTopologies
 
     opComplete := util.OperationCompleteHook(plugin.GetPluginName(), "volume_provision")
     volume, err = provisioner.Provision(selectedNode, allowedTopologies)
     opComplete(&err)
     if err != nil {
-        // Other places of failure have nothing to do with DynamicProvisioningScheduling,
+        // Other places of failure have nothing to do with VolumeScheduling,
         // so just let controller retry in the next sync. We'll only call func
         // rescheduleProvisioning here when the underlying provisioning actually failed.
         ctrl.rescheduleProvisioning(claim)
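Provision now unconditionally receives the scheduler's node choice and the class's topology constraints. A hedged sketch of how those two inputs are derived, with the node lister replaced by a plain map; provisionInputs is hypothetical:

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    storagev1 "k8s.io/api/storage/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

const annSelectedNode = "volume.kubernetes.io/selected-node"

// provisionInputs resolves the annotated node name and passes the class's
// AllowedTopologies straight through, mirroring the hunk above.
func provisionInputs(claim *v1.PersistentVolumeClaim, class *storagev1.StorageClass,
    nodes map[string]*v1.Node) (*v1.Node, []v1.TopologySelectorTerm) {
    var selectedNode *v1.Node
    if name, ok := claim.Annotations[annSelectedNode]; ok {
        selectedNode = nodes[name] // the real controller uses ctrl.NodeLister.Get(name)
    }
    return selectedNode, class.AllowedTopologies
}

func main() {
    nodes := map[string]*v1.Node{
        "node-1": {ObjectMeta: metav1.ObjectMeta{Name: "node-1"}},
    }
    claim := &v1.PersistentVolumeClaim{ObjectMeta: metav1.ObjectMeta{
        Annotations: map[string]string{annSelectedNode: "node-1"},
    }}
    node, topologies := provisionInputs(claim, &storagev1.StorageClass{}, nodes)
    fmt.Println(node.Name, len(topologies))
}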
@@ -456,7 +456,7 @@ func TestAssumeUpdatePVCCache(t *testing.T) {
 
     // Assume PVC
     newPVC := pvc.DeepCopy()
-    newPVC.Annotations["volume.alpha.kubernetes.io/selected-node"] = "test-node"
+    newPVC.Annotations[annSelectedNode] = "test-node"
     if err := cache.Assume(newPVC); err != nil {
         t.Fatalf("failed to assume PVC: %v", err)
     }
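The test now references the shared constant instead of repeating the raw (and formerly alpha) key. A sketch of building such an annotated claim, using the same imports as the sketches above; makePVC is hypothetical:

// makePVC builds a claim carrying the scheduler's node selection, the shape
// the assume cache sees in the test above.
func makePVC(name, node string) *v1.PersistentVolumeClaim {
    return &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:        name,
            Namespace:   "default",
            Annotations: map[string]string{annSelectedNode: node},
        },
    }
}

The last two hunks are in the scheduler's volume binder: dropping the DynamicProvisioningScheduling check from FindPodVolumes leaves the utilfeature and features imports unused, hence the removals below.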
@@ -25,12 +25,10 @@ import (
     "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
-    utilfeature "k8s.io/apiserver/pkg/util/feature"
     coreinformers "k8s.io/client-go/informers/core/v1"
     storageinformers "k8s.io/client-go/informers/storage/v1"
     clientset "k8s.io/client-go/kubernetes"
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
-    "k8s.io/kubernetes/pkg/features"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
 )
 
@@ -169,13 +167,11 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, node *v1.Node) (unboundVolume
         return false, false, err
     }
 
-    if utilfeature.DefaultFeatureGate.Enabled(features.DynamicProvisioningScheduling) {
-        // Try to provision for unbound volumes
-        if !unboundVolumesSatisfied {
-            unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
-            if err != nil {
-                return false, false, err
-            }
-        }
-    }
+    // Try to provision for unbound volumes
+    if !unboundVolumesSatisfied {
+        unboundVolumesSatisfied, err = b.checkVolumeProvisions(pod, claimsToProvision, node)
+        if err != nil {
+            return false, false, err
+        }
+    }
 
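Taken together, every former DynamicProvisioningScheduling check either becomes unconditional code or rides on the one surviving gate. A sketch of what a gated call site looks like after this commit, assuming the standard feature-gate helpers at the version this commit targets:

package main

import (
    "fmt"

    utilfeature "k8s.io/apiserver/pkg/util/feature"
    "k8s.io/kubernetes/pkg/features"
)

func main() {
    // One gate now covers delayed binding and topology-aware provisioning alike.
    if utilfeature.DefaultFeatureGate.Enabled(features.VolumeScheduling) {
        fmt.Println("volume scheduling enabled; provisioning may use the selected node")
    }
}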