Implement controller and kubelet changes for recovery from resize failures
@@ -28,7 +28,9 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	clientset "k8s.io/client-go/kubernetes"
+	"k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/volume"
 	volumetypes "k8s.io/kubernetes/pkg/volume/util/types"
 	"k8s.io/mount-utils"
@@ -61,7 +63,7 @@ func ClaimToClaimKey(claim *v1.PersistentVolumeClaim) string {
 func UpdatePVSize(
 	pv *v1.PersistentVolume,
 	newSize resource.Quantity,
-	kubeClient clientset.Interface) error {
+	kubeClient clientset.Interface) (*v1.PersistentVolume, error) {
 	pvClone := pv.DeepCopy()
 	pvClone.Spec.Capacity[v1.ResourceStorage] = newSize
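UpdatePVSize now returns the PV as patched by the API server instead of a bare error. A minimal sketch of the round trip, assuming the import alias volumeutil for k8s.io/kubernetes/pkg/volume/util and client-go's fake clientset (neither appears in this diff):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
)

func main() {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-1"},
		Spec: v1.PersistentVolumeSpec{
			Capacity: v1.ResourceList{v1.ResourceStorage: resource.MustParse("1Gi")},
		},
	}
	client := fake.NewSimpleClientset(pv)

	// The updated signature hands back the server's copy, so follow-up
	// patches can start from fresh state instead of a stale local clone.
	updatedPV, err := volumeutil.UpdatePVSize(pv, resource.MustParse("2Gi"), client)
	if err != nil {
		panic(err)
	}
	capacity := updatedPV.Spec.Capacity[v1.ResourceStorage]
	fmt.Println(capacity.String()) // 2Gi
}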
@@ -84,7 +86,8 @@ func AddAnnPreResizeCapacity(
 	}
 	pvClone.ObjectMeta.Annotations[AnnPreResizeCapacity] = oldCapacity.String()

-	return PatchPV(pv, pvClone, kubeClient)
+	_, err := PatchPV(pv, pvClone, kubeClient)
+	return err
 }

 // DeleteAnnPreResizeCapacity deletes volume.alpha.kubernetes.io/pre-resize-capacity from the pv
@@ -97,35 +100,35 @@ func DeleteAnnPreResizeCapacity(
 	}
 	pvClone := pv.DeepCopy()
 	delete(pvClone.ObjectMeta.Annotations, AnnPreResizeCapacity)

-	return PatchPV(pv, pvClone, kubeClient)
+	_, err := PatchPV(pv, pvClone, kubeClient)
+	return err
 }

 // PatchPV creates and executes a patch for pv
 func PatchPV(
 	oldPV *v1.PersistentVolume,
 	newPV *v1.PersistentVolume,
-	kubeClient clientset.Interface) error {
+	kubeClient clientset.Interface) (*v1.PersistentVolume, error) {
 	oldData, err := json.Marshal(oldPV)
 	if err != nil {
-		return fmt.Errorf("unexpected error marshaling old PV %q with error : %v", oldPV.Name, err)
+		return oldPV, fmt.Errorf("unexpected error marshaling old PV %q with error : %v", oldPV.Name, err)
 	}

 	newData, err := json.Marshal(newPV)
 	if err != nil {
-		return fmt.Errorf("unexpected error marshaling new PV %q with error : %v", newPV.Name, err)
+		return oldPV, fmt.Errorf("unexpected error marshaling new PV %q with error : %v", newPV.Name, err)
 	}

 	patchBytes, err := strategicpatch.CreateTwoWayMergePatch(oldData, newData, oldPV)
 	if err != nil {
-		return fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", oldPV.Name, err)
+		return oldPV, fmt.Errorf("error Creating two way merge patch for PV %q with error : %v", oldPV.Name, err)
 	}

-	_, err = kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), oldPV.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
+	updatedPV, err := kubeClient.CoreV1().PersistentVolumes().Patch(context.TODO(), oldPV.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
 	if err != nil {
-		return fmt.Errorf("error Patching PV %q with error : %v", oldPV.Name, err)
+		return oldPV, fmt.Errorf("error Patching PV %q with error : %v", oldPV.Name, err)
 	}
-	return nil
+	return updatedPV, nil
 }

 // MarkResizeInProgressWithResizer marks cloudprovider resizing as in progress
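Note the error paths: PatchPV now returns oldPV rather than nil alongside the error, so callers always get a usable object back. A hedged fragment illustrating the pattern (the caller, the volumeutil alias, and the klog import "k8s.io/klog/v2" are illustrative, not from this commit):

// resizeAndRecord is a hypothetical caller: on failure it logs and keeps
// the pre-patch PV, on success it keeps the API server's copy.
func resizeAndRecord(pv *v1.PersistentVolume, newSize resource.Quantity,
	kubeClient clientset.Interface) *v1.PersistentVolume {
	pvClone := pv.DeepCopy()
	pvClone.Spec.Capacity[v1.ResourceStorage] = newSize
	updatedPV, err := volumeutil.PatchPV(pv, pvClone, kubeClient)
	if err != nil {
		// updatedPV == pv here; no nil check is needed before reuse.
		klog.Errorf("patching PV %q failed: %v", pv.Name, err)
	}
	return updatedPV
}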
@@ -147,6 +150,23 @@ func MarkResizeInProgressWithResizer(
 	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
 }

+func MarkControllerReisizeInProgress(pvc *v1.PersistentVolumeClaim, resizerName string, newSize resource.Quantity, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+	// Mark PVC as Resize Started
+	progressCondition := v1.PersistentVolumeClaimCondition{
+		Type:               v1.PersistentVolumeClaimResizing,
+		Status:             v1.ConditionTrue,
+		LastTransitionTime: metav1.Now(),
+	}
+	controllerExpansionInProgress := v1.PersistentVolumeClaimControllerExpansionInProgress
+	conditions := []v1.PersistentVolumeClaimCondition{progressCondition}
+	newPVC := pvc.DeepCopy()
+	newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
+	newPVC.Status.ResizeStatus = &controllerExpansionInProgress
+	newPVC.Status.AllocatedResources = v1.ResourceList{v1.ResourceStorage: newSize}
+	newPVC = setResizer(newPVC, resizerName)
+	return PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
+}
+
 // SetClaimResizer sets resizer annotation on PVC
 func SetClaimResizer(
 	pvc *v1.PersistentVolumeClaim,
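MarkControllerReisizeInProgress (the identifier's spelling is as committed) is the first step of the controller-side state machine: it stamps the Resizing condition, sets ResizeStatus to ControllerExpansionInProgress, and records the attempted size in status.allocatedResources so a later spec shrink cannot hide what was actually requested. A sketch of the intended sequencing, where expandVolume is a hypothetical stand-in for the plugin/CSI call and the failure/handoff helpers appear later in this diff:

// Hypothetical controller-side flow, assuming pvc, newSize, resizerName
// and kubeClient come from the expand controller's work loop.
func controllerExpand(pvc *v1.PersistentVolumeClaim, resizerName string,
	newSize resource.Quantity, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	// 1. Record intent before touching the storage backend.
	pvc, err := volumeutil.MarkControllerReisizeInProgress(pvc, resizerName, newSize, kubeClient)
	if err != nil {
		return pvc, err
	}
	// 2. Perform the actual expansion (placeholder).
	if expandErr := expandVolume(pvc); expandErr != nil {
		// 3a. Record the terminal failure so recovery logic can kick in.
		return volumeutil.MarkControllerExpansionFailed(pvc, kubeClient)
	}
	// 3b. Hand off to kubelet: file system expansion is still pending on the node.
	return volumeutil.MarkForFSResize(pvc, kubeClient)
}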
@@ -168,7 +188,7 @@ func setResizer(pvc *v1.PersistentVolumeClaim, resizerName string) *v1.PersistentVolumeClaim {
 // MarkForFSResize marks file system resizing as pending
 func MarkForFSResize(
 	pvc *v1.PersistentVolumeClaim,
-	kubeClient clientset.Interface) error {
+	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
 	pvcCondition := v1.PersistentVolumeClaimCondition{
 		Type:               v1.PersistentVolumeClaimFileSystemResizePending,
 		Status:             v1.ConditionTrue,
@@ -177,16 +197,20 @@ func MarkForFSResize(
 	}
 	conditions := []v1.PersistentVolumeClaimCondition{pvcCondition}
 	newPVC := pvc.DeepCopy()
+	if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
+		expansionPendingOnNode := v1.PersistentVolumeClaimNodeExpansionPending
+		newPVC.Status.ResizeStatus = &expansionPendingOnNode
+	}
 	newPVC = MergeResizeConditionOnPVC(newPVC, conditions)
-	_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
-	return err
+	updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
+	return updatedPVC, err
 }

 // MarkResizeFinished marks all resizing as done
 func MarkResizeFinished(
 	pvc *v1.PersistentVolumeClaim,
 	newSize resource.Quantity,
-	kubeClient clientset.Interface) error {
+	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
 	return MarkFSResizeFinished(pvc, newSize, kubeClient)
 }

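Both gate-checked branches of MarkForFSResize are worth exercising in tests. A hedged test-style sketch using the standard component-base helper (not part of this commit; around this release the helper returned a cleanup func to defer):

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

func TestMarkForFSResizeSetsResizeStatus(t *testing.T) {
	// Enable RecoverVolumeExpansionFailure for this test only; the returned
	// func restores the previous gate value.
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate,
		features.RecoverVolumeExpansionFailure, true)()

	// Build a PVC plus a fake clientset, call MarkForFSResize, then assert
	// Status.ResizeStatus == v1.PersistentVolumeClaimNodeExpansionPending.
	// With the gate off, ResizeStatus must remain untouched.
}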
@@ -194,12 +218,65 @@ func MarkResizeFinished(
 func MarkFSResizeFinished(
 	pvc *v1.PersistentVolumeClaim,
 	newSize resource.Quantity,
-	kubeClient clientset.Interface) error {
+	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
 	newPVC := pvc.DeepCopy()
+
 	newPVC.Status.Capacity[v1.ResourceStorage] = newSize
+
+	// if RecoverVolumeExpansionFailure is enabled, we need to reset ResizeStatus back to nil
+	if utilfeature.DefaultFeatureGate.Enabled(features.RecoverVolumeExpansionFailure) {
+		expansionFinished := v1.PersistentVolumeClaimNoExpansionInProgress
+		newPVC.Status.ResizeStatus = &expansionFinished
+	}
+
 	newPVC = MergeResizeConditionOnPVC(newPVC, []v1.PersistentVolumeClaimCondition{})
-	_, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
-	return err
+	updatedPVC, err := PatchPVCStatus(pvc /*oldPVC*/, newPVC, kubeClient)
+	return updatedPVC, err
 }

+func MarkControllerExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+	expansionFailedOnController := v1.PersistentVolumeClaimControllerExpansionFailed
+	newPVC := pvc.DeepCopy()
+	newPVC.Status.ResizeStatus = &expansionFailedOnController
+	patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
+	if err != nil {
+		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err)
+	}
+
+	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
+		Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	if updateErr != nil {
+		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr)
+	}
+	return updatedClaim, nil
+}
+
+// MarkNodeExpansionFailed marks a PVC for node expansion as failed. Kubelet should not retry expansion
+// of volumes which are in failed state.
+func MarkNodeExpansionFailed(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+	expansionFailedOnNode := v1.PersistentVolumeClaimNodeExpansionFailed
+	newPVC := pvc.DeepCopy()
+	newPVC.Status.ResizeStatus = &expansionFailedOnNode
+	patchBytes, err := createPVCPatch(pvc, newPVC, false /* addResourceVersionCheck */)
+	if err != nil {
+		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, err)
+	}
+
+	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(pvc.Namespace).
+		Patch(context.TODO(), pvc.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
+	if updateErr != nil {
+		return pvc, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", pvc.Name, updateErr)
+	}
+	return updatedClaim, nil
+}
+
+// MarkNodeExpansionInProgress marks pvc expansion in progress on node
+func MarkNodeExpansionInProgress(pvc *v1.PersistentVolumeClaim, kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
+	nodeExpansionInProgress := v1.PersistentVolumeClaimNodeExpansionInProgress
+	newPVC := pvc.DeepCopy()
+	newPVC.Status.ResizeStatus = &nodeExpansionInProgress
+	updatedPVC, err := PatchPVCStatus(pvc /* oldPVC */, newPVC, kubeClient)
+	return updatedPVC, err
+}
+
 // PatchPVCStatus updates PVC status using PATCH verb
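The three new helpers above give kubelet a matching node-side state machine. A sketch of the sequencing, where expandFS is a hypothetical stand-in for the plugin's node expansion call:

// Hypothetical kubelet-side flow: mark in progress, expand, then record
// either terminal failure (never retried) or success (status reset).
func nodeExpand(pvc *v1.PersistentVolumeClaim, newSize resource.Quantity,
	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
	pvc, err := volumeutil.MarkNodeExpansionInProgress(pvc, kubeClient)
	if err != nil {
		return pvc, err
	}
	if expandErr := expandFS(pvc); expandErr != nil {
		// NodeExpansionFailed is terminal: kubelet must not retry this volume.
		return volumeutil.MarkNodeExpansionFailed(pvc, kubeClient)
	}
	// Success records the new capacity and, with the gate on, resets
	// ResizeStatus to NoExpansionInProgress.
	return volumeutil.MarkFSResizeFinished(pvc, newSize, kubeClient)
}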
@@ -210,22 +287,22 @@ func PatchPVCStatus(
 	oldPVC *v1.PersistentVolumeClaim,
 	newPVC *v1.PersistentVolumeClaim,
 	kubeClient clientset.Interface) (*v1.PersistentVolumeClaim, error) {
-	patchBytes, err := createPVCPatch(oldPVC, newPVC)
+	patchBytes, err := createPVCPatch(oldPVC, newPVC, true /* addResourceVersionCheck */)
 	if err != nil {
-		return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, err)
+		return oldPVC, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, err)
 	}

 	updatedClaim, updateErr := kubeClient.CoreV1().PersistentVolumeClaims(oldPVC.Namespace).
 		Patch(context.TODO(), oldPVC.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{}, "status")
 	if updateErr != nil {
-		return nil, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr)
+		return oldPVC, fmt.Errorf("patchPVCStatus failed to patch PVC %q: %v", oldPVC.Name, updateErr)
 	}
 	return updatedClaim, nil
 }

 func createPVCPatch(
 	oldPVC *v1.PersistentVolumeClaim,
-	newPVC *v1.PersistentVolumeClaim) ([]byte, error) {
+	newPVC *v1.PersistentVolumeClaim, addResourceVersionCheck bool) ([]byte, error) {
 	oldData, err := json.Marshal(oldPVC)
 	if err != nil {
 		return nil, fmt.Errorf("failed to marshal old data: %v", err)
@@ -241,9 +318,11 @@ func createPVCPatch(
 		return nil, fmt.Errorf("failed to create 2 way merge patch: %v", err)
 	}

-	patchBytes, err = addResourceVersion(patchBytes, oldPVC.ResourceVersion)
-	if err != nil {
-		return nil, fmt.Errorf("failed to add resource version: %v", err)
+	if addResourceVersionCheck {
+		patchBytes, err = addResourceVersion(patchBytes, oldPVC.ResourceVersion)
+		if err != nil {
+			return nil, fmt.Errorf("failed to add resource version: %v", err)
+		}
 	}

 	return patchBytes, nil
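The new addResourceVersionCheck flag explains the asymmetry between the patch helpers: PatchPVCStatus passes true, so the embedded resourceVersion makes the API server reject the patch with a conflict if the PVC changed since it was read, while the failure-marking helpers pass false so a terminal state can still be recorded from a stale copy. A sketch of the precondition itself, assuming a plain map-based merge and the encoding/json package (the file's real helper is addResourceVersion, referenced above):

// withResourceVersion embeds metadata.resourceVersion into a merge patch,
// turning it into an optimistic-concurrency precondition.
func withResourceVersion(patchBytes []byte, rv string) ([]byte, error) {
	var patch map[string]interface{}
	if err := json.Unmarshal(patchBytes, &patch); err != nil {
		return nil, err
	}
	meta, ok := patch["metadata"].(map[string]interface{})
	if !ok {
		meta = map[string]interface{}{}
		patch["metadata"] = meta
	}
	meta["resourceVersion"] = rv
	return json.Marshal(patch)
}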