Respect controllers on PVCs for retention policy
@@ -220,7 +220,7 @@ func (spc *StatefulPodControl) ClaimsMatchRetentionPolicy(ctx context.Context, s
 	case err != nil:
 		return false, fmt.Errorf("Could not retrieve claim %s for %s when checking PVC deletion policy", claimName, pod.Name)
 	default:
-		if !claimOwnerMatchesSetAndPod(logger, claim, set, pod) {
+		if !isClaimOwnerUpToDate(logger, claim, set, pod) {
 			return false, nil
 		}
 	}
@@ -242,14 +242,16 @@ func (spc *StatefulPodControl) UpdatePodClaimForRetentionPolicy(ctx context.Cont
 	case err != nil:
 		return fmt.Errorf("Could not retrieve claim %s not found for %s when checking PVC deletion policy: %w", claimName, pod.Name, err)
 	default:
-		if !claimOwnerMatchesSetAndPod(logger, claim, set, pod) {
+		if hasUnexpectedController(claim, set, pod) {
+			// Add an event so the user knows they're in a strange configuration. The claim will be cleaned up below.
+			msg := fmt.Sprintf("PersistentVolumeClaim %s has a conflicting OwnerReference that acts as a managing controller, the retention policy is ignored for this claim", claimName)
+			spc.recorder.Event(set, v1.EventTypeWarning, "ConflictingController", msg)
+		}
+		if !isClaimOwnerUpToDate(logger, claim, set, pod) {
 			claim = claim.DeepCopy() // Make a copy so we don't mutate the shared cache.
-			needsUpdate := updateClaimOwnerRefForSetAndPod(logger, claim, set, pod)
-			if needsUpdate {
-				err := spc.objectMgr.UpdateClaim(claim)
-				if err != nil {
-					return fmt.Errorf("Could not update claim %s for delete policy ownerRefs: %w", claimName, err)
-				}
-			}
+			updateClaimOwnerRefForSetAndPod(logger, claim, set, pod)
+			if err := spc.objectMgr.UpdateClaim(claim); err != nil {
+				return fmt.Errorf("could not update claim %s for delete policy ownerRefs: %w", claimName, err)
+			}
 		}
 	}
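A PVC ownerRef that trips the new `ConflictingController` event looks like the following. This is a standalone sketch, not part of the commit; the ConfigMap-as-dummy-controller shape mirrors the e2e test added further down, and the name and UID are hypothetical:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// An owner that is neither the StatefulSet nor the pod, with Controller=true:
	// hasUnexpectedController reports it, the controller emits the
	// ConflictingController warning, and the retention policy leaves the claim alone.
	conflicting := metav1.OwnerReference{
		APIVersion: "v1",
		Kind:       "ConfigMap",
		Name:       "dummy-controller", // hypothetical name
		UID:        "1234-5678-90ab",   // hypothetical UID
		Controller: ptr.To(true),
	}
	fmt.Println(*conflicting.Controller) // true: acts as a managing controller
}
```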
@@ -275,8 +277,7 @@ func (spc *StatefulPodControl) PodClaimIsStale(set *apps.StatefulSet, pod *v1.Po
 	case err != nil:
 		return false, err
 	case err == nil:
-		// A claim is stale if it doesn't match the pod's UID, including if the pod has no UID.
-		if hasStaleOwnerRef(pvc, pod) {
+		if hasStaleOwnerRef(pvc, pod, podKind) {
 			return true, nil
 		}
 	}

@@ -41,6 +41,7 @@ import (
 	_ "k8s.io/kubernetes/pkg/apis/apps/install"
 	_ "k8s.io/kubernetes/pkg/apis/core/install"
 	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/utils/ptr"
 )
 
 func TestStatefulPodControlCreatesPods(t *testing.T) {
@@ -502,7 +503,7 @@ func TestStatefulPodControlDeleteFailure(t *testing.T) {
 }
 
 func TestStatefulPodControlClaimsMatchDeletionPolcy(t *testing.T) {
-	// The claimOwnerMatchesSetAndPod is tested exhaustively in stateful_set_utils_test; this
+	// The isClaimOwnerUpToDate is tested exhaustively in stateful_set_utils_test; this
 	// test is for the wiring to the method tested there.
 	_, ctx := ktesting.NewTestContext(t)
 	fakeClient := &fake.Clientset{}
@@ -542,38 +543,64 @@ func TestStatefulPodControlUpdatePodClaimForRetentionPolicy(t *testing.T) {
 	testFn := func(t *testing.T) {
 		_, ctx := ktesting.NewTestContext(t)
 		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.StatefulSetAutoDeletePVC, true)
-		fakeClient := &fake.Clientset{}
-		indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
-		claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
-		fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
-			update := action.(core.UpdateAction)
-			indexer.Update(update.GetObject())
-			return true, update.GetObject(), nil
-		})
-		set := newStatefulSet(3)
-		set.GetObjectMeta().SetUID("set-123")
-		pod := newStatefulSetPod(set, 0)
-		claims := getPersistentVolumeClaims(set, pod)
-		for k := range claims {
-			claim := claims[k]
-			indexer.Add(&claim)
-		}
-		control := NewStatefulPodControl(fakeClient, nil, claimLister, &noopRecorder{})
-		set.Spec.PersistentVolumeClaimRetentionPolicy = &apps.StatefulSetPersistentVolumeClaimRetentionPolicy{
-			WhenDeleted: apps.DeletePersistentVolumeClaimRetentionPolicyType,
-			WhenScaled:  apps.RetainPersistentVolumeClaimRetentionPolicyType,
-		}
-		if err := control.UpdatePodClaimForRetentionPolicy(ctx, set, pod); err != nil {
-			t.Errorf("Unexpected error for UpdatePodClaimForRetentionPolicy (retain): %v", err)
-		}
-		expectRef := utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC)
-		for k := range claims {
-			claim, err := claimLister.PersistentVolumeClaims(claims[k].Namespace).Get(claims[k].Name)
-			if err != nil {
-				t.Errorf("Unexpected error getting Claim %s/%s: %v", claim.Namespace, claim.Name, err)
-			}
-			if hasOwnerRef(claim, set) != expectRef {
-				t.Errorf("Claim %s/%s bad set owner ref", claim.Namespace, claim.Name)
+
+		testCases := []struct {
+			name      string
+			ownerRef  []metav1.OwnerReference
+			expectRef bool
+		}{
+			{
+				name:      "bare PVC",
+				expectRef: true,
+			},
+			{
+				name:      "PVC already controller",
+				ownerRef:  []metav1.OwnerReference{{Controller: ptr.To(true), Name: "foobar"}},
+				expectRef: false,
+			},
+		}
+
+		for _, tc := range testCases {
+			fakeClient := &fake.Clientset{}
+			indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
+			claimLister := corelisters.NewPersistentVolumeClaimLister(indexer)
+			fakeClient.AddReactor("update", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
+				update := action.(core.UpdateAction)
+				if err := indexer.Update(update.GetObject()); err != nil {
+					t.Fatalf("could not update index: %v", err)
+				}
+				return true, update.GetObject(), nil
+			})
+			set := newStatefulSet(3)
+			set.GetObjectMeta().SetUID("set-123")
+			pod0 := newStatefulSetPod(set, 0)
+			claims0 := getPersistentVolumeClaims(set, pod0)
+			for k := range claims0 {
+				claim := claims0[k]
+				if tc.ownerRef != nil {
+					claim.SetOwnerReferences(tc.ownerRef)
+				}
+				if err := indexer.Add(&claim); err != nil {
+					t.Errorf("Could not add claim %s: %v", k, err)
+				}
+			}
+			control := NewStatefulPodControl(fakeClient, nil, claimLister, &noopRecorder{})
+			set.Spec.PersistentVolumeClaimRetentionPolicy = &apps.StatefulSetPersistentVolumeClaimRetentionPolicy{
+				WhenDeleted: apps.DeletePersistentVolumeClaimRetentionPolicyType,
+				WhenScaled:  apps.RetainPersistentVolumeClaimRetentionPolicyType,
+			}
+			if err := control.UpdatePodClaimForRetentionPolicy(ctx, set, pod0); err != nil {
+				t.Errorf("Unexpected error for UpdatePodClaimForRetentionPolicy (retain), pod0: %v", err)
+			}
+			expectRef := tc.expectRef && utilfeature.DefaultFeatureGate.Enabled(features.StatefulSetAutoDeletePVC)
+			for k := range claims0 {
+				claim, err := claimLister.PersistentVolumeClaims(claims0[k].Namespace).Get(claims0[k].Name)
+				if err != nil {
+					t.Errorf("Unexpected error getting Claim %s/%s: %v", claim.Namespace, claim.Name, err)
+				}
+				if hasOwnerRef(claim, set) != expectRef {
+					t.Errorf("%s: Claim %s/%s bad set owner ref", tc.name, claim.Namespace, claim.Name)
+				}
 			}
 		}
 	}
@@ -663,12 +690,22 @@ func TestPodClaimIsStale(t *testing.T) {
 			claimIndexer.Add(&claim)
 		case stale:
 			claim.SetOwnerReferences([]metav1.OwnerReference{
-				{Name: "set-3", UID: types.UID("stale")},
+				{
+					Name:       "set-3",
+					UID:        types.UID("stale"),
+					APIVersion: "v1",
+					Kind:       "Pod",
+				},
 			})
 			claimIndexer.Add(&claim)
 		case withRef:
 			claim.SetOwnerReferences([]metav1.OwnerReference{
-				{Name: "set-3", UID: types.UID("123")},
+				{
+					Name:       "set-3",
+					UID:        types.UID("123"),
+					APIVersion: "v1",
+					Kind:       "Pod",
+				},
 			})
 			claimIndexer.Add(&claim)
 		}
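The test refs gain APIVersion and Kind because the reworked hasStaleOwnerRef (further down) matches a ref by name and GVK before comparing UIDs, so a bare `{Name, UID}` ref would no longer be considered at all. A minimal sketch of the matching rule, where `matchesGVK` is a stand-in for the commit's `matchesRef`:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// matchesGVK mirrors the name+GVK comparison that matchesRef performs (sketch).
func matchesGVK(ref metav1.OwnerReference, apiVersion, kind, name string) bool {
	return ref.APIVersion == apiVersion && ref.Kind == kind && ref.Name == name
}

func main() {
	bare := metav1.OwnerReference{Name: "set-3", UID: "stale"}
	full := metav1.OwnerReference{Name: "set-3", UID: "stale", APIVersion: "v1", Kind: "Pod"}
	fmt.Println(matchesGVK(bare, "v1", "Pod", "set-3")) // false: no GVK, never checked for staleness
	fmt.Println(matchesGVK(full, "v1", "Pod", "set-3")) // true: the UID comparison now decides staleness
}
```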
@@ -710,7 +747,8 @@ func TestStatefulPodControlRetainDeletionPolicyUpdate(t *testing.T) {
 		}
 		for k := range claims {
 			claim := claims[k]
-			setOwnerRef(&claim, set, &set.TypeMeta) // This ownerRef should be removed in the update.
+			// This ownerRef should be removed in the update.
+			claim.SetOwnerReferences(addControllerRef(claim.GetOwnerReferences(), set, controllerKind))
 			claimIndexer.Add(&claim)
 		}
 		control := NewStatefulPodControl(fakeClient, podLister, claimLister, recorder)

@@ -49,6 +49,9 @@ import (
 // controllerKind contains the schema.GroupVersionKind for this controller type.
 var controllerKind = apps.SchemeGroupVersion.WithKind("StatefulSet")
 
+// podKind contains the schema.GroupVersionKind for pods.
+var podKind = v1.SchemeGroupVersion.WithKind("Pod")
+
 // StatefulSetController controls statefulsets.
 type StatefulSetController struct {
 	// client interface

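For reference, the two GroupVersionKind values resolve as follows; a sketch assuming the `apps "k8s.io/api/apps/v1"` and `v1 "k8s.io/api/core/v1"` import aliases used by this file:

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
)

func main() {
	controllerKind := apps.SchemeGroupVersion.WithKind("StatefulSet")
	podKind := v1.SchemeGroupVersion.WithKind("Pod")
	fmt.Println(controllerKind.GroupVersion().String()) // "apps/v1"
	// The core group is the empty string, so a pod ownerRef carries APIVersion "v1",
	// which is exactly what matchesRef compares against below.
	fmt.Println(podKind.GroupVersion().String()) // "v1"
}
```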
@@ -26,6 +26,7 @@ import (
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -170,9 +171,100 @@ func getPersistentVolumeClaimRetentionPolicy(set *apps.StatefulSet) apps.Statefu
 	return policy
 }
 
-// claimOwnerMatchesSetAndPod returns false if the ownerRefs of the claim are not set consistently with the
+// matchesRef returns true when the object matches the owner reference, that is the name and GVK are the same.
+func matchesRef(ref *metav1.OwnerReference, obj metav1.Object, gvk schema.GroupVersionKind) bool {
+	return gvk.GroupVersion().String() == ref.APIVersion && gvk.Kind == ref.Kind && ref.Name == obj.GetName()
+}
+
+// hasUnexpectedController returns true if the set has a retention policy and there is a controller
+// for the claim that's not the set or pod. Since the retention policy may have been changed, it is
+// always valid for the set or pod to be a controller.
+func hasUnexpectedController(claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+	policy := getPersistentVolumeClaimRetentionPolicy(set)
+	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
+	if policy.WhenScaled == retain && policy.WhenDeleted == retain {
+		// On a retain policy, it's not a problem for a different controller to be managing the claims.
+		return false
+	}
+	for _, ownerRef := range claim.GetOwnerReferences() {
+		if matchesRef(&ownerRef, set, controllerKind) {
+			if ownerRef.UID != set.GetUID() {
+				// A UID mismatch means that pods were incorrectly orphaned. Treating this as an unexpected
+				// controller means we won't touch the PVCs (eg, leave it to the garbage collector to clean
+				// up if appropriate).
+				return true
+			}
+			continue // This is us.
+		}
+
+		if matchesRef(&ownerRef, pod, podKind) {
+			if ownerRef.UID != pod.GetUID() {
+				// This is the same situation as the set UID mismatch, above.
+				return true
+			}
+			continue // This is us.
+		}
+		if ownerRef.Controller != nil && *ownerRef.Controller {
+			return true // This is another controller.
+		}
+	}
+	return false
+}
+
+// hasNonControllerOwner returns true if the pod or set is an owner but not controller of the claim.
+func hasNonControllerOwner(claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+	for _, ownerRef := range claim.GetOwnerReferences() {
+		if ownerRef.UID == set.GetUID() || ownerRef.UID == pod.GetUID() {
+			if ownerRef.Controller == nil || !*ownerRef.Controller {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// removeRefs removes any owner refs from the list matching predicate, returning
+// the new (or unchanged) list.
+func removeRefs(refs []metav1.OwnerReference, predicate func(ref *metav1.OwnerReference) bool) []metav1.OwnerReference {
+	newRefs := []metav1.OwnerReference{}
+	for _, ownerRef := range refs {
+		if !predicate(&ownerRef) {
+			newRefs = append(newRefs, ownerRef)
+		}
+	}
+	return newRefs
+}
+
+// isClaimOwnerUpToDate returns false if the ownerRefs of the claim are not set consistently with the
 // PVC deletion policy for the StatefulSet.
-func claimOwnerMatchesSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+//
+// If there are stale references or unexpected controllers, this returns true in order to not touch
+// PVCs that have gotten into this unknown state. Otherwise the ownerships are checked to match the
+// PVC retention policy:
+//
+//	Retain on scaling and set deletion: no owner ref
+//	Retain on scaling and delete on set deletion: owner ref on the set only
+//	Delete on scaling and retain on set deletion: owner ref on the pod only
+//	Delete on scaling and set deletion: owner refs on both set and pod.
+func isClaimOwnerUpToDate(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
+	if hasStaleOwnerRef(claim, set, controllerKind) || hasStaleOwnerRef(claim, pod, podKind) {
+		// The claim is being managed by previous, presumably deleted, version of the controller. It should not be touched.
+		return true
+	}
+
+	if hasUnexpectedController(claim, set, pod) {
+		if hasOwnerRef(claim, set) || hasOwnerRef(claim, pod) {
+			return false // Need to clean up the conflicting controllers
+		}
+		// The claim refs are good, we don't want to add any controllers on top of the unexpected one.
+		return true
+	}
+
+	if hasNonControllerOwner(claim, set, pod) {
+		// Some resource has an owner ref, but there is no controller. This needs to be updated.
+		return false
+	}
+
 	policy := getPersistentVolumeClaimRetentionPolicy(set)
 	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
 	const delete = apps.DeletePersistentVolumeClaimRetentionPolicyType
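To make the UID handling concrete (a sketch, not part of the commit): matchesRef identifies "our" ref by name and GVK only, and hasUnexpectedController then treats a matching ref with the wrong UID as unexpected, so claims orphaned by a deleted-and-recreated set are left to the garbage collector:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	// A ref left behind by a previous incarnation of the set (hypothetical UIDs).
	ref := metav1.OwnerReference{APIVersion: "apps/v1", Kind: "StatefulSet", Name: "web", UID: types.UID("old-uid")}
	currentSetUID := types.UID("new-uid")
	// Name and GVK match (matchesRef would return true), but the UID does not:
	sameIdentity := ref.APIVersion == "apps/v1" && ref.Kind == "StatefulSet" && ref.Name == "web"
	fmt.Println(sameIdentity && ref.UID != currentSetUID) // true => unexpected controller, PVC left untouched
}
```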
@@ -214,64 +306,53 @@ func claimOwnerMatchesSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeCl
 
 // updateClaimOwnerRefForSetAndPod updates the ownerRefs for the claim according to the deletion policy of
-// the StatefulSet. Returns true if the claim was changed and should be updated and false otherwise.
-func updateClaimOwnerRefForSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) bool {
-	needsUpdate := false
-	// Sometimes the version and kind are not set {pod,set}.TypeMeta. These are necessary for the ownerRef.
-	// This is the case both in real clusters and the unittests.
-	// TODO: there must be a better way to do this other than hardcoding the pod version?
-	updateMeta := func(tm *metav1.TypeMeta, kind string) {
-		if tm.APIVersion == "" {
-			if kind == "StatefulSet" {
-				tm.APIVersion = "apps/v1"
-			} else {
-				tm.APIVersion = "v1"
-			}
-		}
-		if tm.Kind == "" {
-			tm.Kind = kind
-		}
-	}
-	podMeta := pod.TypeMeta
-	updateMeta(&podMeta, "Pod")
-	setMeta := set.TypeMeta
-	updateMeta(&setMeta, "StatefulSet")
+// the StatefulSet. isClaimOwnerUpToDate should be called before this to avoid an expensive update operation.
+func updateClaimOwnerRefForSetAndPod(logger klog.Logger, claim *v1.PersistentVolumeClaim, set *apps.StatefulSet, pod *v1.Pod) {
+	refs := claim.GetOwnerReferences()
+
+	unexpectedController := hasUnexpectedController(claim, set, pod)
+
+	// Scrub any ownerRefs to our set & pod.
+	refs = removeRefs(refs, func(ref *metav1.OwnerReference) bool {
+		return matchesRef(ref, set, controllerKind) || matchesRef(ref, pod, podKind)
+	})
+
+	if unexpectedController {
+		// Leave ownerRefs to our set & pod scrubbed and return without creating new ones.
+		claim.SetOwnerReferences(refs)
+		return
+	}
+
 	policy := getPersistentVolumeClaimRetentionPolicy(set)
 	const retain = apps.RetainPersistentVolumeClaimRetentionPolicyType
 	const delete = apps.DeletePersistentVolumeClaimRetentionPolicyType
 	switch {
 	default:
 		logger.Error(nil, "Unknown policy, treating as Retain", "policy", set.Spec.PersistentVolumeClaimRetentionPolicy)
 		fallthrough
 	case policy.WhenScaled == retain && policy.WhenDeleted == retain:
-		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
-		needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+		// Nothing to do
 	case policy.WhenScaled == retain && policy.WhenDeleted == delete:
-		needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
-		needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+		refs = addControllerRef(refs, set, controllerKind)
 	case policy.WhenScaled == delete && policy.WhenDeleted == retain:
-		needsUpdate = removeOwnerRef(claim, set) || needsUpdate
 		podScaledDown := !podInOrdinalRange(pod, set)
 		if podScaledDown {
-			needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
-		}
-		if !podScaledDown {
-			needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+			refs = addControllerRef(refs, pod, podKind)
 		}
 	case policy.WhenScaled == delete && policy.WhenDeleted == delete:
 		podScaledDown := !podInOrdinalRange(pod, set)
 		if podScaledDown {
-			needsUpdate = removeOwnerRef(claim, set) || needsUpdate
-			needsUpdate = setOwnerRef(claim, pod, &podMeta) || needsUpdate
+			refs = addControllerRef(refs, pod, podKind)
 		}
 		if !podScaledDown {
-			needsUpdate = setOwnerRef(claim, set, &setMeta) || needsUpdate
-			needsUpdate = removeOwnerRef(claim, pod) || needsUpdate
+			refs = addControllerRef(refs, set, controllerKind)
 		}
 	}
-	return needsUpdate
+	claim.SetOwnerReferences(refs)
 }
 
-// hasOwnerRef returns true if target has an ownerRef to owner.
+// hasOwnerRef returns true if target has an ownerRef to owner (as its UID).
+// This does not check if the owner is a controller.
 func hasOwnerRef(target, owner metav1.Object) bool {
 	ownerUID := owner.GetUID()
 	for _, ownerRef := range target.GetOwnerReferences() {
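The net effect of the rewritten switch, summarized as a sketch ("condemned" meaning the pod's ordinal is outside the set's current range):

```go
// Controller ref on the claim after updateClaimOwnerRefForSetAndPod, given
// that any existing refs to the set and pod have already been scrubbed:
//
//	WhenScaled=Retain, WhenDeleted=Retain -> no ref to the set or the pod
//	WhenScaled=Retain, WhenDeleted=Delete -> controller ref to the set
//	WhenScaled=Delete, WhenDeleted=Retain -> controller ref to the pod, only once condemned
//	WhenScaled=Delete, WhenDeleted=Delete -> controller ref to the pod once condemned, else to the set
```

Because the scrub happens unconditionally, switching policies converges each claim to exactly one of these states instead of patching refs incrementally.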
@@ -282,53 +363,28 @@ func hasOwnerRef(target, owner metav1.Object) bool {
 	return false
 }
 
-// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale.
-func hasStaleOwnerRef(target, owner metav1.Object) bool {
+// hasStaleOwnerRef returns true if target has a ref to owner that appears to be stale, that is,
+// the ref matches the object but not the UID.
+func hasStaleOwnerRef(target *v1.PersistentVolumeClaim, obj metav1.Object, gvk schema.GroupVersionKind) bool {
 	for _, ownerRef := range target.GetOwnerReferences() {
-		if ownerRef.Name == owner.GetName() && ownerRef.UID != owner.GetUID() {
-			return true
+		if matchesRef(&ownerRef, obj, gvk) {
+			return ownerRef.UID != obj.GetUID()
 		}
 	}
 	return false
 }
 
-// setOwnerRef adds owner to the ownerRefs of target, if necessary. Returns true if target needs to be
-// updated and false otherwise.
-func setOwnerRef(target, owner metav1.Object, ownerType *metav1.TypeMeta) bool {
-	if hasOwnerRef(target, owner) {
-		return false
-	}
-	ownerRefs := append(
-		target.GetOwnerReferences(),
-		metav1.OwnerReference{
-			APIVersion: ownerType.APIVersion,
-			Kind:       ownerType.Kind,
-			Name:       owner.GetName(),
-			UID:        owner.GetUID(),
-		})
-	target.SetOwnerReferences(ownerRefs)
-	return true
-}
-
-// removeOwnerRef removes owner from the ownerRefs of target, if necessary. Returns true if target needs
-// to be updated and false otherwise.
-func removeOwnerRef(target, owner metav1.Object) bool {
-	if !hasOwnerRef(target, owner) {
-		return false
-	}
-	ownerUID := owner.GetUID()
-	oldRefs := target.GetOwnerReferences()
-	newRefs := make([]metav1.OwnerReference, len(oldRefs)-1)
-	skip := 0
-	for i := range oldRefs {
-		if oldRefs[i].UID == ownerUID {
-			skip = -1
-		} else {
-			newRefs[i+skip] = oldRefs[i]
-		}
-	}
-	target.SetOwnerReferences(newRefs)
-	return true
-}
+// addControllerRef returns refs with owner added as a controller, if necessary.
+func addControllerRef(refs []metav1.OwnerReference, owner metav1.Object, gvk schema.GroupVersionKind) []metav1.OwnerReference {
+	for _, ref := range refs {
+		if ref.UID == owner.GetUID() {
+			// Already added. Since we scrub our refs before making any changes, we know it's already
+			// a controller if appropriate.
+			return refs
+		}
+	}
+	return append(refs, *metav1.NewControllerRef(owner, gvk))
+}
 
 // getPersistentVolumeClaims gets a map of PersistentVolumeClaims to their template names, as defined in set. The
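metav1.NewControllerRef fills in Controller and BlockOwnerDeletion, which is why the hand-built OwnerReference literals (and the hardcoded TypeMeta workaround) could be dropped. A minimal sketch:

```go
package main

import (
	"fmt"

	apps "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	set := &apps.StatefulSet{ObjectMeta: metav1.ObjectMeta{Name: "web", UID: types.UID("set-123")}}
	ref := metav1.NewControllerRef(set, apps.SchemeGroupVersion.WithKind("StatefulSet"))
	// apps/v1 StatefulSet web set-123 true true
	fmt.Println(ref.APIVersion, ref.Kind, ref.Name, ref.UID, *ref.Controller, *ref.BlockOwnerDeletion)
}
```

This is also why the e2e expectations below include Controller and BlockOwnerDeletion set to true.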
										
											
File diff suppressed because it is too large
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"reflect"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -34,6 +35,7 @@ import (
 	appsv1 "k8s.io/api/apps/v1"
 	autoscalingv1 "k8s.io/api/autoscaling/v1"
 	v1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	klabels "k8s.io/apimachinery/pkg/labels"
@@ -59,6 +61,7 @@ import (
 	imageutils "k8s.io/kubernetes/test/utils/image"
 	admissionapi "k8s.io/pod-security-admission/api"
 	"k8s.io/utils/pointer"
+	"k8s.io/utils/ptr"
 )
 
 const (
@@ -1339,6 +1342,84 @@ var _ = SIGDescribe("StatefulSet", func() {
 			framework.ExpectNoError(err)
 		})
 
+		ginkgo.It("should not delete PVC with OnScaledown policy if another controller owns the PVC", func(ctx context.Context) {
+			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
+			ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
+			*(ss.Spec.Replicas) = 3
+			_, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Confirm PVC has been created")
+			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0, 1, 2})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Create configmap to use as dummy controller")
+			dummyConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(ctx, &v1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					GenerateName: "dummy-controller",
+					Namespace:    ns,
+				},
+			}, metav1.CreateOptions{})
+			framework.ExpectNoError(err)
+			defer func() {
+				// Will be cleaned up with the namespace if this fails.
+				_ = c.CoreV1().ConfigMaps(ns).Delete(ctx, dummyConfigMap.Name, metav1.DeleteOptions{})
+			}()
+
+			ginkgo.By("Update PVC 1 owner ref")
+			pvc1Name := fmt.Sprintf("datadir-%s-1", ss.Name)
+			_, err = updatePVCWithRetries(ctx, c, ns, pvc1Name, func(update *v1.PersistentVolumeClaim) {
+				update.OwnerReferences = []metav1.OwnerReference{
+					{
+						Name:       dummyConfigMap.GetName(),
+						APIVersion: "v1",
+						Kind:       "ConfigMap",
+						UID:        dummyConfigMap.GetUID(),
+						Controller: ptr.To(true),
+					},
+				}
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Update StatefulSet retention policy")
+			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+				update.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
+					WhenScaled: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
+				}
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Scale StatefulSet down to 0")
+			_, err = e2estatefulset.Scale(ctx, c, ss, 0)
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Verify PVC 1 still exists")
+			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{1})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Remove PVC 1 owner ref")
+			_, err = updatePVCWithRetries(ctx, c, ns, pvc1Name, func(update *v1.PersistentVolumeClaim) {
+				update.OwnerReferences = nil
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Scale set back up to 2")
+			_, err = e2estatefulset.Scale(ctx, c, ss, 2)
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Confirm PVCs scaled up as well")
+			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0, 1})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Scale set down to 1")
+			_, err = e2estatefulset.Scale(ctx, c, ss, 1)
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Confirm PVC 1 deleted this time")
+			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0})
+			framework.ExpectNoError(err)
+		})
+
 		ginkgo.It("should delete PVCs after adopting pod (WhenDeleted)", func(ctx context.Context) {
 			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
 			ginkgo.By("Creating statefulset " + ssName + " in namespace " + ns)
@@ -1400,6 +1481,191 @@ var _ = SIGDescribe("StatefulSet", func() {
 			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0})
 			framework.ExpectNoError(err)
 		})
+
+		ginkgo.It("should not delete PVCs when there is another controller", func(ctx context.Context) {
+			e2epv.SkipIfNoDefaultStorageClass(ctx, c)
+			ginkgo.By("Creating statefulset " + ssName + " with no retention policy in namespace " + ns)
+
+			*(ss.Spec.Replicas) = 4
+			_, err := c.AppsV1().StatefulSets(ns).Create(ctx, ss, metav1.CreateOptions{})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Confirming all 4 PVCs exist")
+			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{0, 1, 2, 3})
+			framework.ExpectNoError(err)
+
+			claimNames := make([]string, 4)
+			for i := 0; i < 4; i++ {
+				claimNames[i] = fmt.Sprintf("%s-%s-%d", statefulPodMounts[0].Name, ssName, i)
+			}
+
+			ginkgo.By("Create configmap to use as random owner")
+			randomConfigMap, err := c.CoreV1().ConfigMaps(ns).Create(ctx, &v1.ConfigMap{
+				ObjectMeta: metav1.ObjectMeta{
+					GenerateName: "random-owner",
+					Namespace:    ns,
+				},
+			}, metav1.CreateOptions{})
+			framework.ExpectNoError(err)
+			defer func() {
+				// Will be cleaned up by the namespace delete if this fails
+				_ = c.CoreV1().ConfigMaps(ns).Delete(ctx, randomConfigMap.Name, metav1.DeleteOptions{})
+			}()
+
+			ginkgo.By("Add external owner to PVC 1")
+			expectedExternalRef := []metav1.OwnerReference{
+				{
+					APIVersion: "v1",
+					Kind:       "ConfigMap",
+					Name:       randomConfigMap.GetName(),
+					UID:        randomConfigMap.GetUID(),
+				},
+			}
+			_, err = updatePVCWithRetries(ctx, c, ns, claimNames[1], func(update *v1.PersistentVolumeClaim) {
+				update.SetOwnerReferences(expectedExternalRef)
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Add stale statefulset controller to PVC 3, with finalizer to prevent garbage collection")
+			expectedStaleRef := []metav1.OwnerReference{
+				{
+					APIVersion:         "apps/v1",
+					Kind:               "StatefulSet",
+					Name:               "unknown",
+					UID:                "9d86d6ae-4e06-4ff1-bc55-f77f52e272e9",
+					Controller:         ptr.To(true),
+					BlockOwnerDeletion: ptr.To(true),
+				},
+			}
+			_, err = updatePVCWithRetries(ctx, c, ns, claimNames[3], func(update *v1.PersistentVolumeClaim) {
+				update.SetOwnerReferences(expectedStaleRef)
+				update.SetFinalizers([]string{"keep-with/stale-ref"})
+			})
+			framework.ExpectNoError(err)
+
+			defer func() {
+				if _, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[3], metav1.GetOptions{}); apierrors.IsNotFound(err) {
+					return
+				}
+				_, err := updatePVCWithRetries(ctx, c, ns, claimNames[3], func(update *v1.PersistentVolumeClaim) {
+					update.SetFinalizers([]string{})
+				})
+				framework.ExpectNoError(err)
+			}()
+
+			ginkgo.By("Check references updated")
+			err = wait.PollUntilContextTimeout(ctx, e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
+				claim, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[1], metav1.GetOptions{})
+				if err != nil {
+					return false, nil // retry
+				}
+				if !reflect.DeepEqual(claim.GetOwnerReferences(), expectedExternalRef) {
+					return false, nil // retry
+				}
+				claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[3], metav1.GetOptions{})
+				if err != nil {
+					return false, nil // retry
+				}
+				if !reflect.DeepEqual(claim.GetOwnerReferences(), expectedStaleRef) {
+					return false, nil // retry
+				}
+				return true, nil // found them all!
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Update retention policy to delete to force claims to resync")
+			var ssUID types.UID
+			_, err = updateStatefulSetWithRetries(ctx, c, ns, ssName, func(update *appsv1.StatefulSet) {
+				update.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
+					WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
+				}
+				ssUID = update.GetUID()
+			})
+			framework.ExpectNoError(err)
+
+			expectedOwnerRef := []metav1.OwnerReference{
+				{
+					APIVersion:         "apps/v1",
+					Kind:               "StatefulSet",
+					Name:               ssName,
+					UID:                ssUID,
+					Controller:         ptr.To(true),
+					BlockOwnerDeletion: ptr.To(true),
+				},
+			}
+			ginkgo.By("Expect claims 0, 1 and 2 to have ownerRefs to the statefulset, and 3 to have a stale reference")
+			err = wait.PollUntilContextTimeout(ctx, e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
+				for _, i := range []int{0, 2} {
+					claim, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[i], metav1.GetOptions{})
+					if err != nil {
+						return false, nil // retry
+					}
+					if !reflect.DeepEqual(claim.GetOwnerReferences(), expectedOwnerRef) {
+						return false, nil // retry
+					}
+				}
+				claim, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[1], metav1.GetOptions{})
+				if err != nil {
+					return false, nil // retry
+				}
+				// Claim 1's external owner is neither its pod nor its set, so it should get updated with a controller.
+				if !reflect.DeepEqual(claim.GetOwnerReferences(), slices.Concat(expectedExternalRef, expectedOwnerRef)) {
+					return false, nil // retry
+				}
+				claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[3], metav1.GetOptions{})
+				if err != nil {
+					return false, nil // retry
+				}
+				if !reflect.DeepEqual(claim.GetOwnerReferences(), expectedStaleRef) {
+					return false, nil // retry
+				}
+				return true, nil // found them all!
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Remove controller flag from claim 0")
+			_, err = updatePVCWithRetries(ctx, c, ns, claimNames[0], func(update *v1.PersistentVolumeClaim) {
+				update.SetOwnerReferences([]metav1.OwnerReference{
+					{
+						APIVersion:         "apps/v1",
+						Kind:               "StatefulSet",
+						Name:               ssName,
+						UID:                ssUID,
+						Controller:         ptr.To(false),
+						BlockOwnerDeletion: ptr.To(true),
+					},
+				})
+			})
+			framework.ExpectNoError(err)
+
+			ginkgo.By("Update statefulset to provoke a reconcile")
+			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ssName, func(update *appsv1.StatefulSet) {
+				update.Spec.PersistentVolumeClaimRetentionPolicy = &appsv1.StatefulSetPersistentVolumeClaimRetentionPolicy{
+					WhenDeleted: appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
+					WhenScaled:  appsv1.DeletePersistentVolumeClaimRetentionPolicyType,
+				}
+			})
+			framework.ExpectNoError(err)
+			ginkgo.By("Expect controller flag for claim 0 to reconcile back to true")
+			err = wait.PollUntilContextTimeout(ctx, e2estatefulset.StatefulSetPoll, e2estatefulset.StatefulSetTimeout, true, func(ctx context.Context) (bool, error) {
+				claim, err := c.CoreV1().PersistentVolumeClaims(ns).Get(ctx, claimNames[0], metav1.GetOptions{})
+				if err != nil {
+					return false, nil // retry
+				}
+				if reflect.DeepEqual(claim.GetOwnerReferences(), expectedOwnerRef) {
+					return true, nil // success!
+				}
+				return false, nil // retry
+			})
+			framework.ExpectNoError(err)
+
+			// Claim 1 has an external owner, and 3 has a finalizer still, so they will not be deleted.
+			ginkgo.By("Delete the stateful set and wait for claims 0 and 2 but not 1 and 3 to disappear")
+			err = c.AppsV1().StatefulSets(ns).Delete(ctx, ssName, metav1.DeleteOptions{})
+			framework.ExpectNoError(err)
+			err = verifyStatefulSetPVCsExist(ctx, c, ss, []int{1, 3})
+			framework.ExpectNoError(err)
+		})
 	})
 
 	ginkgo.Describe("Automatically recreate PVC for pending pod when PVC is missing", func() {
@@ -1587,7 +1853,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			framework.ExpectNoError(err)
 
 			ginkgo.By("Increasing .spec.ordinals.start = 4")
-			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ssName, func(update *appsv1.StatefulSet) {
 				update.Spec.Ordinals = &appsv1.StatefulSetOrdinals{
 					Start: 4,
 				}
@@ -1621,7 +1887,7 @@ var _ = SIGDescribe("StatefulSet", func() {
 			framework.ExpectNoError(err)
 
 			ginkgo.By("Decreasing .spec.ordinals.start = 2")
-			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ssName, func(update *appsv1.StatefulSet) {
 				update.Spec.Ordinals = &appsv1.StatefulSetOrdinals{
 					Start: 2,
 				}
@@ -1654,14 +1920,14 @@ var _ = SIGDescribe("StatefulSet", func() {
 			framework.ExpectNoError(err)
 
 			ginkgo.By("Removing .spec.ordinals")
-			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ss.Name, func(update *appsv1.StatefulSet) {
+			ss, err = updateStatefulSetWithRetries(ctx, c, ns, ssName, func(update *appsv1.StatefulSet) {
 				update.Spec.Ordinals = nil
 			})
 			framework.ExpectNoError(err)
 
 			// since we are replacing 2 pods for 2, we need to ensure we wait
 			// for the new ones to show up, not just for any random 2
-			framework.Logf("Confirming 2 replicas, with start ordinal 0")
+			ginkgo.By("Confirming 2 replicas, with start ordinal 0")
 			waitForStatus(ctx, c, ss)
 			waitForPodNames(ctx, c, ss, []string{"ss-0", "ss-1"})
 			e2estatefulset.WaitForStatusReplicas(ctx, c, ss, 2)
@@ -2231,7 +2497,7 @@ type updateStatefulSetFunc func(*appsv1.StatefulSet)
 func updateStatefulSetWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate updateStatefulSetFunc) (statefulSet *appsv1.StatefulSet, err error) {
 	statefulSets := c.AppsV1().StatefulSets(namespace)
 	var updateErr error
-	pollErr := wait.PollWithContext(ctx, 10*time.Millisecond, 1*time.Minute, func(ctx context.Context) (bool, error) {
+	pollErr := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
 		if statefulSet, err = statefulSets.Get(ctx, name, metav1.GetOptions{}); err != nil {
 			return false, err
 		}
@@ -2245,11 +2511,34 @@ func updateStatefulSetWithRetries(ctx context.Context, c clientset.Interface, na
 		return false, nil
 	})
 	if wait.Interrupted(pollErr) {
-		pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %v", name, updateErr)
+		pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %w", name, updateErr)
 	}
 	return statefulSet, pollErr
 }
 
+// updatePVCWithRetries updates PVCs with retries.
+func updatePVCWithRetries(ctx context.Context, c clientset.Interface, namespace, name string, applyUpdate func(*v1.PersistentVolumeClaim)) (pvc *v1.PersistentVolumeClaim, err error) {
+	pvcs := c.CoreV1().PersistentVolumeClaims(namespace)
+	var updateErr error
+	pollErr := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 1*time.Minute, true, func(ctx context.Context) (bool, error) {
+		if pvc, err = pvcs.Get(ctx, name, metav1.GetOptions{}); err != nil {
+			return false, err
+		}
+		// Apply the update, then attempt to push it to the apiserver.
+		applyUpdate(pvc)
+		if pvc, err = pvcs.Update(ctx, pvc, metav1.UpdateOptions{}); err == nil {
+			framework.Logf("Updating pvc %s", name)
+			return true, nil
+		}
+		updateErr = err
+		return false, nil
+	})
+	if wait.Interrupted(pollErr) {
+		pollErr = fmt.Errorf("couldn't apply the provided updated to stateful set %q: %w", name, updateErr)
+	}
+	return pvc, pollErr
+}
+
 // getStatefulSet gets the StatefulSet named name in namespace.
 func getStatefulSet(ctx context.Context, c clientset.Interface, namespace, name string) *appsv1.StatefulSet {
 	ss, err := c.AppsV1().StatefulSets(namespace).Get(ctx, name, metav1.GetOptions{})
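Both retry helpers now use wait.PollUntilContextTimeout, since wait.PollWithContext is deprecated; the extra boolean makes the first poll immediate instead of waiting one interval. A usage sketch, where tryOnce is a hypothetical condition function:

```go
pollErr := wait.PollUntilContextTimeout(ctx, 10*time.Millisecond, 1*time.Minute, true,
	func(ctx context.Context) (bool, error) {
		return tryOnce(ctx) // hypothetical (done bool, err error) condition
	})
```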