Merge pull request #105535 from soltysh/apps_cleanup

Move test utils where they belong
Kubernetes Prow Robot authored this commit on 2021-10-12 08:11:48 -07:00 (committed by GitHub)
7 changed files with 59 additions and 294 deletions

View File

@@ -637,7 +637,7 @@ func failureTrap(c clientset.Interface, ns string) {
d := deployments.Items[i]
framework.Logf(spew.Sprintf("Deployment %q:\n%+v\n", d.Name, d))
- _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(&d, c.AppsV1())
+ _, allOldRSs, newRS, err := testutil.GetAllReplicaSets(&d, c)
if err != nil {
framework.Logf("Could not list ReplicaSets for Deployment %q: %v", d.Name, err)
return
@@ -740,7 +740,7 @@ func testDeleteDeployment(f *framework.Framework) {
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
- newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+ newRS, err := testutil.GetNewReplicaSet(deployment, c)
framework.ExpectNoError(err)
framework.ExpectNotEqual(newRS, nilRs)
stopDeployment(c, ns, deploymentName)
@@ -790,7 +790,7 @@ func testRollingUpdateDeployment(f *framework.Framework) {
framework.Logf("Ensuring deployment %q has one old replica set (the one it adopted)", deploy.Name)
deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), deploymentName, metav1.GetOptions{})
framework.ExpectNoError(err)
- _, allOldRSs, err := deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
+ _, allOldRSs, err := testutil.GetOldReplicaSets(deployment, c)
framework.ExpectNoError(err)
framework.ExpectEqual(len(allOldRSs), 1)
}
@@ -956,7 +956,7 @@ func testRolloverDeployment(f *framework.Framework) {
oldRS, err := c.AppsV1().ReplicaSets(ns).Get(context.TODO(), rsName, metav1.GetOptions{})
framework.ExpectNoError(err)
ensureReplicas(oldRS, int32(1))
- newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+ newRS, err := testutil.GetNewReplicaSet(deployment, c)
framework.ExpectNoError(err)
ensureReplicas(newRS, int32(1))
@@ -1222,7 +1222,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
err = e2edeployment.WaitForDeploymentComplete(c, deployment)
framework.ExpectNoError(err)
- firstRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+ firstRS, err := testutil.GetNewReplicaSet(deployment, c)
framework.ExpectNoError(err)
// Update the deployment with a non-existent image so that the new replica set
@@ -1260,7 +1260,7 @@ func testProportionalScalingDeployment(f *framework.Framework) {
framework.ExpectNoError(err)
// Checking state of second rollout's replicaset.
- secondRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+ secondRS, err := testutil.GetNewReplicaSet(deployment, c)
framework.ExpectNoError(err)
maxSurge, err := intstr.GetScaledValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), false)
@@ -1536,8 +1536,8 @@ func watchRecreateDeployment(c clientset.Interface, d *appsv1.Deployment) error
status = d.Status
if d.Status.UpdatedReplicas > 0 && d.Status.Replicas != d.Status.UpdatedReplicas {
- _, allOldRSs, err := deploymentutil.GetOldReplicaSets(d, c.AppsV1())
- newRS, nerr := deploymentutil.GetNewReplicaSet(d, c.AppsV1())
+ _, allOldRSs, err := testutil.GetOldReplicaSets(d, c)
+ newRS, nerr := testutil.GetNewReplicaSet(d, c)
if err == nil && nerr == nil {
framework.Logf("%+v", d)
testutil.LogReplicaSetsOfDeployment(d, allOldRSs, newRS, framework.Logf)
@@ -1572,7 +1572,7 @@ func waitForDeploymentOldRSsNum(c clientset.Interface, ns, deploymentName string
}
d = deployment
- _, oldRSs, err = deploymentutil.GetOldReplicaSets(deployment, c.AppsV1())
+ _, oldRSs, err = testutil.GetOldReplicaSets(deployment, c)
if err != nil {
return false, err
}

View File

@@ -31,6 +31,7 @@ import (
"k8s.io/client-go/util/retry"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/test/integration/framework"
testutil "k8s.io/kubernetes/test/utils"
"k8s.io/utils/pointer"
)
@@ -197,7 +198,7 @@ func TestDeploymentRollingUpdate(t *testing.T) {
if err := tester.waitForDeploymentCompleteAndCheckRollingAndMarkPodsReady(); err != nil {
t.Fatal(err)
}
- _, allOldRSs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1())
+ _, allOldRSs, err := testutil.GetOldReplicaSets(tester.deployment, c)
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
@@ -332,7 +333,7 @@ func TestPausedDeployment(t *testing.T) {
t.Fatal(err)
}
- _, allOldRs, err := deploymentutil.GetOldReplicaSets(tester.deployment, c.AppsV1())
+ _, allOldRs, err := testutil.GetOldReplicaSets(tester.deployment, c)
if err != nil {
t.Fatalf("failed retrieving old replicasets of deployment %s: %v", tester.deployment.Name, err)
}
@@ -457,7 +458,7 @@ func TestDeploymentHashCollision(t *testing.T) {
}
// Mock a hash collision
- newRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
+ newRS, err := testutil.GetNewReplicaSet(tester.deployment, c)
if err != nil {
t.Fatalf("failed getting new replicaset of deployment %s: %v", tester.deployment.Name, err)
}
@@ -751,7 +752,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify every replicaset has correct desiredReplicas annotation after 3rd rollout
- thirdRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
+ thirdRS, err := testutil.GetNewReplicaSet(tester.deployment, c)
if err != nil {
t.Fatalf("failed getting new revision 3 replicaset for deployment %q: %v", name, err)
}
@@ -828,7 +829,7 @@ func TestScaledRolloutDeployment(t *testing.T) {
}
// Verify every replicaset has correct desiredReplicas annotation after 5th rollout
- fifthRS, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
+ fifthRS, err := testutil.GetNewReplicaSet(tester.deployment, c)
if err != nil {
t.Fatalf("failed getting new revision 5 replicaset for deployment %q: %v", name, err)
}
@@ -1059,7 +1060,7 @@ func TestGeneralReplicaSetAdoption(t *testing.T) {
}
// Get replicaset of the deployment
- rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
+ rs, err := testutil.GetNewReplicaSet(tester.deployment, c)
if err != nil {
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
}
@@ -1197,7 +1198,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// Orphaning: deployment should remove OwnerReference from a RS when the RS's labels change to not match its labels
// Get replicaset of the deployment
- rs, err := deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
+ rs, err := testutil.GetNewReplicaSet(tester.deployment, c)
if err != nil {
t.Fatalf("failed to get replicaset of deployment %q: %v", deploymentName, err)
}
@@ -1240,7 +1241,7 @@ func TestReplicaSetOrphaningAndAdoptionWhenLabelsChange(t *testing.T) {
// i.e., the new replicaset will have a name with different hash to preserve name uniqueness
var newRS *apps.ReplicaSet
if err = wait.PollImmediate(pollInterval, pollTimeout, func() (bool, error) {
- newRS, err = deploymentutil.GetNewReplicaSet(tester.deployment, c.AppsV1())
+ newRS, err = testutil.GetNewReplicaSet(tester.deployment, c)
if err != nil {
return false, fmt.Errorf("failed to get new replicaset of deployment %q after orphaning: %v", deploymentName, err)
}

View File

@@ -293,7 +293,7 @@ func (d *deploymentTester) getNewReplicaSet() (*apps.ReplicaSet, error) {
if err != nil {
return nil, fmt.Errorf("failed retrieving deployment %s: %v", d.deployment.Name, err)
}
- rs, err := deploymentutil.GetNewReplicaSet(deployment, d.c.AppsV1())
+ rs, err := testutil.GetNewReplicaSet(deployment, d.c)
if err != nil {
return nil, fmt.Errorf("failed retrieving new replicaset of deployment %s: %v", d.deployment.Name, err)
}

View File

@@ -117,7 +117,7 @@ func waitForDeploymentCompleteMaybeCheckRolling(c clientset.Interface, d *apps.D
func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment, logf LogfFn) (string, error) {
var reason string
- oldRSs, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
+ oldRSs, allOldRSs, newRS, err := GetAllReplicaSets(deployment, c)
if err != nil {
return "", err
}
@@ -152,6 +152,40 @@ func checkRollingUpdateStatus(c clientset.Interface, deployment *apps.Deployment
return "", nil
}
+// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
+// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
+// The third returned value is the new replica set, and it may be nil if it doesn't exist yet.
+func GetAllReplicaSets(deployment *apps.Deployment, c clientset.Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, *apps.ReplicaSet, error) {
+    rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
+    if err != nil {
+        return nil, nil, nil, err
+    }
+    oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(deployment, rsList)
+    newRS := deploymentutil.FindNewReplicaSet(deployment, rsList)
+    return oldRSes, allOldRSes, newRS, nil
+}
+
+// GetOldReplicaSets returns the old replica sets targeted by the given Deployment; get PodList and ReplicaSetList from client interface.
+// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
+func GetOldReplicaSets(deployment *apps.Deployment, c clientset.Interface) ([]*apps.ReplicaSet, []*apps.ReplicaSet, error) {
+    rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
+    if err != nil {
+        return nil, nil, err
+    }
+    oldRSes, allOldRSes := deploymentutil.FindOldReplicaSets(deployment, rsList)
+    return oldRSes, allOldRSes, nil
+}
+
+// GetNewReplicaSet returns a replica set that matches the intent of the given deployment; get ReplicaSetList from client interface.
+// Returns nil if the new replica set doesn't exist yet.
+func GetNewReplicaSet(deployment *apps.Deployment, c clientset.Interface) (*apps.ReplicaSet, error) {
+    rsList, err := deploymentutil.ListReplicaSets(deployment, deploymentutil.RsListFromClient(c.AppsV1()))
+    if err != nil {
+        return nil, err
+    }
+    return deploymentutil.FindNewReplicaSet(deployment, rsList), nil
+}
+
// Waits for the deployment to complete, and check rolling update strategy isn't broken at any times.
// Rolling update strategy should not be broken during a rolling update.
func WaitForDeploymentCompleteAndCheckRolling(c clientset.Interface, d *apps.Deployment, logf LogfFn, pollInterval, pollTimeout time.Duration) error {
@@ -180,7 +214,7 @@ func WaitForDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName
return false, err
}
// The new ReplicaSet needs to be non-nil and contain the pod-template-hash label
- newRS, err = deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+ newRS, err = GetNewReplicaSet(deployment, c)
if err != nil {
return false, err
}
@@ -223,7 +257,7 @@ func CheckDeploymentRevisionAndImage(c clientset.Interface, ns, deploymentName,
}
// Check revision of the new replica set of this deployment
- newRS, err := deploymentutil.GetNewReplicaSet(deployment, c.AppsV1())
+ newRS, err := GetNewReplicaSet(deployment, c)
if err != nil {
return fmt.Errorf("unable to get new replicaset of deployment %s during revision check: %v", deploymentName, err)
}
@@ -344,7 +378,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
})
if pollErr == wait.ErrWaitTimeout {
pollErr = fmt.Errorf("deployment %q never updated with the desired condition and reason, latest deployment conditions: %+v", deployment.Name, deployment.Status.Conditions)
- _, allOldRSs, newRS, err := deploymentutil.GetAllReplicaSets(deployment, c.AppsV1())
+ _, allOldRSs, newRS, err := GetAllReplicaSets(deployment, c)
if err == nil {
LogReplicaSetsOfDeployment(deployment, allOldRSs, newRS, logf)
LogPodsOfDeployment(c, deployment, append(allOldRSs, newRS), logf)
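The helpers added to k8s.io/kubernetes/test/utils above take a plain clientset.Interface, so test code no longer has to hand the typed apps/v1 client to the controller-side deploymentutil package at call sites. A minimal sketch of how a test might consume them after this change; the package name, function name, and the Printf-style logging are illustrative assumptions, not part of the diff:

package example

import (
    "context"
    "fmt"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    clientset "k8s.io/client-go/kubernetes"
    testutil "k8s.io/kubernetes/test/utils"
)

// describeDeploymentReplicaSets fetches a Deployment and reports its old and
// new ReplicaSets via the helpers introduced in test/utils (hypothetical usage).
func describeDeploymentReplicaSets(c clientset.Interface, ns, name string) error {
    deployment, err := c.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
    if err != nil {
        return fmt.Errorf("failed to get deployment %q: %v", name, err)
    }

    // Old (non-empty), all old, and new ReplicaSets in one call; note the
    // helpers take the whole clientset.Interface rather than c.AppsV1().
    oldRSs, allOldRSs, newRS, err := testutil.GetAllReplicaSets(deployment, c)
    if err != nil {
        return fmt.Errorf("failed to list replica sets of deployment %q: %v", name, err)
    }

    fmt.Printf("deployment %q: %d old ReplicaSets with pods, %d old ReplicaSets total\n", name, len(oldRSs), len(allOldRSs))
    if newRS == nil {
        // GetAllReplicaSets/GetNewReplicaSet return a nil new ReplicaSet if it doesn't exist yet.
        fmt.Printf("deployment %q has no new ReplicaSet yet\n", name)
        return nil
    }
    fmt.Printf("new ReplicaSet %q has %d ready replicas\n", newRS.Name, newRS.Status.ReadyReplicas)
    return nil
}

In a real e2e or integration test the logging would go through framework.Logf or t.Logf and the clientset would come from the test framework; the point of the sketch is only the call shape of the new test/utils helpers.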