Merge pull request #52111 from MrHohn/kube-proxy-upgrade-image
Automatic merge from submit-queue

Pipe in upgrade image target for kube-proxy migration tests

**What this PR does / why we need it**: https://k8s-testgrid.appspot.com/sig-network#gci-gce-latest-upgrade-kube-proxy-ds&width=20 and https://k8s-testgrid.appspot.com/sig-network#gci-gce-latest-downgrade-kube-proxy-ds&width=20 are still failing. Reproduced it locally and found the node image was defaulting to debian during the upgrade (it was gci before the upgrade) because we don't pass in `gci` via `--upgrade-target`. And for some reason (not yet figured out), the upgraded node uses the debian image with gci startup scripts...

This PR pipes in `--upgrade-target` for the kube-proxy migration tests; hopefully, in conjunction with https://github.com/kubernetes/test-infra/pull/4447, it will bring the tests back to normal.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #NONE

**Special notes for your reviewer**: Sorry for bothering again.
/assign @krousey

**Release note**:
```release-note
NONE
```
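For illustration, a minimal, self-contained sketch of the wiring this change introduces — not the actual framework code. It mirrors how the upgrade image is now threaded from the test context down to the GCE node-upgrade helper (the diff below shows the real change); the version string, default-image placeholder, and printed output here are hypothetical:

```go
package main

import "fmt"

// testContext mirrors the single field of framework.TestContext that matters
// here; in the real framework it is populated from the e2e test runner flags.
type testContext struct {
	UpgradeImage string
}

var TestContext = testContext{UpgradeImage: "gci"}

// nodeUpgradeGCE stands in for the unexported framework helper. Before this
// change, callers always passed "" for img, so the node image silently fell
// back to the provider default (debian) during the upgrade.
func nodeUpgradeGCE(version, img string, enableKubeProxyDaemonSet bool) error {
	if img == "" {
		img = "<provider default>" // placeholder: whatever the upgrade script defaults to
	}
	fmt.Printf("upgrading nodes to %s on image %q (kube-proxy DaemonSet: %v)\n",
		version, img, enableKubeProxyDaemonSet)
	return nil
}

// NodeUpgradeGCEWithKubeProxyDaemonSet now accepts img and forwards it instead
// of dropping it, mirroring the first hunk of the diff below.
func NodeUpgradeGCEWithKubeProxyDaemonSet(version, img string, enableKubeProxyDaemonSet bool) error {
	return nodeUpgradeGCE(version, img, enableKubeProxyDaemonSet)
}

func main() {
	// The migration tests now pass TestContext.UpgradeImage through, mirroring
	// the second and third hunks of the diff below.
	target := "v1.8.0" // hypothetical target version for illustration
	if err := NodeUpgradeGCEWithKubeProxyDaemonSet(target, TestContext.UpgradeImage, true); err != nil {
		fmt.Println("node upgrade failed:", err)
	}
}
```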
```diff
@@ -130,9 +130,9 @@ func NodeUpgrade(f *Framework, v string, img string) error {
 }
 
 // TODO(mrhohn): Remove this function when kube-proxy is run as a DaemonSet by default.
-func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, enableKubeProxyDaemonSet bool) error {
+func NodeUpgradeGCEWithKubeProxyDaemonSet(f *Framework, v string, img string, enableKubeProxyDaemonSet bool) error {
 	// Perform the upgrade.
-	if err := nodeUpgradeGCE(v, "", enableKubeProxyDaemonSet); err != nil {
+	if err := nodeUpgradeGCE(v, img, enableKubeProxyDaemonSet); err != nil {
 		return err
 	}
 	// Wait for it to complete and validate nodes are healthy.
@@ -255,7 +255,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 			target := upgCtx.Versions[1].Version.String()
 			framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
 			framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
-			framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, true))
+			framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, true))
 			framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
 		}
 		runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgCtx, upgrades.ClusterUpgrade, upgradeFunc)
@@ -281,7 +281,7 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 			defer finalizeUpgradeTest(start, kubeProxyDowngradeTest)
 			// Yes this really is a downgrade. And nodes must downgrade first.
 			target := upgCtx.Versions[1].Version.String()
-			framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, false))
+			framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, framework.TestContext.UpgradeImage, false))
 			framework.ExpectNoError(framework.CheckNodesVersions(f.ClientSet, target))
 			framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
 			framework.ExpectNoError(framework.CheckMasterVersion(f.ClientSet, target))
```