@@ -20,19 +20,24 @@ import (
 	"encoding/xml"
 	"fmt"
 	"os"
+	"os/exec"
+	"path"
 	"path/filepath"
 	"regexp"
 	"strings"
 	"sync"
 	"time"
 
-	"k8s.io/apimachinery/pkg/util/version"
+	utilversion "k8s.io/apimachinery/pkg/util/version"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/version"
 	"k8s.io/client-go/discovery"
+	clientset "k8s.io/client-go/kubernetes"
 	"k8s.io/kubernetes/test/e2e/chaosmonkey"
 	"k8s.io/kubernetes/test/e2e/framework"
 	"k8s.io/kubernetes/test/e2e/framework/config"
 	"k8s.io/kubernetes/test/e2e/framework/ginkgowrapper"
-	e2elifecycle "k8s.io/kubernetes/test/e2e/framework/lifecycle"
+	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
 	"k8s.io/kubernetes/test/e2e/upgrades"
 	apps "k8s.io/kubernetes/test/e2e/upgrades/apps"
 	"k8s.io/kubernetes/test/e2e/upgrades/storage"
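The alias added above resolves a name collision introduced by this patch: k8s.io/apimachinery/pkg/version (the package defining the Info struct that ServerVersion() returns) and the semver utilities in k8s.io/apimachinery/pkg/util/version would otherwise both bind the name "version". A minimal sketch of how the two packages are used side by side after this change (parseServerVersion is an illustrative name, not part of the patch):

import (
	utilversion "k8s.io/apimachinery/pkg/util/version"
	"k8s.io/apimachinery/pkg/version"
)

// parseServerVersion shows the division of labor: version.Info carries the
// raw GitVersion string, while utilversion turns it into a comparable
// semantic version.
func parseServerVersion(info *version.Info) (*utilversion.Version, error) {
	return utilversion.ParseSemantic(info.GitVersion)
}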
@@ -104,7 +109,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 				defer finalizeUpgradeTest(start, masterUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
 		})
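The call-site hunks that follow all make this same mechanical swap: e2elifecycle.CheckMasterVersion and e2elifecycle.CheckNodesVersions become the file-local checkMasterVersion and checkNodesVersions defined in the final hunk of this patch. The signatures are unchanged, so only the package qualifier differs at each call:

func checkMasterVersion(c clientset.Interface, want string) error
func checkNodesVersions(cs clientset.Interface, want string) error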
@@ -126,7 +131,7 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 				defer finalizeUpgradeTest(start, nodeUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.NodeUpgrade, upgradeFunc)
 		})
@@ -145,9 +150,9 @@ var _ = SIGDescribe("Upgrade [Feature:Upgrade]", func() {
 				defer finalizeUpgradeTest(start, clusterUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
@@ -176,9 +181,9 @@ var _ = SIGDescribe("Downgrade [Feature:Downgrade]", func() {
 				// Yes this really is a downgrade. And nodes must downgrade first.
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, upgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
@@ -226,7 +231,7 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 				defer finalizeUpgradeTest(start, gpuUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.MasterUpgrade, upgradeFunc)
 		})
@@ -244,9 +249,9 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 				defer finalizeUpgradeTest(start, gpuUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
@@ -264,9 +269,9 @@ var _ = SIGDescribe("gpu Upgrade [Feature:GPUUpgrade]", func() {
 				defer finalizeUpgradeTest(start, gpuDowngradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, gpuUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
@@ -292,9 +297,9 @@ var _ = ginkgo.Describe("[sig-apps] stateful Upgrade [Feature:StatefulUpgrade]",
 				defer finalizeUpgradeTest(start, statefulUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgrade(f, target))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 				framework.ExpectNoError(framework.NodeUpgrade(f, target, *upgradeImage))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, statefulsetUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
@@ -327,9 +332,9 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 				defer finalizeUpgradeTest(start, kubeProxyUpgradeTest)
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, true))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 				framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, true))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, kubeProxyUpgradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
@@ -355,9 +360,9 @@ var _ = SIGDescribe("kube-proxy migration [Feature:KubeProxyDaemonSetMigration]"
 				// Yes this really is a downgrade. And nodes must downgrade first.
 				target := upgCtx.Versions[1].Version.String()
 				framework.ExpectNoError(framework.NodeUpgradeGCEWithKubeProxyDaemonSet(f, target, *upgradeImage, false))
-				framework.ExpectNoError(e2elifecycle.CheckNodesVersions(f.ClientSet, target))
+				framework.ExpectNoError(checkNodesVersions(f.ClientSet, target))
 				framework.ExpectNoError(framework.MasterUpgradeGCEWithKubeProxyDaemonSet(target, false))
-				framework.ExpectNoError(e2elifecycle.CheckMasterVersion(f.ClientSet, target))
+				framework.ExpectNoError(checkMasterVersion(f.ClientSet, target))
 			}
 			runUpgradeSuite(f, kubeProxyDowngradeTests, testFrameworks, testSuite, upgrades.ClusterUpgrade, upgradeFunc)
 		})
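In the two kube-proxy migration hunks above, the trailing boolean passed to MasterUpgradeGCEWithKubeProxyDaemonSet and NodeUpgradeGCEWithKubeProxyDaemonSet is what distinguishes the two directions: the true/false pairing at the call sites suggests it toggles the kube-proxy DaemonSet on for the migration and off again for the downgrade. The parameter itself is defined in the framework package, outside this patch.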
@@ -485,7 +490,7 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u
 		return nil, err
 	}
 
-	curVer, err := version.ParseSemantic(current.String())
+	curVer, err := utilversion.ParseSemantic(current.String())
 	if err != nil {
 		return nil, err
 	}
@@ -503,12 +508,12 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u
 		return upgCtx, nil
 	}
 
-	next, err := e2elifecycle.RealVersion(upgradeTarget)
+	next, err := realVersion(upgradeTarget)
 	if err != nil {
 		return nil, err
 	}
 
-	nextVer, err := version.ParseSemantic(next)
+	nextVer, err := utilversion.ParseSemantic(next)
 	if err != nil {
 		return nil, err
 	}
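The two ParseSemantic call sites above see slightly different inputs: current.String() comes from discovery and may carry a leading "v", while realVersion (added below) trims that prefix before returning. ParseSemantic tolerates either form, which is what lets both calls share one parser. A standalone sketch of that behavior, with illustrative version strings:

package main

import (
	"fmt"

	utilversion "k8s.io/apimachinery/pkg/util/version"
)

func main() {
	// Both the discovery-style form and the trimmed form parse cleanly.
	for _, s := range []string{"v1.17.0", "1.17.0-beta.1"} {
		v, err := utilversion.ParseSemantic(s)
		if err != nil {
			panic(err)
		}
		fmt.Println(v.Major(), v.Minor(), v.String())
	}
}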
@@ -520,3 +525,82 @@ func getUpgradeContext(c discovery.DiscoveryInterface, upgradeTarget string) (*u
 
 	return upgCtx, nil
 }
+
+// realVersion turns a version constant into a version string deployable on
+// GKE. See hack/get-build.sh for more information.
+func realVersion(s string) (string, error) {
+	framework.Logf("Getting real version for %q", s)
+	v, _, err := framework.RunCmd(path.Join(framework.TestContext.RepoRoot, "hack/get-build.sh"), "-v", s)
+	if err != nil {
+		return v, fmt.Errorf("error getting real version for %q: %v", s, err)
+	}
+	framework.Logf("Version for %q is %q", s, v)
+	return strings.TrimPrefix(strings.TrimSpace(v), "v"), nil
+}
+
+func traceRouteToMaster() {
+	traceroute, err := exec.LookPath("traceroute")
+	if err != nil {
+		framework.Logf("Could not find traceroute program")
+		return
+	}
+	cmd := exec.Command(traceroute, "-I", framework.GetMasterHost())
+	out, err := cmd.Output()
+	if len(out) != 0 {
+		framework.Logf(string(out))
+	}
+	if exiterr, ok := err.(*exec.ExitError); err != nil && ok {
+		framework.Logf("Error while running traceroute: %s", exiterr.Stderr)
+	}
+}
+
+// checkMasterVersion validates the master version
+func checkMasterVersion(c clientset.Interface, want string) error {
+	framework.Logf("Checking master version")
+	var err error
+	var v *version.Info
+	waitErr := wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
+		v, err = c.Discovery().ServerVersion()
+		if err != nil {
+			traceRouteToMaster()
+			return false, nil
+		}
+		return true, nil
+	})
+	if waitErr != nil {
+		return fmt.Errorf("checkMasterVersion() couldn't get the master version: %v", err)
+	}
+	// We do prefix trimming and then matching because:
+	// want looks like:  0.19.3-815-g50e67d4
+	// got  looks like: v0.19.3-815-g50e67d4034e858-dirty
+	got := strings.TrimPrefix(v.GitVersion, "v")
+	if !strings.HasPrefix(got, want) {
+		return fmt.Errorf("master had kube-apiserver version %s which does not start with %s", got, want)
+	}
+	framework.Logf("Master is at version %s", want)
+	return nil
+}
+
+// checkNodesVersions validates the node versions
+func checkNodesVersions(cs clientset.Interface, want string) error {
+	l, err := e2enode.GetReadySchedulableNodes(cs)
+	if err != nil {
+		return err
+	}
+	for _, n := range l.Items {
+		// We do prefix trimming and then matching because:
+		// want   looks like:  0.19.3-815-g50e67d4
+		// kv/kvp look  like: v0.19.3-815-g50e67d4034e858-dirty
+		kv, kpv := strings.TrimPrefix(n.Status.NodeInfo.KubeletVersion, "v"),
+			strings.TrimPrefix(n.Status.NodeInfo.KubeProxyVersion, "v")
+		if !strings.HasPrefix(kv, want) {
+			return fmt.Errorf("node %s had kubelet version %s which does not start with %s",
+				n.ObjectMeta.Name, kv, want)
+		}
+		if !strings.HasPrefix(kpv, want) {
+			return fmt.Errorf("node %s had kube-proxy version %s which does not start with %s",
+				n.ObjectMeta.Name, kpv, want)
+		}
+	}
+	return nil
+}
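The version checks above deliberately compare prefixes rather than exact strings, since the reported GitVersion carries a longer commit hash and possibly a "-dirty" suffix. A self-contained illustration using the example values from the comments in the patch:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// want comes from realVersion with the leading "v" already trimmed;
	// the reported GitVersion keeps the "v" and a longer hash suffix.
	want := "0.19.3-815-g50e67d4"
	got := strings.TrimPrefix("v0.19.3-815-g50e67d4034e858-dirty", "v")
	fmt.Println(strings.HasPrefix(got, want)) // true
}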