Removed all pause image constant strings; the pause image is now chosen by arch. Part of the effort to make e2e arch-agnostic.

Lucas Käldström 2016-05-26 19:16:43 +03:00
parent 965e8dce12
commit 79ca1911e1
26 changed files with 121 additions and 111 deletions
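
In short: every hardcoded "gcr.io/google_containers/pause-amd64:3.0" (or "...pause:2.0") reference below is replaced by a helper that assembles the image name from the pause image base, the target architecture and the version. A rough, self-contained sketch of that naming scheme (the constants mirror the ones added to test/e2e/framework/util.go; the function name here is illustrative, not the repository's):

package main

import (
	"fmt"
	"runtime"
)

const (
	// These mirror the constants introduced in test/e2e/framework/util.go.
	pauseImageName    = "gcr.io/google_containers/pause"
	pauseImageVersion = "3.0"
)

// pauseImageFor builds the per-architecture pause image reference,
// e.g. "gcr.io/google_containers/pause-arm:3.0" for arch "arm".
func pauseImageFor(arch string) string {
	return pauseImageName + "-" + arch + ":" + pauseImageVersion
}

func main() {
	// Using the host's GOARCH corresponds to GetPauseImageNameForHostArch;
	// the e2e tests instead ask the API server which arch the cluster runs on.
	fmt.Println(pauseImageFor(runtime.GOARCH))
}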

View File

@ -785,7 +785,7 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "c1", Name: "c1",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: e2e.GetPauseImageName(client),
Ports: []api.ContainerPort{ Ports: []api.ContainerPort{
{ContainerPort: 1234, HostPort: 9999}, {ContainerPort: 1234, HostPort: 9999},
}, },
@ -795,7 +795,7 @@ func runSchedulerNoPhantomPodsTest(client *client.Client) {
}, },
} }
// Assuming we only have two kublets, the third pod here won't schedule // Assuming we only have two kubelets, the third pod here won't schedule
// if the scheduler doesn't correctly handle the delete for the second // if the scheduler doesn't correctly handle the delete for the second
// pod. // pod.
pod.ObjectMeta.Name = "phantom.foo" pod.ObjectMeta.Name = "phantom.foo"

View File

@ -39,6 +39,7 @@ const (
defaultRootDir = "/var/lib/kubelet" defaultRootDir = "/var/lib/kubelet"
experimentalFlannelOverlay = false experimentalFlannelOverlay = false
// When these values are updated, also update test/e2e/util.go
defaultPodInfraContainerImageName = "gcr.io/google_containers/pause" defaultPodInfraContainerImageName = "gcr.io/google_containers/pause"
defaultPodInfraContainerImageVersion = "3.0" defaultPodInfraContainerImageVersion = "3.0"
) )

View File

@ -19,8 +19,11 @@
# `make container` will build a container-- you must supply a tag. # `make container` will build a container-- you must supply a tag.
# `make push` will push the container-- you must supply a tag. # `make push` will push the container-- you must supply a tag.
GOARCH?=$(shell go env GOARCH)
GOOS?=$(shell go env GOOS)
kubectl: kubectl:
KUBE_STATIC_OVERRIDES="kubectl" ../../hack/build-go.sh cmd/kubectl; cp ../../_output/local/bin/linux/amd64/kubectl . KUBE_STATIC_OVERRIDES="kubectl" ../../hack/build-go.sh cmd/kubectl; cp ../../_output/local/bin/$(GOOS)/$(GOARCH)/kubectl .
.tag: kubectl .tag: kubectl
./kubectl version -c | grep -o 'GitVersion:"[^"]*"' | cut -f 2 -d '"' > .tag ./kubectl version -c | grep -o 'GitVersion:"[^"]*"' | cut -f 2 -d '"' > .tag

View File

@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler" "k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory" "k8s.io/kubernetes/plugin/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
) )
@ -113,7 +114,7 @@ func makePodSpec() api.PodSpec {
return api.PodSpec{ return api.PodSpec{
Containers: []api.Container{{ Containers: []api.Container{{
Name: "pause", Name: "pause",
Image: "gcr.io/google_containers/pause:1.0", Image: e2e.GetPauseImageNameForHostArch(),
Ports: []api.ContainerPort{{ContainerPort: 80}}, Ports: []api.ContainerPort{{ContainerPort: 80}},
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Limits: api.ResourceList{ Limits: api.ResourceList{

View File

@ -86,7 +86,7 @@ func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectR
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: scaleTimeout, Timeout: scaleTimeout,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: replicas, Replicas: replicas,
HostPorts: map[string]int{"port1": 4321}, HostPorts: map[string]int{"port1": 4321},
} }
@ -105,7 +105,7 @@ func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: scaleTimeout, Timeout: scaleTimeout,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: replicas, Replicas: replicas,
CpuRequest: request, CpuRequest: request,
} }
@ -120,7 +120,7 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
Name: id, Name: id,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Timeout: scaleTimeout, Timeout: scaleTimeout,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: replicas, Replicas: replicas,
MemRequest: request, MemRequest: request,
} }

View File

@ -209,7 +209,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
Client: f.Client, Client: f.Client,
Name: rcName, Name: rcName,
Namespace: ns, Namespace: ns,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: numPods, Replicas: numPods,
CreatedPods: &[]*api.Pod{}, CreatedPods: &[]*api.Pod{},
} }

View File

@ -294,7 +294,7 @@ var _ = framework.KubeDescribe("Density", func() {
for i := 0; i < numberOrRCs; i++ { for i := 0; i < numberOrRCs; i++ {
RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid RCName = "density" + strconv.Itoa(totalPods) + "-" + strconv.Itoa(i) + "-" + uuid
RCConfigs[i] = framework.RCConfig{Client: c, RCConfigs[i] = framework.RCConfig{Client: c,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Name: RCName, Name: RCName,
Namespace: ns, Namespace: ns,
Labels: map[string]string{"type": "densityPod"}, Labels: map[string]string{"type": "densityPod"},
@ -522,7 +522,7 @@ var _ = framework.KubeDescribe("Density", func() {
} }
for i := 1; i <= nodeCount; i++ { for i := 1; i <= nodeCount; i++ {
name := additionalPodsPrefix + "-" + strconv.Itoa(i) name := additionalPodsPrefix + "-" + strconv.Itoa(i)
go createRunningPodFromRC(&wg, c, name, ns, "gcr.io/google_containers/pause-amd64:3.0", additionalPodsPrefix, cpuRequest, memRequest) go createRunningPodFromRC(&wg, c, name, ns, framework.GetPauseImageName(f.Client), additionalPodsPrefix, cpuRequest, memRequest)
time.Sleep(200 * time.Millisecond) time.Sleep(200 * time.Millisecond)
} }
wg.Wait() wg.Wait()

View File

@ -44,7 +44,7 @@ var _ = framework.KubeDescribe("Etcd failure [Disruptive]", func() {
Client: f.Client, Client: f.Client,
Name: "baz", Name: "baz",
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: 1, Replicas: 1,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
}) })

View File

@ -30,6 +30,7 @@ import (
"os/exec" "os/exec"
"path" "path"
"path/filepath" "path/filepath"
goRuntime "runtime"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -128,12 +129,42 @@ const (
// How long claims have to become dynamically provisioned // How long claims have to become dynamically provisioned
ClaimProvisionTimeout = 5 * time.Minute ClaimProvisionTimeout = 5 * time.Minute
// When these values are updated, also update cmd/kubelet/app/options/options.go
currentPodInfraContainerImageName = "gcr.io/google_containers/pause"
currentPodInfraContainerImageVersion = "3.0"
) )
// Label allocated to the image puller static pod that runs on each node // Label allocated to the image puller static pod that runs on each node
// before e2es. // before e2es.
var ImagePullerLabels = map[string]string{"name": "e2e-image-puller"} var ImagePullerLabels = map[string]string{"name": "e2e-image-puller"}
// GetServerArchitecture fetches the architecture of the target cluster.
func GetServerArchitecture(c *client.Client) string {
arch := ""
sVer, err := c.Discovery().ServerVersion()
if err != nil || sVer.Platform == "" {
// If we failed to get the server version for some reason, default to amd64.
arch = "amd64"
} else {
// Split the platform string into OS and Arch separately.
// The platform string may for example be "linux/amd64", "linux/arm" or "windows/amd64".
osArchArray := strings.Split(sVer.Platform, "/")
arch = osArchArray[1]
}
return arch
}
// GetPauseImageName fetches the architecture of the target cluster and chooses the pause image to use.
func GetPauseImageName(c *client.Client) string {
return currentPodInfraContainerImageName + "-" + GetServerArchitecture(c) + ":" + currentPodInfraContainerImageVersion
}
// GetPauseImageNameForHostArch() fetches the pause image for the same architecture the machine is running on.
func GetPauseImageNameForHostArch() string {
return currentPodInfraContainerImageName + "-" + goRuntime.GOARCH + ":" + currentPodInfraContainerImageVersion
}
// SubResource proxy should have been functional in v1.0.0, but SubResource // SubResource proxy should have been functional in v1.0.0, but SubResource
// proxy via tunneling is known to be broken in v1.0. See // proxy via tunneling is known to be broken in v1.0. See
// https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463 // https://github.com/kubernetes/kubernetes/pull/15224#issuecomment-146769463
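
For reference, the two helpers above differ only in where the architecture comes from: GetPauseImageName asks the API server for its platform string, while GetPauseImageNameForHostArch uses the GOARCH the test binary was compiled for. A minimal sketch (an assumed example, not an excerpt from the repository) of how a test built on the e2e framework would construct a pause pod after this change:

package example

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/test/e2e/framework"
)

// newPausePod is a hypothetical helper; the import paths and field names
// follow the ones visible elsewhere in this diff.
func newPausePod(f *framework.Framework, name string) *api.Pod {
	return &api.Pod{
		ObjectMeta: api.ObjectMeta{Name: name},
		Spec: api.PodSpec{
			Containers: []api.Container{{
				Name: "pause",
				// Resolves to e.g. gcr.io/google_containers/pause-arm:3.0
				// when the API server reports platform "linux/arm".
				Image: framework.GetPauseImageName(f.Client),
			}},
		},
	}
}

This is the pattern the remaining files in this commit follow: the client (or the framework object) is passed down so the pause image can be resolved at run time rather than being hardcoded per architecture.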

View File

@ -1429,37 +1429,6 @@ func streamingUpload(file *os.File, fileName string, postBodyWriter *multipart.W
} }
} }
var binPrefixes = []string{
"_output/dockerized/bin",
"_output/local/bin",
"platforms",
}
// findBinary searches through likely paths to find the specified binary. It
// takes the one that has been built most recently. Platform should be
// specified as '<os>/<arch>'. For example: 'linux/amd64'.
func findBinary(binName string, platform string) (string, error) {
var binTime time.Time
var binPath string
for _, pre := range binPrefixes {
tryPath := path.Join(framework.TestContext.RepoRoot, pre, platform, binName)
fi, err := os.Stat(tryPath)
if err != nil {
continue
}
if fi.ModTime().After(binTime) {
binPath = tryPath
binTime = fi.ModTime()
}
}
if len(binPath) > 0 {
return binPath, nil
}
return binPath, fmt.Errorf("Could not find %v for %v", binName, platform)
}
func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) { func startLocalProxy() (srv *httptest.Server, logs *bytes.Buffer) {
logs = &bytes.Buffer{} logs = &bytes.Buffer{}
p := goproxy.NewProxyHttpServer() p := goproxy.NewProxyHttpServer()

View File

@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
Client: f.Client, Client: f.Client,
Name: rcName, Name: rcName,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: totalPods, Replicas: totalPods,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
// Perform a sanity check so that we know all desired pods are // Perform a sanity check so that we know all desired pods are

View File

@ -69,7 +69,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
Client: f.Client, Client: f.Client,
Name: rcName, Name: rcName,
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Replicas: totalPods, Replicas: totalPods,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())

View File

@ -53,7 +53,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with no resource requirements") By("Creating a Pod with no resource requirements")
pod := newTestPod("pod-no-resources", api.ResourceList{}, api.ResourceList{}) pod := newTestPod(f, "pod-no-resources", api.ResourceList{}, api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -70,7 +70,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
} }
By("Creating a Pod with partial resource requirements") By("Creating a Pod with partial resource requirements")
pod = newTestPod("pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", "")) pod = newTestPod(f, "pod-partial-resources", getResourceList("", "150Mi"), getResourceList("300m", ""))
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -91,12 +91,12 @@ var _ = framework.KubeDescribe("LimitRange", func() {
} }
By("Failing to create a Pod with less than min resources") By("Failing to create a Pod with less than min resources")
pod = newTestPod(podName, getResourceList("10m", "50Mi"), api.ResourceList{}) pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources") By("Failing to create a Pod with more than max resources")
pod = newTestPod(podName, getResourceList("600m", "600Mi"), api.ResourceList{}) pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
@ -167,7 +167,7 @@ func newLimitRange(name string, limitType api.LimitType,
} }
// newTestPod returns a pod that has the specified requests and limits // newTestPod returns a pod that has the specified requests and limits
func newTestPod(name string, requests api.ResourceList, limits api.ResourceList) *api.Pod { func newTestPod(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod {
return &api.Pod{ return &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
@ -175,8 +175,8 @@ func newTestPod(name string, requests api.ResourceList, limits api.ResourceList)
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "nginx", Name: "pause",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,

View File

@ -94,7 +94,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "beta.gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },

View File

@ -97,7 +97,7 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },

View File

@ -179,7 +179,7 @@ func createOutOfDiskPod(c *client.Client, ns, name string, milliCPU int64) {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "pause", Name: "pause",
Image: "beta.gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(c),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Requests: api.ResourceList{ Requests: api.ResourceList{
// Request enough CPU to fit only two pods on a given node. // Request enough CPU to fit only two pods on a given node.

View File

@ -219,7 +219,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "test", Name: "test",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -244,7 +244,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Limits: api.ResourceList{ Limits: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@ -754,7 +754,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "run1", Name: "run1",
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Limits: api.ResourceList{ Limits: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
@ -823,7 +823,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "run1", Name: "run1",
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Limits: api.ResourceList{ Limits: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),

View File

@ -328,7 +328,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
requests := api.ResourceList{} requests := api.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("500m") requests[api.ResourceCPU] = resource.MustParse("500m")
requests[api.ResourceMemory] = resource.MustParse("252Mi") requests[api.ResourceMemory] = resource.MustParse("252Mi")
pod := newTestPodForQuota(podName, requests, api.ResourceList{}) pod := newTestPodForQuota(f, podName, requests, api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
podToUpdate := pod podToUpdate := pod
@ -345,7 +345,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
requests = api.ResourceList{} requests = api.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("600m") requests[api.ResourceCPU] = resource.MustParse("600m")
requests[api.ResourceMemory] = resource.MustParse("100Mi") requests[api.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota("fail-pod", requests, api.ResourceList{}) pod = newTestPodForQuota(f, "fail-pod", requests, api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
@ -509,7 +509,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
limits := api.ResourceList{} limits := api.ResourceList{}
limits[api.ResourceCPU] = resource.MustParse("1") limits[api.ResourceCPU] = resource.MustParse("1")
limits[api.ResourceMemory] = resource.MustParse("400Mi") limits[api.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(podName, requests, limits) pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -546,7 +546,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
By("Creating a terminating pod") By("Creating a terminating pod")
podName = "terminating-pod" podName = "terminating-pod"
pod = newTestPodForQuota(podName, requests, limits) pod = newTestPodForQuota(f, podName, requests, limits)
activeDeadlineSeconds := int64(3600) activeDeadlineSeconds := int64(3600)
pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds pod.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
@ -604,7 +604,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod") By("Creating a best-effort pod")
pod := newTestPodForQuota(podName, api.ResourceList{}, api.ResourceList{}) pod := newTestPodForQuota(f, podName, api.ResourceList{}, api.ResourceList{})
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -634,7 +634,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
limits := api.ResourceList{} limits := api.ResourceList{}
limits[api.ResourceCPU] = resource.MustParse("1") limits[api.ResourceCPU] = resource.MustParse("1")
limits[api.ResourceMemory] = resource.MustParse("400Mi") limits[api.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota("burstable-pod", requests, limits) pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.Client.Pods(f.Namespace.Name).Create(pod) pod, err = f.Client.Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -697,7 +697,7 @@ func newTestResourceQuota(name string) *api.ResourceQuota {
} }
// newTestPodForQuota returns a pod that has the specified requests and limits // newTestPodForQuota returns a pod that has the specified requests and limits
func newTestPodForQuota(name string, requests api.ResourceList, limits api.ResourceList) *api.Pod { func newTestPodForQuota(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod {
return &api.Pod{ return &api.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: api.ObjectMeta{
Name: name, Name: name,
@ -705,8 +705,8 @@ func newTestPodForQuota(name string, requests api.ResourceList, limits api.Resou
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "nginx", Name: "pause",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,

View File

@ -238,7 +238,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "", Name: "",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -257,7 +257,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -315,7 +315,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "", Name: "",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Limits: api.ResourceList{ Limits: api.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
@ -342,7 +342,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Limits: api.ResourceList{ Limits: api.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
@ -382,7 +382,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
NodeSelector: map[string]string{ NodeSelector: map[string]string{
@ -425,7 +425,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -461,7 +461,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -499,7 +499,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: labelPodName, Name: labelPodName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
NodeSelector: map[string]string{ NodeSelector: map[string]string{
@ -563,7 +563,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -598,7 +598,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -654,7 +654,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: labelPodName, Name: labelPodName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -692,7 +692,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -771,7 +771,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -822,7 +822,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -857,7 +857,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -910,7 +910,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: labelPodName, Name: labelPodName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -949,7 +949,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1002,7 +1002,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: labelPodName, Name: labelPodName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1037,7 +1037,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1098,7 +1098,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: labelPodName, Name: labelPodName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1137,7 +1137,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1201,7 +1201,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: labelPodName, Name: labelPodName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1240,7 +1240,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause:2.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1304,7 +1304,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1369,7 +1369,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: tolerationPodName, Name: tolerationPodName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1426,7 +1426,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podName, Name: podName,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },
@ -1481,7 +1481,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: podNameNoTolerations, Name: podNameNoTolerations,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },

View File

@ -1138,8 +1138,8 @@ func createPodOrFail(c *client.Client, ns, name string, labels map[string]string
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "test", Name: "pause",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(c),
Ports: containerPorts, Ports: containerPorts,
// Add a dummy environment variable to work around a docker issue. // Add a dummy environment variable to work around a docker issue.
// https://github.com/docker/docker/issues/14203 // https://github.com/docker/docker/issues/14203

View File

@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) { func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
cfg := framework.RCConfig{ cfg := framework.RCConfig{
Client: f.Client, Client: f.Client,
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
Name: "svc-latency-rc", Name: "svc-latency-rc",
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Replicas: 1, Replicas: 1,

View File

@ -88,7 +88,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "test", Name: "test",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: framework.GetPauseImageName(f.Client),
}, },
}, },
}, },

View File

@ -21,6 +21,8 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/test/e2e/framework"
) )
const ( const (
@ -45,7 +47,7 @@ var ImageRegistry = map[int]string{
hostExecImage: "gcr.io/google_containers/hostexec:1.2", hostExecImage: "gcr.io/google_containers/hostexec:1.2",
netExecImage: "gcr.io/google_containers/netexec:1.4", netExecImage: "gcr.io/google_containers/netexec:1.4",
nginxImage: "gcr.io/google_containers/nginx:1.7.9", nginxImage: "gcr.io/google_containers/nginx:1.7.9",
pauseImage: "gcr.io/google_containers/pause-amd64:3.0", pauseImage: framework.GetPauseImageNameForHostArch(),
} }
// These are used by tests that explicitly test the ability to pull images // These are used by tests that explicitly test the ability to pull images

View File

@ -35,6 +35,7 @@ import (
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
) )
@ -234,8 +235,8 @@ func TestMultiWatch(t *testing.T) {
}, },
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{{ Containers: []api.Container{{
Name: "nothing", Name: "pause",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: e2e.GetPauseImageName(client),
}}, }},
}, },
}) })
@ -341,7 +342,7 @@ func TestMultiWatch(t *testing.T) {
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{{ Containers: []api.Container{{
Name: "nothing", Name: "nothing",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: e2e.GetPauseImageName(client),
}}, }},
}, },
}) })
@ -372,7 +373,7 @@ func TestMultiWatch(t *testing.T) {
if err != nil { if err != nil {
panic(fmt.Sprintf("Couldn't get %v: %v", name, err)) panic(fmt.Sprintf("Couldn't get %v: %v", name, err))
} }
pod.Spec.Containers[0].Image = "gcr.io/google_containers/pause-amd64:3.0" pod.Spec.Containers[0].Image = e2e.GetPauseImageName(client)
sentTimes <- timePair{time.Now(), name} sentTimes <- timePair{time.Now(), name}
if _, err := client.Pods(ns).Update(pod); err != nil { if _, err := client.Pods(ns).Update(pod); err != nil {
panic(fmt.Sprintf("Couldn't make %v: %v", name, err)) panic(fmt.Sprintf("Couldn't make %v: %v", name, err))

View File

@ -42,6 +42,7 @@ import (
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory" "k8s.io/kubernetes/plugin/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
) )
@ -283,7 +284,7 @@ func DoTestPodScheduling(t *testing.T, restClient *client.Client) {
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"}, ObjectMeta: api.ObjectMeta{Name: "extender-test-pod"},
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{{Name: "container", Image: "gcr.io/google_containers/pause-amd64:3.0"}}, Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
}, },
} }

View File

@ -41,6 +41,7 @@ import (
"k8s.io/kubernetes/plugin/pkg/scheduler" "k8s.io/kubernetes/plugin/pkg/scheduler"
_ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider" _ "k8s.io/kubernetes/plugin/pkg/scheduler/algorithmprovider"
"k8s.io/kubernetes/plugin/pkg/scheduler/factory" "k8s.io/kubernetes/plugin/pkg/scheduler/factory"
e2e "k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/integration/framework" "k8s.io/kubernetes/test/integration/framework"
) )
@ -232,7 +233,7 @@ func DoTestUnschedulableNodes(t *testing.T, restClient *client.Client, nodeStore
pod := &api.Pod{ pod := &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"}, ObjectMeta: api.ObjectMeta{Name: "node-scheduling-test-pod"},
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{{Name: "container", Image: "gcr.io/google_containers/pause-amd64:3.0"}}, Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(restClient)}},
}, },
} }
myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod) myPod, err := restClient.Pods(api.NamespaceDefault).Create(pod)
@ -339,21 +340,21 @@ func TestMultiScheduler(t *testing.T) {
restClient.Nodes().Create(node) restClient.Nodes().Create(node)
// 3. create 3 pods for testing // 3. create 3 pods for testing
podWithNoAnnotation := createPod("pod-with-no-annotation", nil) podWithNoAnnotation := createPod(restClient, "pod-with-no-annotation", nil)
testPodNoAnnotation, err := restClient.Pods(api.NamespaceDefault).Create(podWithNoAnnotation) testPodNoAnnotation, err := restClient.Pods(api.NamespaceDefault).Create(podWithNoAnnotation)
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
} }
schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"} schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
podWithAnnotationFitsDefault := createPod("pod-with-annotation-fits-default", schedulerAnnotationFitsDefault) podWithAnnotationFitsDefault := createPod(restClient, "pod-with-annotation-fits-default", schedulerAnnotationFitsDefault)
testPodWithAnnotationFitsDefault, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsDefault) testPodWithAnnotationFitsDefault, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsDefault)
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
} }
schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"} schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
podWithAnnotationFitsFoo := createPod("pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo) podWithAnnotationFitsFoo := createPod(restClient, "pod-with-annotation-fits-foo", schedulerAnnotationFitsFoo)
testPodWithAnnotationFitsFoo, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsFoo) testPodWithAnnotationFitsFoo, err := restClient.Pods(api.NamespaceDefault).Create(podWithAnnotationFitsFoo)
if err != nil { if err != nil {
t.Fatalf("Failed to create pod: %v", err) t.Fatalf("Failed to create pod: %v", err)
@ -456,11 +457,11 @@ func TestMultiScheduler(t *testing.T) {
*/ */
} }
func createPod(name string, annotation map[string]string) *api.Pod { func createPod(client *client.Client, name string, annotation map[string]string) *api.Pod {
return &api.Pod{ return &api.Pod{
ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation}, ObjectMeta: api.ObjectMeta{Name: name, Annotations: annotation},
Spec: api.PodSpec{ Spec: api.PodSpec{
Containers: []api.Container{{Name: "container", Image: "gcr.io/google_containers/pause-amd64:3.0"}}, Containers: []api.Container{{Name: "container", Image: e2e.GetPauseImageName(client)}},
}, },
} }
} }
@ -521,7 +522,7 @@ func TestAllocatable(t *testing.T) {
Containers: []api.Container{ Containers: []api.Container{
{ {
Name: "container", Name: "container",
Image: "gcr.io/google_containers/pause-amd64:3.0", Image: e2e.GetPauseImageName(restClient),
Resources: api.ResourceRequirements{ Resources: api.ResourceRequirements{
Requests: api.ResourceList{ Requests: api.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI), api.ResourceCPU: *resource.NewMilliQuantity(20, resource.DecimalSI),