Chao Xu 2016-11-18 12:55:17 -08:00
parent 1044aa4500
commit a55c71db4d
102 changed files with 2940 additions and 2836 deletions

View File

@@ -25,8 +25,8 @@ import (
     "time"

     "golang.org/x/crypto/ssh"

-    "k8s.io/kubernetes/pkg/api"
-    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/api/v1"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/test/e2e/framework"

     . "github.com/onsi/ginkgo"
@@ -205,7 +205,7 @@ spec:
 const (
     addonTestPollInterval = 3 * time.Second
     addonTestPollTimeout  = 5 * time.Minute
-    defaultNsName         = api.NamespaceDefault
+    defaultNsName         = v1.NamespaceDefault
     addonNsName           = "kube-system"
 )
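The two hunks above are the commit-wide pattern in its simplest form: object types move from the internal package k8s.io/kubernetes/pkg/api to the versioned k8s.io/kubernetes/pkg/api/v1, and the generated internalclientset is replaced by the versioned release_1_5 clientset, while the call shapes stay the same. A minimal compile-level sketch of the resulting usage (package paths are the ones in the hunks; defaultNs and listNodes are illustrative names, not from the commit):

package example

import (
    "k8s.io/kubernetes/pkg/api/v1"                                           // was "k8s.io/kubernetes/pkg/api"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" // was .../internalclientset
)

// The constant swap is purely mechanical: same value, new home package.
const defaultNs = v1.NamespaceDefault // was api.NamespaceDefault

// listNodes compiles against the versioned clientset; note that the list
// options type now also comes from v1.
func listNodes(c clientset.Interface) (*v1.NodeList, error) {
    return c.Core().Nodes().List(v1.ListOptions{})
}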

View File

@@ -21,8 +21,9 @@ import (
     "strconv"
     "time"

-    "k8s.io/kubernetes/pkg/api"
-    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/api/v1"
+    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/pkg/util/intstr"
     "k8s.io/kubernetes/test/e2e/framework"
     testutils "k8s.io/kubernetes/test/utils"
@@ -97,7 +98,7 @@ cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can
 func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
     requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
-    runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+    runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.InternalClientset, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
     rc := &ResourceConsumer{
         name:           name,
         controllerName: name + "-ctrl",
@@ -303,20 +304,20 @@ func (rc *ResourceConsumer) CleanUp() {
     rc.stopCustomMetric <- 0
     // Wait some time to ensure all child goroutines are finished.
     time.Sleep(10 * time.Second)
-    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name))
+    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.name))
     framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
-    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName))
+    framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.controllerName))
     framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
 }

-func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
     By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
-    _, err := c.Core().Services(ns).Create(&api.Service{
-        ObjectMeta: api.ObjectMeta{
+    _, err := c.Core().Services(ns).Create(&v1.Service{
+        ObjectMeta: v1.ObjectMeta{
             Name: name,
         },
-        Spec: api.ServiceSpec{
-            Ports: []api.ServicePort{{
+        Spec: v1.ServiceSpec{
+            Ports: []v1.ServicePort{{
                 Port:       port,
                 TargetPort: intstr.FromInt(targetPort),
             }},
@@ -329,16 +330,17 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, k
     framework.ExpectNoError(err)

     rcConfig := testutils.RCConfig{
-        Client:     c,
-        Image:      resourceConsumerImage,
-        Name:       name,
-        Namespace:  ns,
-        Timeout:    timeoutRC,
-        Replicas:   replicas,
-        CpuRequest: cpuLimitMillis,
-        CpuLimit:   cpuLimitMillis,
-        MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
-        MemLimit:   memLimitMb * 1024 * 1024,
+        Client:         c,
+        InternalClient: internalClient,
+        Image:          resourceConsumerImage,
+        Name:           name,
+        Namespace:      ns,
+        Timeout:        timeoutRC,
+        Replicas:       replicas,
+        CpuRequest:     cpuLimitMillis,
+        CpuLimit:       cpuLimitMillis,
+        MemRequest:     memLimitMb * 1024 * 1024, // MemLimit is in bytes
+        MemLimit:       memLimitMb * 1024 * 1024,
     }

     switch kind {
@@ -364,12 +366,12 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, k
     By(fmt.Sprintf("Running controller"))
     controllerName := name + "-ctrl"
-    _, err = c.Core().Services(ns).Create(&api.Service{
-        ObjectMeta: api.ObjectMeta{
+    _, err = c.Core().Services(ns).Create(&v1.Service{
+        ObjectMeta: v1.ObjectMeta{
             Name: controllerName,
         },
-        Spec: api.ServiceSpec{
-            Ports: []api.ServicePort{{
+        Spec: v1.ServiceSpec{
+            Ports: []v1.ServicePort{{
                 Port:       port,
                 TargetPort: intstr.FromInt(targetPort),
             }},
@@ -381,7 +383,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, k
     })
     framework.ExpectNoError(err)

-    dnsClusterFirst := api.DNSClusterFirst
+    dnsClusterFirst := v1.DNSClusterFirst
     controllerRcConfig := testutils.RCConfig{
         Client: c,
         Image:  resourceConsumerControllerImage,
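Beyond the type swap, this file shows the second recurring change: helpers that used to take one clientset now take two. CRUD on API objects goes through the versioned client, while framework.DeleteRCAndPods still drives kubectl's reaper machinery, which speaks internal types, so the internal clientset is threaded through as an extra parameter (and as the new InternalClient field of testutils.RCConfig). A sketch of that shape, under the signatures visible in the hunks (cleanupRC is a hypothetical name):

package example

import (
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/test/e2e/framework"
)

// cleanupRC mirrors CleanUp above: the versioned client deletes the service,
// while DeleteRCAndPods additionally needs the internal client.
func cleanupRC(c clientset.Interface, ic internalclientset.Interface, ns, name string) error {
    if err := framework.DeleteRCAndPods(c, ic, ns, name); err != nil {
        return err
    }
    return c.Core().Services(ns).Delete(name, nil)
}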

View File

@@ -25,8 +25,10 @@ import (
     "k8s.io/kubernetes/pkg/api"
     "k8s.io/kubernetes/pkg/api/errors"
-    "k8s.io/kubernetes/pkg/apis/batch"
-    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/api/v1"
+    batchinternal "k8s.io/kubernetes/pkg/apis/batch"
+    batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/pkg/kubectl"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/pkg/util/wait"
@@ -53,7 +55,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
     // Simplest case: all pods succeed promptly
     It("should run a job to completion when tasks succeed", func() {
         By("Creating a job")
-        job := newTestV1Job("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
+        job := newTestV1Job("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -72,7 +74,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         // up to 5 minutes between restarts, making test timeouts
         // due to successive failures too likely with a reasonable
         // test timeout.
-        job := newTestV1Job("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
+        job := newTestV1Job("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -90,7 +92,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         // Worst case analysis: 15 failures, each taking 1 minute to
         // run due to some slowness, 1 in 2^15 chance of happening,
         // causing test flake. Should be very rare.
-        job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
+        job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -101,7 +103,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
     It("should keep restarting failed pods", func() {
         By("Creating a job")
-        job := newTestV1Job("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
+        job := newTestV1Job("fail", "all-fail", v1.RestartPolicyNever, parallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -119,7 +121,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         startParallelism := int32(1)
         endParallelism := int32(2)
         By("Creating a job")
-        job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
+        job := newTestV1Job("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -128,7 +130,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         Expect(err).NotTo(HaveOccurred())

         By("scale job up")
-        scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+        scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
         Expect(err).NotTo(HaveOccurred())
         waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
         waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -144,7 +146,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         startParallelism := int32(2)
         endParallelism := int32(1)
         By("Creating a job")
-        job := newTestV1Job("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
+        job := newTestV1Job("notTerminate", "scale-down", v1.RestartPolicyNever, startParallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -153,7 +155,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         Expect(err).NotTo(HaveOccurred())

         By("scale job down")
-        scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+        scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
         Expect(err).NotTo(HaveOccurred())
         waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
         waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -167,7 +169,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
     It("should delete a job", func() {
         By("Creating a job")
-        job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+        job := newTestV1Job("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
         Expect(err).NotTo(HaveOccurred())
@@ -176,7 +178,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
         Expect(err).NotTo(HaveOccurred())

         By("delete a job")
-        reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
+        reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
         Expect(err).NotTo(HaveOccurred())
         timeout := 1 * time.Minute
         err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
@@ -190,7 +192,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
     It("should fail a job", func() {
         By("Creating a job")
-        job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+        job := newTestV1Job("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
         activeDeadlineSeconds := int64(10)
         job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
         job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
@@ -215,34 +217,34 @@ var _ = framework.KubeDescribe("V1Job", func() {
 })

 // newTestV1Job returns a job which does one of several testing behaviors.
-func newTestV1Job(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
+func newTestV1Job(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
     job := &batch.Job{
-        ObjectMeta: api.ObjectMeta{
+        ObjectMeta: v1.ObjectMeta{
             Name: name,
         },
         Spec: batch.JobSpec{
             Parallelism: &parallelism,
             Completions: &completions,
-            Template: api.PodTemplateSpec{
-                ObjectMeta: api.ObjectMeta{
+            Template: v1.PodTemplateSpec{
+                ObjectMeta: v1.ObjectMeta{
                     Labels: map[string]string{"somekey": "somevalue"},
                 },
-                Spec: api.PodSpec{
+                Spec: v1.PodSpec{
                     RestartPolicy: rPol,
-                    Volumes: []api.Volume{
+                    Volumes: []v1.Volume{
                         {
                             Name: "data",
-                            VolumeSource: api.VolumeSource{
-                                EmptyDir: &api.EmptyDirVolumeSource{},
+                            VolumeSource: v1.VolumeSource{
+                                EmptyDir: &v1.EmptyDirVolumeSource{},
                             },
                         },
                     },
-                    Containers: []api.Container{
+                    Containers: []v1.Container{
                         {
                             Name:    "c",
                             Image:   "gcr.io/google_containers/busybox:1.24",
                             Command: []string{},
-                            VolumeMounts: []api.VolumeMount{
+                            VolumeMounts: []v1.VolumeMount{
                                 {
                                     MountPath: "/data",
                                     Name:      "data",
@@ -289,21 +291,21 @@ func updateV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job,
 }

 func deleteV1Job(c clientset.Interface, ns, name string) error {
-    return c.Batch().Jobs(ns).Delete(name, api.NewDeleteOptions(0))
+    return c.Batch().Jobs(ns).Delete(name, v1.NewDeleteOptions(0))
 }

 // Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
 func waitForAllPodsRunningV1(c clientset.Interface, ns, jobName string, parallelism int32) error {
     label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName}))
     return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
-        options := api.ListOptions{LabelSelector: label}
+        options := v1.ListOptions{LabelSelector: label.String()}
         pods, err := c.Core().Pods(ns).List(options)
         if err != nil {
             return false, err
         }
         count := int32(0)
         for _, p := range pods.Items {
-            if p.Status.Phase == api.PodRunning {
+            if p.Status.Phase == v1.PodRunning {
                 count++
             }
         }
@@ -330,7 +332,7 @@ func waitForV1JobFail(c clientset.Interface, ns, jobName string, timeout time.Du
             return false, err
         }
         for _, c := range curr.Status.Conditions {
-            if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
+            if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
                 return true, nil
             }
         }
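Two behavioral changes in this file go beyond renames. First, kubectl.ScalerFor and kubectl.ReaperFor are now handed f.InternalClientset (with the batch group addressed through the batchinternal alias), because the scale/reap machinery still operates on internal types. Second, v1.ListOptions carries selectors as plain strings, so a structured labels.Selector must be serialized with .String(). A sketch of the listing half in isolation (countRunningPods and the selector key are illustrative, not from the commit):

package example

import (
    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/labels"
)

// countRunningPods builds a structured selector, then serializes it into the
// string form that v1.ListOptions now expects.
func countRunningPods(c clientset.Interface, ns, selectorKey, jobName string) (int32, error) {
    label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: jobName}))
    pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()})
    if err != nil {
        return 0, err
    }
    count := int32(0)
    for _, p := range pods.Items {
        if p.Status.Phase == v1.PodRunning {
            count++
        }
    }
    return count, nil
}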

View File

@@ -20,8 +20,8 @@ import (
     "fmt"
     "time"

-    "k8s.io/kubernetes/pkg/api"
-    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/api/v1"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/test/e2e/framework"

     . "github.com/onsi/ginkgo"
@@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Cadvisor", func() {
 func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) {
     // It should be OK to list unschedulable Nodes here.
     By("getting list of nodes")
-    nodeList, err := c.Core().Nodes().List(api.ListOptions{})
+    nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
     framework.ExpectNoError(err)
     var errors []error

View File

@@ -24,6 +24,7 @@ import (
     "time"

     "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -52,7 +53,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
         By("Running synthetic logger")
         createSynthLogger(f, expectedLinesCount)
-        defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
+        defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
         err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
         framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))
@@ -101,7 +102,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
     // Wait for the Elasticsearch pods to enter the running state.
     By("Checking to make sure the Elasticsearch pods are running")
     label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"}))
-    options := api.ListOptions{LabelSelector: label}
+    options := v1.ListOptions{LabelSelector: label.String()}
     pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
     Expect(err).NotTo(HaveOccurred())
     for _, pod := range pods.Items {
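Note that this file now imports both pkg/api and pkg/api/v1, and the two deliberately coexist: pods are listed and typed against v1, while identifiers with no versioned home yet, such as api.NamespaceSystem, stay with the internal package. A condensed sketch of that mixed usage (listSystemPodsByApp is an illustrative name):

package example

import (
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/labels"
)

// listSystemPodsByApp pairs the internal namespace constant with versioned
// list options, exactly as the hunk above does.
func listSystemPodsByApp(c clientset.Interface, app string) (*v1.PodList, error) {
    label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": app}))
    return c.Core().Pods(api.NamespaceSystem).List(v1.ListOptions{LabelSelector: label.String()})
}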

View File

@@ -23,7 +23,7 @@ import (
     "strings"
     "time"

-    "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/util/json"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -42,7 +42,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
     It("should check that logs from containers are ingested in GCL", func() {
         By("Running synthetic logger")
         createSynthLogger(f, expectedLinesCount)
-        defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
+        defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
         err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
         framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))

View File

@@ -22,6 +22,7 @@ import (
     "time"

     "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/labels"
     "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -41,14 +42,14 @@ const (
 )

 func createSynthLogger(f *framework.Framework, linesCount int) {
-    f.PodClient().Create(&api.Pod{
-        ObjectMeta: api.ObjectMeta{
+    f.PodClient().Create(&v1.Pod{
+        ObjectMeta: v1.ObjectMeta{
             Name:      synthLoggerPodName,
             Namespace: f.Namespace.Name,
         },
-        Spec: api.PodSpec{
-            RestartPolicy: api.RestartPolicyOnFailure,
-            Containers: []api.Container{
+        Spec: v1.PodSpec{
+            RestartPolicy: v1.RestartPolicyOnFailure,
+            Containers: []v1.Container{
                 {
                     Name:  synthLoggerPodName,
                     Image: "gcr.io/google_containers/busybox:1.24",
@@ -72,7 +73,7 @@ func reportLogsFromFluentdPod(f *framework.Framework) error {
     }

     label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
-    options := api.ListOptions{LabelSelector: label}
+    options := v1.ListOptions{LabelSelector: label.String()}
     fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)

     for _, fluentdPod := range fluentdPods.Items {

View File

@@ -26,8 +26,8 @@ import (
     "strings"
     "time"

-    "k8s.io/kubernetes/pkg/api"
-    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/api/v1"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/pkg/fields"
     "k8s.io/kubernetes/pkg/util/sets"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -63,8 +63,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
     nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
     nodeCount = len(nodes.Items)
     Expect(nodeCount).NotTo(BeZero())
-    cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
-    mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
+    cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
+    mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
     coresPerNode = int((&cpu).MilliValue() / 1000)
     memCapacityMb = int((&mem).Value() / 1024 / 1024)
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
     It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
         By("Creating unschedulable pod")
         ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
-        defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

         By("Waiting for scale up hoping it won't happen")
         // Verfiy, that the appropreate event was generated.
@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
     EventsLoop:
         for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
             By("Waiting for NotTriggerScaleUp event")
-            events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(api.ListOptions{})
+            events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(v1.ListOptions{})
             framework.ExpectNoError(err)

             for _, e := range events.Items {
@@ -125,7 +125,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
     It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
         ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-        defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

         // Verify, that cluster size is increased
         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
         glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
         ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
-        defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

         // Verify, that cluster size is increased
         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
     It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
         CreateHostPortPods(f, "host-port", nodeCount+2, false)
-        defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "host-port")
+        defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")

         framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
             func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
             func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
         framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
-        framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "node-selector"))
+        framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector"))
     })

     It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -233,7 +233,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
         By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
         ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
-        defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+        defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")

         // Apparently GKE master is restarted couple minutes after the node pool is added
         // reseting all the timers in scale down code. Adding 5 extra minutes to workaround
@@ -458,14 +458,15 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
     By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))

     config := &testutils.RCConfig{
-        Client:       f.ClientSet,
-        Name:         "node-selector",
-        Namespace:    f.Namespace.Name,
-        Timeout:      defaultTimeout,
-        Image:        framework.GetPauseImageName(f.ClientSet),
-        Replicas:     replicas,
-        HostPorts:    map[string]int{"port1": 4321},
-        NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"},
+        Client:         f.ClientSet,
+        InternalClient: f.InternalClientset,
+        Name:           "node-selector",
+        Namespace:      f.Namespace.Name,
+        Timeout:        defaultTimeout,
+        Image:          framework.GetPauseImageName(f.ClientSet),
+        Replicas:       replicas,
+        HostPorts:      map[string]int{"port1": 4321},
+        NodeSelector:   map[string]string{"cluster-autoscaling-test.special-node": "true"},
     }
     err := framework.RunRC(*config)
     if expectRunning {
@@ -476,13 +477,14 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
 func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
     By(fmt.Sprintf("Running RC which reserves host port"))
     config := &testutils.RCConfig{
-        Client:    f.ClientSet,
-        Name:      id,
-        Namespace: f.Namespace.Name,
-        Timeout:   defaultTimeout,
-        Image:     framework.GetPauseImageName(f.ClientSet),
-        Replicas:  replicas,
-        HostPorts: map[string]int{"port1": 4321},
+        Client:         f.ClientSet,
+        InternalClient: f.InternalClientset,
+        Name:           id,
+        Namespace:      f.Namespace.Name,
+        Timeout:        defaultTimeout,
+        Image:          framework.GetPauseImageName(f.ClientSet),
+        Replicas:       replicas,
+        HostPorts:      map[string]int{"port1": 4321},
     }
     err := framework.RunRC(*config)
     if expectRunning {
@@ -494,13 +496,14 @@ func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
     By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
     request := int64(millicores / replicas)
     config := &testutils.RCConfig{
-        Client:     f.ClientSet,
-        Name:       id,
-        Namespace:  f.Namespace.Name,
-        Timeout:    defaultTimeout,
-        Image:      framework.GetPauseImageName(f.ClientSet),
-        Replicas:   replicas,
-        CpuRequest: request,
+        Client:         f.ClientSet,
+        InternalClient: f.InternalClientset,
+        Name:           id,
+        Namespace:      f.Namespace.Name,
+        Timeout:        defaultTimeout,
+        Image:          framework.GetPauseImageName(f.ClientSet),
+        Replicas:       replicas,
+        CpuRequest:     request,
     }
     framework.ExpectNoError(framework.RunRC(*config))
 }
@@ -509,13 +512,14 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
     By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
     request := int64(1024 * 1024 * megabytes / replicas)
     config := &testutils.RCConfig{
-        Client:     f.ClientSet,
-        Name:       id,
-        Namespace:  f.Namespace.Name,
-        Timeout:    defaultTimeout,
-        Image:      framework.GetPauseImageName(f.ClientSet),
-        Replicas:   replicas,
-        MemRequest: request,
+        Client:         f.ClientSet,
+        InternalClient: f.InternalClientset,
+        Name:           id,
+        Namespace:      f.Namespace.Name,
+        Timeout:        defaultTimeout,
+        Image:          framework.GetPauseImageName(f.ClientSet),
+        Replicas:       replicas,
+        MemRequest:     request,
     }
     err := framework.RunRC(*config)
     if expectRunning {
@@ -526,9 +530,9 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
 // WaitForClusterSize waits until the cluster size matches the given function.
 func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
     for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
-        nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+        nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
             "spec.unschedulable": "false",
-        }.AsSelector()})
+        }.AsSelector().String()})
         if err != nil {
             glog.Warningf("Failed to list nodes: %v", err)
             continue
@@ -536,8 +540,8 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
         numNodes := len(nodes.Items)

         // Filter out not-ready nodes.
-        framework.FilterNodes(nodes, func(node api.Node) bool {
-            return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+        framework.FilterNodes(nodes, func(node v1.Node) bool {
+            return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
         })
         numReady := len(nodes.Items)
@@ -553,7 +557,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
 func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
     var notready []string
     for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
-        pods, err := c.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
+        pods, err := c.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
         if err != nil {
             return fmt.Errorf("failed to get pods: %v", err)
         }
@@ -561,16 +565,16 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf
         for _, pod := range pods.Items {
             ready := false
             for _, c := range pod.Status.Conditions {
-                if c.Type == api.PodReady && c.Status == api.ConditionTrue {
+                if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
                     ready = true
                 }
             }
             // Failed pods in this context generally mean that they have been
             // double scheduled onto a node, but then failed a constraint check.
-            if pod.Status.Phase == api.PodFailed {
+            if pod.Status.Phase == v1.PodFailed {
                 glog.Warningf("Pod has failed: %v", pod)
             }
-            if !ready && pod.Status.Phase != api.PodFailed {
+            if !ready && pod.Status.Phase != v1.PodFailed {
                 notready = append(notready, pod.Name)
             }
         }
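The WaitForClusterSizeFunc hunk shows the same stringly-typed selector change applied to field selectors: fields.Set{...}.AsSelector() still builds the structured selector, and the trailing .String() renders it into the form v1.ListOptions now expects. In isolation (listSchedulableNodes is an illustrative name, not from the commit):

package example

import (
    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/fields"
)

// listSchedulableNodes serializes the field selector before handing it to
// the versioned list options, mirroring the hunk above.
func listSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
    return c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
        "spec.unschedulable": "false",
    }.AsSelector().String()})
}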

View File

@@ -21,8 +21,8 @@ import (
     "path"
     "strings"

-    "k8s.io/kubernetes/pkg/api"
-    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+    "k8s.io/kubernetes/pkg/api/v1"
+    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
     "k8s.io/kubernetes/pkg/util/wait"
     "k8s.io/kubernetes/test/e2e/chaosmonkey"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -152,11 +152,11 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
     By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
     // TODO it's weird that we have to do this and then wait WaitForLoadBalancer which changes
     // tcpService.
-    tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *api.Service) {
-        s.Spec.Type = api.ServiceTypeLoadBalancer
+    tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
+        s.Spec.Type = v1.ServiceTypeLoadBalancer
     })
     tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, loadBalancerCreateTimeoutDefault)
-    jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+    jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)

     // Get info to hit it with
     tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
@@ -188,7 +188,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
     // Sanity check and hit it once more
     By("hitting the pod through the service's LoadBalancer")
     jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeoutDefault)
-    jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+    jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
 }

 func checkMasterVersion(c clientset.Interface, want string) error {
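The service jig's tweak callback migrates the same way as everything else: only the parameter type changes, from *api.Service to *v1.Service, and the enum constant follows its type. Extracted as a named function purely for illustration (the name is not from the commit):

package example

import "k8s.io/kubernetes/pkg/api/v1"

// tweakToLoadBalancer is the shape of the function literal passed to
// jig.CreateTCPServiceOrFail in the hunk above.
func tweakToLoadBalancer(s *v1.Service) {
    s.Spec.Type = v1.ServiceTypeLoadBalancer
}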

View File

@@ -21,7 +21,7 @@ import (
     "os"
     "time"

-    "k8s.io/kubernetes/pkg/api"
+    "k8s.io/kubernetes/pkg/api/v1"
     "k8s.io/kubernetes/pkg/util/uuid"
     "k8s.io/kubernetes/test/e2e/framework"
@@ -79,8 +79,8 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
         volumeMountPath := "/etc/configmap-volume"
         containerName := "configmap-volume-test"

-        configMap := &api.ConfigMap{
-            ObjectMeta: api.ObjectMeta{
+        configMap := &v1.ConfigMap{
+            ObjectMeta: v1.ObjectMeta{
                 Namespace: f.Namespace.Name,
                 Name:      name,
             },
@@ -95,29 +95,29 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
             framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
         }

-        pod := &api.Pod{
-            ObjectMeta: api.ObjectMeta{
+        pod := &v1.Pod{
+            ObjectMeta: v1.ObjectMeta{
                 Name: "pod-configmaps-" + string(uuid.NewUUID()),
             },
-            Spec: api.PodSpec{
-                Volumes: []api.Volume{
+            Spec: v1.PodSpec{
+                Volumes: []v1.Volume{
                     {
                         Name: volumeName,
-                        VolumeSource: api.VolumeSource{
-                            ConfigMap: &api.ConfigMapVolumeSource{
-                                LocalObjectReference: api.LocalObjectReference{
+                        VolumeSource: v1.VolumeSource{
+                            ConfigMap: &v1.ConfigMapVolumeSource{
+                                LocalObjectReference: v1.LocalObjectReference{
                                     Name: name,
                                 },
                             },
                         },
                     },
                 },
-                Containers: []api.Container{
+                Containers: []v1.Container{
                     {
                         Name:    containerName,
                         Image:   "gcr.io/google_containers/mounttest:0.7",
                         Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
-                        VolumeMounts: []api.VolumeMount{
+                        VolumeMounts: []v1.VolumeMount{
                             {
                                 Name:      volumeName,
                                 MountPath: volumeMountPath,
@@ -126,7 +126,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
                             },
                         },
                     },
-                RestartPolicy: api.RestartPolicyNever,
+                RestartPolicy: v1.RestartPolicyNever,
             },
         }

         By("Creating the pod")
@@ -157,22 +157,22 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
             framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
         }

-        pod := &api.Pod{
-            ObjectMeta: api.ObjectMeta{
+        pod := &v1.Pod{
+            ObjectMeta: v1.ObjectMeta{
                 Name: "pod-configmaps-" + string(uuid.NewUUID()),
             },
-            Spec: api.PodSpec{
-                Containers: []api.Container{
+            Spec: v1.PodSpec{
+                Containers: []v1.Container{
                     {
                         Name:    "env-test",
                         Image:   "gcr.io/google_containers/busybox:1.24",
                         Command: []string{"sh", "-c", "env"},
-                        Env: []api.EnvVar{
+                        Env: []v1.EnvVar{
                             {
                                 Name: "CONFIG_DATA_1",
-                                ValueFrom: &api.EnvVarSource{
-                                    ConfigMapKeyRef: &api.ConfigMapKeySelector{
-                                        LocalObjectReference: api.LocalObjectReference{
+                                ValueFrom: &v1.EnvVarSource{
+                                    ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+                                        LocalObjectReference: v1.LocalObjectReference{
                                             Name: name,
                                         },
                                         Key: "data-1",
@@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
                             },
                         },
                     },
-                RestartPolicy: api.RestartPolicyNever,
+                RestartPolicy: v1.RestartPolicyNever,
             },
         }
@@ -207,17 +207,17 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
             framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
         }

-        pod := &api.Pod{
-            ObjectMeta: api.ObjectMeta{
+        pod := &v1.Pod{
+            ObjectMeta: v1.ObjectMeta{
                 Name: "pod-configmaps-" + string(uuid.NewUUID()),
             },
-            Spec: api.PodSpec{
-                Volumes: []api.Volume{
+            Spec: v1.PodSpec{
+                Volumes: []v1.Volume{
                     {
                         Name: volumeName,
-                        VolumeSource: api.VolumeSource{
-                            ConfigMap: &api.ConfigMapVolumeSource{
-                                LocalObjectReference: api.LocalObjectReference{
+                        VolumeSource: v1.VolumeSource{
+                            ConfigMap: &v1.ConfigMapVolumeSource{
+                                LocalObjectReference: v1.LocalObjectReference{
                                     Name: name,
                                 },
                             },
                         },
@@ -225,21 +225,21 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
                     },
                     {
                         Name: volumeName2,
-                        VolumeSource: api.VolumeSource{
-                            ConfigMap: &api.ConfigMapVolumeSource{
-                                LocalObjectReference: api.LocalObjectReference{
+                        VolumeSource: v1.VolumeSource{
+                            ConfigMap: &v1.ConfigMapVolumeSource{
+                                LocalObjectReference: v1.LocalObjectReference{
                                     Name: name,
                                 },
                             },
                         },
                     },
                 },
-                Containers: []api.Container{
+                Containers: []v1.Container{
                     {
                         Name:  "configmap-volume-test",
                         Image: "gcr.io/google_containers/mounttest:0.7",
                         Args:  []string{"--file_content=/etc/configmap-volume/data-1"},
-                        VolumeMounts: []api.VolumeMount{
+                        VolumeMounts: []v1.VolumeMount{
                             {
                                 Name:      volumeName,
                                 MountPath: volumeMountPath,
@@ -253,7 +253,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
                             },
                         },
                     },
-                RestartPolicy: api.RestartPolicyNever,
+                RestartPolicy: v1.RestartPolicyNever,
             },
         }
@@ -264,9 +264,9 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
     })
 })

-func newConfigMap(f *framework.Framework, name string) *api.ConfigMap {
-    return &api.ConfigMap{
-        ObjectMeta: api.ObjectMeta{
+func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
+    return &v1.ConfigMap{
+        ObjectMeta: v1.ObjectMeta{
             Namespace: f.Namespace.Name,
             Name:      name,
         },
@@ -292,32 +292,32 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
         framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
     }

-    pod := &api.Pod{
-        ObjectMeta: api.ObjectMeta{
+    pod := &v1.Pod{
+        ObjectMeta: v1.ObjectMeta{
             Name: "pod-configmaps-" + string(uuid.NewUUID()),
         },
-        Spec: api.PodSpec{
-            SecurityContext: &api.PodSecurityContext{},
-            Volumes: []api.Volume{
+        Spec: v1.PodSpec{
+            SecurityContext: &v1.PodSecurityContext{},
+            Volumes: []v1.Volume{
                 {
                     Name: volumeName,
-                    VolumeSource: api.VolumeSource{
-                        ConfigMap: &api.ConfigMapVolumeSource{
-                            LocalObjectReference: api.LocalObjectReference{
+                    VolumeSource: v1.VolumeSource{
+                        ConfigMap: &v1.ConfigMapVolumeSource{
+                            LocalObjectReference: v1.LocalObjectReference{
                                 Name: name,
                             },
                         },
                     },
                 },
             },
-            Containers: []api.Container{
+            Containers: []v1.Container{
                 {
                     Name:  "configmap-volume-test",
                     Image: "gcr.io/google_containers/mounttest:0.7",
                     Args: []string{
                         "--file_content=/etc/configmap-volume/data-1",
                         "--file_mode=/etc/configmap-volume/data-1"},
-                    VolumeMounts: []api.VolumeMount{
+                    VolumeMounts: []v1.VolumeMount{
                         {
                             Name:      volumeName,
                             MountPath: volumeMountPath,
@@ -325,7 +325,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
                         },
                     },
                 },
-            RestartPolicy: api.RestartPolicyNever,
+            RestartPolicy: v1.RestartPolicyNever,
         },
     }
@@ -353,7 +353,6 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
         output = append(output, "mode of file \"/etc/configmap-volume/data-1\": "+modeString)
     }
     f.TestContainerOutput("consume configMaps", pod, 0, output)
-
 }

 func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
@@ -371,21 +370,21 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
         framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
     }

-    pod := &api.Pod{
-        ObjectMeta: api.ObjectMeta{
+    pod := &v1.Pod{
+        ObjectMeta: v1.ObjectMeta{
             Name: "pod-configmaps-" + string(uuid.NewUUID()),
         },
-        Spec: api.PodSpec{
-            SecurityContext: &api.PodSecurityContext{},
-            Volumes: []api.Volume{
+        Spec: v1.PodSpec{
+            SecurityContext: &v1.PodSecurityContext{},
+            Volumes: []v1.Volume{
                 {
                     Name: volumeName,
-                    VolumeSource: api.VolumeSource{
-                        ConfigMap: &api.ConfigMapVolumeSource{
-                            LocalObjectReference: api.LocalObjectReference{
+                    VolumeSource: v1.VolumeSource{
+                        ConfigMap: &v1.ConfigMapVolumeSource{
+                            LocalObjectReference: v1.LocalObjectReference{
                                 Name: name,
                             },
-                            Items: []api.KeyToPath{
+                            Items: []v1.KeyToPath{
                                 {
                                     Key:  "data-2",
                                     Path: "path/to/data-2",
@@ -395,13 +394,13 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
                             },
                         },
                     },
-            Containers: []api.Container{
+            Containers: []v1.Container{
                 {
                     Name:  "configmap-volume-test",
                     Image: "gcr.io/google_containers/mounttest:0.7",
                     Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
                         "--file_mode=/etc/configmap-volume/path/to/data-2"},
-                    VolumeMounts: []api.VolumeMount{
+                    VolumeMounts: []v1.VolumeMount{
                         {
                             Name:      volumeName,
                             MountPath: volumeMountPath,
@@ -410,7 +409,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
                         },
                     },
                 },
-            RestartPolicy: api.RestartPolicyNever,
+            RestartPolicy: v1.RestartPolicyNever,
         },
     }
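Every hunk in this file is the mechanical api.-to-v1. rename over pod literals; none of the values change. Condensed to the volume wiring these tests exercise (configMapVolumePod is an illustrative helper, not from the commit; image and paths are taken from the diff):

package example

import "k8s.io/kubernetes/pkg/api/v1"

// configMapVolumePod builds the post-migration shape: a pod whose volume is
// backed by a ConfigMap and mounted into the test container.
func configMapVolumePod(name, volumeName string) *v1.Pod {
    return &v1.Pod{
        ObjectMeta: v1.ObjectMeta{Name: "pod-configmaps"},
        Spec: v1.PodSpec{
            Volumes: []v1.Volume{{
                Name: volumeName,
                VolumeSource: v1.VolumeSource{
                    ConfigMap: &v1.ConfigMapVolumeSource{
                        LocalObjectReference: v1.LocalObjectReference{Name: name},
                    },
                },
            }},
            Containers: []v1.Container{{
                Name:         "configmap-volume-test",
                Image:        "gcr.io/google_containers/mounttest:0.7",
                Args:         []string{"--file_content=/etc/configmap-volume/data-1"},
                VolumeMounts: []v1.VolumeMount{{Name: volumeName, MountPath: "/etc/configmap-volume"}},
            }},
            RestartPolicy: v1.RestartPolicyNever,
        },
    }
}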

View File

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
if err != nil { if err != nil {
return false, err return false, err
} }
return api.IsPodReady(p), nil return v1.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready") }, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name) p, err := podClient.Get(p.Name)
@ -94,20 +94,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
}) })
It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() { It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &api.Pod{ runLivenessTest(f, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec", Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "liveness", Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"}, Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
LivenessProbe: &api.Probe{ LivenessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
Exec: &api.ExecAction{ Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"}, Command: []string{"cat", "/tmp/health"},
}, },
}, },
@ -121,20 +121,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
}) })
It("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() { It("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
runLivenessTest(f, &api.Pod{ runLivenessTest(f, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec", Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "liveness", Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"}, Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
LivenessProbe: &api.Probe{ LivenessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
Exec: &api.ExecAction{ Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"}, Command: []string{"cat", "/tmp/health"},
}, },
}, },
@ -148,20 +148,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
}) })
It("should be restarted with a /healthz http liveness probe [Conformance]", func() { It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &api.Pod{ runLivenessTest(f, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "liveness-http", Name: "liveness-http",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "liveness", Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e", Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"}, Command: []string{"/server"},
LivenessProbe: &api.Probe{ LivenessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
HTTPGet: &api.HTTPGetAction{ HTTPGet: &v1.HTTPGetAction{
Path: "/healthz", Path: "/healthz",
Port: intstr.FromInt(8080), Port: intstr.FromInt(8080),
}, },
@ -177,20 +177,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
// Slow by design (5 min) // Slow by design (5 min)
It("should have monotonically increasing restart count [Conformance] [Slow]", func() { It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
runLivenessTest(f, &api.Pod{ runLivenessTest(f, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "liveness-http", Name: "liveness-http",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "liveness", Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e", Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"}, Command: []string{"/server"},
LivenessProbe: &api.Probe{ LivenessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
HTTPGet: &api.HTTPGetAction{ HTTPGet: &v1.HTTPGetAction{
Path: "/healthz", Path: "/healthz",
Port: intstr.FromInt(8080), Port: intstr.FromInt(8080),
}, },
@ -205,20 +205,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
}) })
It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() { It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
runLivenessTest(f, &api.Pod{ runLivenessTest(f, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "liveness-http", Name: "liveness-http",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "liveness", Name: "liveness",
Image: "gcr.io/google_containers/nginx-slim:0.7", Image: "gcr.io/google_containers/nginx-slim:0.7",
Ports: []api.ContainerPort{{ContainerPort: 80}}, Ports: []v1.ContainerPort{{ContainerPort: 80}},
LivenessProbe: &api.Probe{ LivenessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
HTTPGet: &api.HTTPGetAction{ HTTPGet: &v1.HTTPGetAction{
Path: "/", Path: "/",
Port: intstr.FromInt(80), Port: intstr.FromInt(80),
}, },
@@ -236,20 +236,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() { It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() {
// TODO: enable this test once the default exec handler supports timeout. // TODO: enable this test once the default exec handler supports timeout.
Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API") Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
runLivenessTest(f, &api.Pod{ runLivenessTest(f, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec", Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "liveness", Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "sleep 600"}, Command: []string{"/bin/sh", "-c", "sleep 600"},
LivenessProbe: &api.Probe{ LivenessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
Exec: &api.ExecAction{ Exec: &v1.ExecAction{
Command: []string{"/bin/sh", "-c", "sleep 10"}, Command: []string{"/bin/sh", "-c", "sleep 10"},
}, },
}, },
@@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
}) })
}) })
func getContainerStartedTime(p *api.Pod, containerName string) (time.Time, error) { func getContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error) {
for _, status := range p.Status.ContainerStatuses { for _, status := range p.Status.ContainerStatuses {
if status.Name != containerName { if status.Name != containerName {
continue continue
@@ -277,16 +277,16 @@ func getContainerStartedTime(p *api.Pod, containerName string) (time.Time, error
return time.Time{}, fmt.Errorf("cannot find container named %q", containerName) return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
} }
func getTransitionTimeForReadyCondition(p *api.Pod) (time.Time, error) { func getTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
for _, cond := range p.Status.Conditions { for _, cond := range p.Status.Conditions {
if cond.Type == api.PodReady { if cond.Type == v1.PodReady {
return cond.LastTransitionTime.Time, nil return cond.LastTransitionTime.Time, nil
} }
} }
return time.Time{}, fmt.Errorf("No ready condition can be found for pod") return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
} }
func getRestartCount(p *api.Pod) int { func getRestartCount(p *v1.Pod) int {
count := 0 count := 0
for _, containerStatus := range p.Status.ContainerStatuses { for _, containerStatus := range p.Status.ContainerStatuses {
count += int(containerStatus.RestartCount) count += int(containerStatus.RestartCount)
@@ -294,11 +294,11 @@ func getRestartCount(p *api.Pod) int {
return count return count
} }
func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod { func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())}, ObjectMeta: v1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: probTestContainerName, Name: probTestContainerName,
Image: "gcr.io/google_containers/test-webserver:e2e", Image: "gcr.io/google_containers/test-webserver:e2e",
@@ -326,10 +326,10 @@ func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {
return b return b
} }
func (b webserverProbeBuilder) build() *api.Probe { func (b webserverProbeBuilder) build() *v1.Probe {
probe := &api.Probe{ probe := &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
HTTPGet: &api.HTTPGetAction{ HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80), Port: intstr.FromInt(80),
Path: "/", Path: "/",
}, },
@@ -344,7 +344,7 @@ func (b webserverProbeBuilder) build() *api.Probe {
return probe return probe
} }
func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int, timeout time.Duration) { func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient() podClient := f.PodClient()
ns := f.Namespace.Name ns := f.Namespace.Name
Expect(pod.Spec.Containers).NotTo(BeEmpty()) Expect(pod.Spec.Containers).NotTo(BeEmpty())
@@ -352,7 +352,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
// At the end of the test, clean up by removing the pod. // At the end of the test, clean up by removing the pod.
defer func() { defer func() {
By("deleting the pod") By("deleting the pod")
podClient.Delete(pod.Name, api.NewDeleteOptions(0)) podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}() }()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns)) By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod) podClient.Create(pod)
@@ -368,7 +368,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
By("checking the pod's current state and verifying that restartCount is present") By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name) pod, err := podClient.Get(pod.Name)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount) framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
// Wait for the restart state to be as desired. // Wait for the restart state to be as desired.
@@ -378,7 +378,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) { for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(pod.Name) pod, err = podClient.Get(pod.Name)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount { if restartCount != lastRestartCount {
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)", framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, pod.Name, restartCount, time.Since(start)) ns, pod.Name, restartCount, time.Since(start))
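
Taken together, the hunks above move the liveness-probe fixtures from the internal api types to the versioned v1 package without changing test behavior. For orientation, a minimal self-contained sketch of the same construction, assuming the release-1.5 vendor tree is on GOPATH; the helper name execLivenessPod and the main scaffolding are illustrative, and probe timing fields elided from the hunks are omitted:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// execLivenessPod mirrors the "liveness-exec" fixture above: a busybox
// container whose liveness probe execs `cat /tmp/health`.
func execLivenessPod() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name:   "liveness-exec",
			Labels: map[string]string{"test": "liveness"},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:    "liveness",
				Image:   "gcr.io/google_containers/busybox:1.24",
				Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
				LivenessProbe: &v1.Probe{
					Handler: v1.Handler{
						Exec: &v1.ExecAction{Command: []string{"cat", "/tmp/health"}},
					},
				},
			}},
		},
	}
}

func main() {
	fmt.Println(execLivenessPod().Name)
}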


@@ -17,7 +17,7 @@ limitations under the License.
package common package common
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -67,21 +67,21 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
const testContainerName = "test-container" const testContainerName = "test-container"
// Return a prototypical entrypoint test pod // Return a prototypical entrypoint test pod
func entrypointTestPod() *api.Pod { func entrypointTestPod() *v1.Pod {
podName := "client-containers-" + string(uuid.NewUUID()) podName := "client-containers-" + string(uuid.NewUUID())
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: testContainerName, Name: testContainerName,
Image: "gcr.io/google_containers/eptest:0.1", Image: "gcr.io/google_containers/eptest:0.1",
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
} }
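
The prototype above is specialized by its callers before submission. A hedged sketch of that pattern, with the release-1.5 vendor tree assumed and the Args override purely illustrative, since the calling tests fall outside this hunk:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// prototype mirrors entrypointTestPod above: a run-once pod whose image
// entrypoint output is inspected rather than restarted.
func prototype() *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "client-containers-example"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "test-container",
				Image: "gcr.io/google_containers/eptest:0.1",
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}

func main() {
	pod := prototype()
	// Hypothetical override: exercise the image entrypoint with custom args.
	pod.Spec.Containers[0].Args = []string{"override", "arguments"}
	fmt.Println(pod.Spec.Containers[0].Args)
}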


@@ -19,8 +19,8 @@ package common
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -32,11 +32,11 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide pod name and namespace as env vars [Conformance]", func() { It("should provide pod name and namespace as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID()) podName := "downward-api-" + string(uuid.NewUUID())
env := []api.EnvVar{ env := []v1.EnvVar{
{ {
Name: "POD_NAME", Name: "POD_NAME",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "metadata.name", FieldPath: "metadata.name",
}, },
@@ -44,8 +44,8 @@ var _ = framework.KubeDescribe("Downward API", func() {
}, },
{ {
Name: "POD_NAMESPACE", Name: "POD_NAMESPACE",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "metadata.namespace", FieldPath: "metadata.namespace",
}, },
@@ -63,11 +63,11 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide pod IP as an env var [Conformance]", func() { It("should provide pod IP as an env var [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID()) podName := "downward-api-" + string(uuid.NewUUID())
env := []api.EnvVar{ env := []v1.EnvVar{
{ {
Name: "POD_IP", Name: "POD_IP",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "status.podIP", FieldPath: "status.podIP",
}, },
@@ -84,35 +84,35 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [Conformance]", func() { It("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID()) podName := "downward-api-" + string(uuid.NewUUID())
env := []api.EnvVar{ env := []v1.EnvVar{
{ {
Name: "CPU_LIMIT", Name: "CPU_LIMIT",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu", Resource: "limits.cpu",
}, },
}, },
}, },
{ {
Name: "MEMORY_LIMIT", Name: "MEMORY_LIMIT",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory", Resource: "limits.memory",
}, },
}, },
}, },
{ {
Name: "CPU_REQUEST", Name: "CPU_REQUEST",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.cpu", Resource: "requests.cpu",
}, },
}, },
}, },
{ {
Name: "MEMORY_REQUEST", Name: "MEMORY_REQUEST",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.memory", Resource: "requests.memory",
}, },
}, },
@@ -130,19 +130,19 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide default limits.cpu/memory from node allocatable [Conformance]", func() { It("should provide default limits.cpu/memory from node allocatable [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID()) podName := "downward-api-" + string(uuid.NewUUID())
env := []api.EnvVar{ env := []v1.EnvVar{
{ {
Name: "CPU_LIMIT", Name: "CPU_LIMIT",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu", Resource: "limits.cpu",
}, },
}, },
}, },
{ {
Name: "MEMORY_LIMIT", Name: "MEMORY_LIMIT",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory", Resource: "limits.memory",
}, },
}, },
@@ -152,13 +152,13 @@ var _ = framework.KubeDescribe("Downward API", func() {
fmt.Sprintf("CPU_LIMIT=[1-9]"), fmt.Sprintf("CPU_LIMIT=[1-9]"),
fmt.Sprintf("MEMORY_LIMIT=[1-9]"), fmt.Sprintf("MEMORY_LIMIT=[1-9]"),
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "dapi-container", Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("Downward API", func() {
Env: env, Env: env,
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@@ -174,38 +174,38 @@ var _ = framework.KubeDescribe("Downward API", func() {
}) })
}) })
func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) { func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "dapi-container", Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"}, Command: []string{"sh", "-c", "env"},
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceCPU: resource.MustParse("250m"), v1.ResourceCPU: resource.MustParse("250m"),
api.ResourceMemory: resource.MustParse("32Mi"), v1.ResourceMemory: resource.MustParse("32Mi"),
}, },
Limits: api.ResourceList{ Limits: v1.ResourceList{
api.ResourceCPU: resource.MustParse("1250m"), v1.ResourceCPU: resource.MustParse("1250m"),
api.ResourceMemory: resource.MustParse("64Mi"), v1.ResourceMemory: resource.MustParse("64Mi"),
}, },
}, },
Env: env, Env: env,
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
testDownwardAPIUsingPod(f, pod, env, expectations) testDownwardAPIUsingPod(f, pod, env, expectations)
} }
func testDownwardAPIUsingPod(f *framework.Framework, pod *api.Pod, env []api.EnvVar, expectations []string) { func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations) f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
} }
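
Every env-var fixture in this file follows one shape: a v1.EnvVar whose value comes from a field or resource selector instead of a literal. A minimal sketch of the two selector kinds, assuming the release-1.5 vendor tree; downwardEnv and the main scaffolding are illustrative names:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// downwardEnv builds one variable per selector kind used above.
func downwardEnv() []v1.EnvVar {
	return []v1.EnvVar{
		{
			// Injected from the pod's own metadata via the downward API.
			Name: "POD_NAME",
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{
					APIVersion: "v1",
					FieldPath:  "metadata.name",
				},
			},
		},
		{
			// Injected from the container's own resource requests.
			Name: "CPU_REQUEST",
			ValueFrom: &v1.EnvVarSource{
				ResourceFieldRef: &v1.ResourceFieldSelector{
					Resource: "requests.cpu",
				},
			},
		},
	}
}

func main() {
	for _, e := range downwardEnv() {
		fmt.Println(e.Name)
	}
}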


@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -72,7 +72,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
uid := int64(1001) uid := int64(1001)
gid := int64(1234) gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname") pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
pod.Spec.SecurityContext = &api.PodSecurityContext{ pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid, RunAsUser: &uid,
FSGroup: &gid, FSGroup: &gid,
} }
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n")) podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
//modify labels //modify labels
podClient.Update(podName, func(pod *api.Pod) { podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3" pod.Labels["key3"] = "value3"
}) })
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n")) podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
//modify annotations //modify annotations
podClient.Update(podName, func(pod *api.Pod) { podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo" pod.Annotations["builder"] = "foo"
}) })
@@ -189,15 +189,15 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
}) })
func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *api.Pod { func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil) pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []api.Container{ pod.Spec.Containers = []v1.Container{
{ {
Name: "client-container", Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_mode=" + filePath}, Command: []string{"/mt", "--file_mode=" + filePath},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "podinfo", Name: "podinfo",
MountPath: "/etc", MountPath: "/etc",
@@ -215,15 +215,15 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
return pod return pod
} }
func downwardAPIVolumePodForSimpleTest(name string, filePath string) *api.Pod { func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil) pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = []api.Container{ pod.Spec.Containers = []v1.Container{
{ {
Name: "client-container", Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath}, Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "podinfo", Name: "podinfo",
MountPath: "/etc", MountPath: "/etc",
@@ -236,35 +236,35 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *api.Pod {
return pod return pod
} }
func downwardAPIVolumeForContainerResources(name string, filePath string) *api.Pod { func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil) pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath) pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
return pod return pod
} }
func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *api.Pod { func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil) pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath) pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
return pod return pod
} }
func downwardAPIVolumeBaseContainers(name, filePath string) []api.Container { func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
return []api.Container{ return []v1.Container{
{ {
Name: name, Name: name,
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath}, Command: []string{"/mt", "--file_content=" + filePath},
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceCPU: resource.MustParse("250m"), v1.ResourceCPU: resource.MustParse("250m"),
api.ResourceMemory: resource.MustParse("32Mi"), v1.ResourceMemory: resource.MustParse("32Mi"),
}, },
Limits: api.ResourceList{ Limits: v1.ResourceList{
api.ResourceCPU: resource.MustParse("1250m"), v1.ResourceCPU: resource.MustParse("1250m"),
api.ResourceMemory: resource.MustParse("64Mi"), v1.ResourceMemory: resource.MustParse("64Mi"),
}, },
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "podinfo", Name: "podinfo",
MountPath: "/etc", MountPath: "/etc",
@@ -276,13 +276,13 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []api.Container {
} }
func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []api.Container { func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
return []api.Container{ return []v1.Container{
{ {
Name: name, Name: name,
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath}, Command: []string{"/mt", "--file_content=" + filePath},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "podinfo", Name: "podinfo",
MountPath: "/etc", MountPath: "/etc",
@@ -293,15 +293,15 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []api.Containe
} }
func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *api.Pod { func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, labels, annotations) pod := downwardAPIVolumeBasePod(name, labels, annotations)
pod.Spec.Containers = []api.Container{ pod.Spec.Containers = []v1.Container{
{ {
Name: "client-container", Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath}, Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "podinfo", Name: "podinfo",
MountPath: "/etc", MountPath: "/etc",
@@ -315,51 +315,51 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
return pod return pod
} }
func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *api.Pod { func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: labels, Labels: labels,
Annotations: annotations, Annotations: annotations,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "podinfo", Name: "podinfo",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
DownwardAPI: &api.DownwardAPIVolumeSource{ DownwardAPI: &v1.DownwardAPIVolumeSource{
Items: []api.DownwardAPIVolumeFile{ Items: []v1.DownwardAPIVolumeFile{
{ {
Path: "podname", Path: "podname",
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "metadata.name", FieldPath: "metadata.name",
}, },
}, },
{ {
Path: "cpu_limit", Path: "cpu_limit",
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container", ContainerName: "client-container",
Resource: "limits.cpu", Resource: "limits.cpu",
}, },
}, },
{ {
Path: "cpu_request", Path: "cpu_request",
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container", ContainerName: "client-container",
Resource: "requests.cpu", Resource: "requests.cpu",
}, },
}, },
{ {
Path: "memory_limit", Path: "memory_limit",
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container", ContainerName: "client-container",
Resource: "limits.memory", Resource: "limits.memory",
}, },
}, },
{ {
Path: "memory_request", Path: "memory_request",
ResourceFieldRef: &api.ResourceFieldSelector{ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container", ContainerName: "client-container",
Resource: "requests.memory", Resource: "requests.memory",
}, },
@@ -369,18 +369,18 @@ func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
return pod return pod
} }
func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *api.Pod) { func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
if len(labels) > 0 { if len(labels) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, api.DownwardAPIVolumeFile{ pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "labels", Path: "labels",
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "metadata.labels", FieldPath: "metadata.labels",
}, },
@@ -388,9 +388,9 @@ func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]st
} }
if len(annotations) > 0 { if len(annotations) > 0 {
pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, api.DownwardAPIVolumeFile{ pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "annotations", Path: "annotations",
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "metadata.annotations", FieldPath: "metadata.annotations",
}, },
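
The volume variant projects the same metadata into files rather than env vars. A minimal sketch of the "podinfo" volume assembled above, assuming the release-1.5 vendor tree; podinfoVolume is an illustrative name, and only two of the five items built by downwardAPIVolumeBasePod are shown:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// podinfoVolume exposes pod metadata and container resources as files
// that land under the container's mount path (/etc in the tests above).
func podinfoVolume() v1.Volume {
	return v1.Volume{
		Name: "podinfo",
		VolumeSource: v1.VolumeSource{
			DownwardAPI: &v1.DownwardAPIVolumeSource{
				Items: []v1.DownwardAPIVolumeFile{
					{
						Path: "podname",
						FieldRef: &v1.ObjectFieldSelector{
							APIVersion: "v1",
							FieldPath:  "metadata.name",
						},
					},
					{
						Path: "cpu_limit",
						ResourceFieldRef: &v1.ResourceFieldSelector{
							ContainerName: "client-container",
							Resource:      "limits.cpu",
						},
					},
				},
			},
		},
	}
}

func main() {
	fmt.Println(podinfoVolume().Name)
}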


@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"path" "path"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -40,80 +40,80 @@ var _ = framework.KubeDescribe("EmptyDir volumes", func() {
Context("when FSGroup is specified [Feature:FSGroup]", func() { Context("when FSGroup is specified [Feature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root", func() { It("new files should be created with FSGroup ownership when container is root", func() {
doTestSetgidFSGroup(f, testImageRootUid, api.StorageMediumMemory) doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
}) })
It("new files should be created with FSGroup ownership when container is non-root", func() { It("new files should be created with FSGroup ownership when container is non-root", func() {
doTestSetgidFSGroup(f, testImageNonRootUid, api.StorageMediumMemory) doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
}) })
It("files with FSGroup ownership should support (root,0644,tmpfs)", func() { It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
doTest0644FSGroup(f, testImageRootUid, api.StorageMediumMemory) doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
}) })
It("volume on default medium should have the correct mode using FSGroup", func() { It("volume on default medium should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumDefault) doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
}) })
It("volume on tmpfs should have the correct mode using FSGroup", func() { It("volume on tmpfs should have the correct mode using FSGroup", func() {
doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumMemory) doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
}) })
}) })
It("volume on tmpfs should have the correct mode [Conformance]", func() { It("volume on tmpfs should have the correct mode [Conformance]", func() {
doTestVolumeMode(f, testImageRootUid, api.StorageMediumMemory) doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
}) })
It("should support (root,0644,tmpfs) [Conformance]", func() { It("should support (root,0644,tmpfs) [Conformance]", func() {
doTest0644(f, testImageRootUid, api.StorageMediumMemory) doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
}) })
It("should support (root,0666,tmpfs) [Conformance]", func() { It("should support (root,0666,tmpfs) [Conformance]", func() {
doTest0666(f, testImageRootUid, api.StorageMediumMemory) doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
}) })
It("should support (root,0777,tmpfs) [Conformance]", func() { It("should support (root,0777,tmpfs) [Conformance]", func() {
doTest0777(f, testImageRootUid, api.StorageMediumMemory) doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
}) })
It("should support (non-root,0644,tmpfs) [Conformance]", func() { It("should support (non-root,0644,tmpfs) [Conformance]", func() {
doTest0644(f, testImageNonRootUid, api.StorageMediumMemory) doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
}) })
It("should support (non-root,0666,tmpfs) [Conformance]", func() { It("should support (non-root,0666,tmpfs) [Conformance]", func() {
doTest0666(f, testImageNonRootUid, api.StorageMediumMemory) doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
}) })
It("should support (non-root,0777,tmpfs) [Conformance]", func() { It("should support (non-root,0777,tmpfs) [Conformance]", func() {
doTest0777(f, testImageNonRootUid, api.StorageMediumMemory) doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
}) })
It("volume on default medium should have the correct mode [Conformance]", func() { It("volume on default medium should have the correct mode [Conformance]", func() {
doTestVolumeMode(f, testImageRootUid, api.StorageMediumDefault) doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
}) })
It("should support (root,0644,default) [Conformance]", func() { It("should support (root,0644,default) [Conformance]", func() {
doTest0644(f, testImageRootUid, api.StorageMediumDefault) doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
}) })
It("should support (root,0666,default) [Conformance]", func() { It("should support (root,0666,default) [Conformance]", func() {
doTest0666(f, testImageRootUid, api.StorageMediumDefault) doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
}) })
It("should support (root,0777,default) [Conformance]", func() { It("should support (root,0777,default) [Conformance]", func() {
doTest0777(f, testImageRootUid, api.StorageMediumDefault) doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
}) })
It("should support (non-root,0644,default) [Conformance]", func() { It("should support (non-root,0644,default) [Conformance]", func() {
doTest0644(f, testImageNonRootUid, api.StorageMediumDefault) doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
}) })
It("should support (non-root,0666,default) [Conformance]", func() { It("should support (non-root,0666,default) [Conformance]", func() {
doTest0666(f, testImageNonRootUid, api.StorageMediumDefault) doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
}) })
It("should support (non-root,0777,default) [Conformance]", func() { It("should support (non-root,0777,default) [Conformance]", func() {
doTest0777(f, testImageNonRootUid, api.StorageMediumDefault) doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
}) })
}) })
@@ -122,11 +122,11 @@ const (
volumeName = "test-volume" volumeName = "test-volume"
) )
func doTestSetgidFSGroup(f *framework.Framework, image string, medium api.StorageMedium) { func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(testImageRootUid, volumePath, source) pod = testPodWithVolume(testImageRootUid, volumePath, source)
) )
@@ -146,16 +146,16 @@ func doTestSetgidFSGroup(f *framework.Framework, image string, medium api.Storag
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
"owner GID of \"/test-volume/test-file\": 123", "owner GID of \"/test-volume/test-file\": 123",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium api.StorageMedium) { func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(testImageRootUid, volumePath, source) pod = testPodWithVolume(testImageRootUid, volumePath, source)
) )
@@ -171,17 +171,17 @@ func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium api.St
out := []string{ out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx", "perms of file \"/test-volume\": -rwxrwxrwx",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageMedium) { func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source) pod = testPodWithVolume(image, volumePath, source)
) )
@@ -199,16 +199,16 @@ func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageM
"perms of file \"/test-volume/test-file\": -rw-r--r--", "perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTestVolumeMode(f *framework.Framework, image string, medium api.StorageMedium) { func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(testImageRootUid, volumePath, source) pod = testPodWithVolume(testImageRootUid, volumePath, source)
) )
@@ -221,17 +221,17 @@ func doTestVolumeMode(f *framework.Framework, image string, medium api.StorageMe
out := []string{ out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx", "perms of file \"/test-volume\": -rwxrwxrwx",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0644(f *framework.Framework, image string, medium api.StorageMedium) { func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source) pod = testPodWithVolume(image, volumePath, source)
) )
@@ -246,17 +246,17 @@ func doTest0644(f *framework.Framework, image string, medium api.StorageMedium)
"perms of file \"/test-volume/test-file\": -rw-r--r--", "perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0666(f *framework.Framework, image string, medium api.StorageMedium) { func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source) pod = testPodWithVolume(image, volumePath, source)
) )
@@ -271,17 +271,17 @@ func doTest0666(f *framework.Framework, image string, medium api.StorageMedium)
"perms of file \"/test-volume/test-file\": -rw-rw-rw-", "perms of file \"/test-volume/test-file\": -rw-rw-rw-",
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func doTest0777(f *framework.Framework, image string, medium api.StorageMedium) { func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) {
var ( var (
volumePath = "/test-volume" volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file") filePath = path.Join(volumePath, "test-file")
source = &api.EmptyDirVolumeSource{Medium: medium} source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source) pod = testPodWithVolume(image, volumePath, source)
) )
@@ -296,36 +296,36 @@ func doTest0777(f *framework.Framework, image string, medium api.StorageMedium)
"perms of file \"/test-volume/test-file\": -rwxrwxrwx", "perms of file \"/test-volume/test-file\": -rwxrwxrwx",
"content of file \"/test-volume/test-file\": mount-tester new file", "content of file \"/test-volume/test-file\": mount-tester new file",
} }
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs") out = append(out, "mount type of \"/test-volume\": tmpfs")
} }
f.TestContainerOutput(msg, pod, 0, out) f.TestContainerOutput(msg, pod, 0, out)
} }
func formatMedium(medium api.StorageMedium) string { func formatMedium(medium v1.StorageMedium) string {
if medium == api.StorageMediumMemory { if medium == v1.StorageMediumMemory {
return "tmpfs" return "tmpfs"
} }
return "node default medium" return "node default medium"
} }
func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *api.Pod { func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
podName := "pod-" + string(uuid.NewUUID()) podName := "pod-" + string(uuid.NewUUID())
return &api.Pod{ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: containerName, Name: containerName,
Image: image, Image: image,
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: path, MountPath: path,
@@ -333,16 +333,16 @@ func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *ap
}, },
}, },
}, },
SecurityContext: &api.PodSecurityContext{ SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &api.SELinuxOptions{ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0", Level: "s0",
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: source, EmptyDir: source,
}, },
}, },
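
All of the emptyDir cases above branch on a single property: a volume backed by v1.StorageMediumMemory is mounted as tmpfs, while the default medium uses node-local storage, which is why the tmpfs mount-type expectation is appended conditionally. A minimal sketch mirroring formatMedium, assuming the release-1.5 vendor tree; expectedMountType and the main scaffolding are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// expectedMountType reports how an emptyDir of the given medium surfaces
// inside the container, matching the checks in the tests above.
func expectedMountType(medium v1.StorageMedium) string {
	if medium == v1.StorageMediumMemory {
		return "tmpfs"
	}
	return "node default medium"
}

func main() {
	source := &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory}
	fmt.Println(expectedMountType(source.Medium))
}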


@@ -17,7 +17,7 @@ limitations under the License.
package common package common
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -31,18 +31,18 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow composing env vars into new env vars [Conformance]", func() { It("should allow composing env vars into new env vars [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID()) podName := "var-expansion-" + string(uuid.NewUUID())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "dapi-container", Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"}, Command: []string{"sh", "-c", "env"},
Env: []api.EnvVar{ Env: []v1.EnvVar{
{ {
Name: "FOO", Name: "FOO",
Value: "foo-value", Value: "foo-value",
@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@@ -71,18 +71,18 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow substituting values in a container's command [Conformance]", func() { It("should allow substituting values in a container's command [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID()) podName := "var-expansion-" + string(uuid.NewUUID())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "dapi-container", Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""}, Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []api.EnvVar{ Env: []v1.EnvVar{
{ {
Name: "TEST_VAR", Name: "TEST_VAR",
Value: "test-value", Value: "test-value",
@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@@ -101,19 +101,19 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow substituting values in a container's args [Conformance]", func() { It("should allow substituting values in a container's args [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID()) podName := "var-expansion-" + string(uuid.NewUUID())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "dapi-container", Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c"}, Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""}, Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
Env: []api.EnvVar{ Env: []v1.EnvVar{
{ {
Name: "TEST_VAR", Name: "TEST_VAR",
Value: "test-value", Value: "test-value",
@@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
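
All three expansion tests rely on the same ordering: $(TEST_VAR) in Command and Args is substituted by the kubelet from the container's Env before the shell runs, so the inline TEST_VAR=wrong assignment never wins. A minimal sketch of that fixture, assuming the release-1.5 vendor tree; expansionContainer and the main scaffolding are illustrative:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// expansionContainer echoes "test-value": the kubelet expands $(TEST_VAR)
// from Env before the shell ever sees the command line.
func expansionContainer() v1.Container {
	return v1.Container{
		Name:    "dapi-container",
		Image:   "gcr.io/google_containers/busybox:1.24",
		Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
		Env: []v1.EnvVar{
			{Name: "TEST_VAR", Value: "test-value"},
		},
	}
}

func main() {
	fmt.Println(expansionContainer().Command)
}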


@@ -21,8 +21,8 @@ import (
"os" "os"
"path" "path"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("HostPath", func() {
It("should give a volume the correct mode [Conformance]", func() { It("should give a volume the correct mode [Conformance]", func() {
volumePath := "/test-volume" volumePath := "/test-volume"
source := &api.HostPathVolumeSource{ source := &v1.HostPathVolumeSource{
Path: "/tmp", Path: "/tmp",
} }
pod := testPodWithHostVol(volumePath, source) pod := testPodWithHostVol(volumePath, source)
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("HostPath", func() {
volumePath := "/test-volume" volumePath := "/test-volume"
filePath := path.Join(volumePath, "test-file") filePath := path.Join(volumePath, "test-file")
retryDuration := 180 retryDuration := 180
source := &api.HostPathVolumeSource{ source := &v1.HostPathVolumeSource{
Path: "/tmp", Path: "/tmp",
} }
pod := testPodWithHostVol(volumePath, source) pod := testPodWithHostVol(volumePath, source)
@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("HostPath", func() {
filePathInWriter := path.Join(volumePath, fileName) filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName) filePathInReader := path.Join(volumePath, subPath, fileName)
source := &api.HostPathVolumeSource{ source := &v1.HostPathVolumeSource{
Path: "/tmp", Path: "/tmp",
} }
pod := testPodWithHostVol(volumePath, source) pod := testPodWithHostVol(volumePath, source)
@@ -118,11 +118,11 @@ var _ = framework.KubeDescribe("HostPath", func() {
const containerName1 = "test-container-1" const containerName1 = "test-container-1"
const containerName2 = "test-container-2" const containerName2 = "test-container-2"
func mount(source *api.HostPathVolumeSource) []api.Volume { func mount(source *v1.HostPathVolumeSource) []v1.Volume {
return []api.Volume{ return []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: source, HostPath: source,
}, },
}, },
@@ -130,23 +130,23 @@ func mount(source *api.HostPathVolumeSource) []api.Volume {
} }
//TODO: To merge this with the emptyDir tests, we can make source a lambda. //TODO: To merge this with the emptyDir tests, we can make source a lambda.
func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod { func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
podName := "pod-host-path-test" podName := "pod-host-path-test"
return &api.Pod{ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: containerName1, Name: containerName1,
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: path, MountPath: path,
@@ -156,7 +156,7 @@ func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod
{ {
Name: containerName2, Name: containerName2,
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: path, MountPath: path,
@@ -164,7 +164,7 @@ func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Volumes: mount(source), Volumes: mount(source),
}, },
} }
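
The hostPath fixture wires a single source into both test containers through one named volume, which is what lets the writer and reader containers observe the same files. A minimal sketch of the mount helper above, assuming the release-1.5 vendor tree; hostVolume and the main scaffolding are illustrative names:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// hostVolume mirrors mount() above: one hostPath-backed volume that every
// container in the pod can reference by name.
func hostVolume(source *v1.HostPathVolumeSource) []v1.Volume {
	return []v1.Volume{
		{
			Name: "test-volume",
			VolumeSource: v1.VolumeSource{
				HostPath: source,
			},
		},
	}
}

func main() {
	vols := hostVolume(&v1.HostPathVolumeSource{Path: "/tmp"})
	fmt.Println(vols[0].Name)
}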


@@ -21,9 +21,10 @@ import (
"strconv" "strconv"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
client "k8s.io/kubernetes/pkg/client/unversioned" "k8s.io/kubernetes/pkg/api/v1"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@@ -45,17 +45,17 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod") By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID()) name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
InitContainers: []api.Container{ InitContainers: []v1.Container{
{ {
Name: "init1", Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@@ -67,7 +67,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/true"}, Command: []string{"/bin/true"},
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "run1", Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@@ -76,19 +76,25 @@ var _ = framework.KubeDescribe("InitContainer", func() {
}, },
}, },
} }
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod) startedPod := podClient.Create(pod)
w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod") Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w) wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, client.PodCompleted) event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
Expect(err).To(BeNil()) Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*api.Pod) endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(endPod.Status.Phase).To(Equal(api.PodSucceeded)) Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
_, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil()) Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(api.ConditionTrue)) Expect(init.Status).To(Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses { for _, status := range endPod.Status.InitContainerStatuses {
@@ -104,16 +104,16 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod") By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID()) name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
InitContainers: []api.Container{ InitContainers: []v1.Container{
{ {
Name: "init1", Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@@ -125,33 +125,39 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/true"}, Command: []string{"/bin/true"},
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "run1", Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Limits: api.ResourceList{ Limits: v1.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
}, },
}, },
}, },
}, },
}, },
} }
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod) startedPod := podClient.Create(pod)
w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod") Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w) wr := watch.NewRecorder(w)
event, err := watch.Until(framework.PodStartTimeout, wr, client.PodRunning) event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
Expect(err).To(BeNil()) Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*api.Pod) endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(api.PodRunning)) Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
_, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil()) Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(api.ConditionTrue)) Expect(init.Status).To(Equal(v1.ConditionTrue))
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses { for _, status := range endPod.Status.InitContainerStatuses {
@ -167,16 +180,17 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod") By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID()) name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
InitContainers: []api.Container{ InitContainers: []v1.Container{
{ {
Name: "init1", Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@ -188,22 +202,25 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/true"}, Command: []string{"/bin/true"},
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "run1", Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Limits: api.ResourceList{ Limits: v1.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
}, },
}, },
}, },
}, },
}, },
} }
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod) startedPod := podClient.Create(pod)
w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod") Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w) wr := watch.NewRecorder(w)
@ -212,7 +229,10 @@ var _ = framework.KubeDescribe("InitContainer", func() {
// check for the first container to fail at least once // check for the first container to fail at least once
func(evt watch.Event) (bool, error) { func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) { switch t := evt.Object.(type) {
case *api.Pod: case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
for _, status := range t.Status.ContainerStatuses { for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil { if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status) return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
@ -244,7 +264,10 @@ var _ = framework.KubeDescribe("InitContainer", func() {
// verify we get two restarts // verify we get two restarts
func(evt watch.Event) (bool, error) { func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) { switch t := evt.Object.(type) {
case *api.Pod: case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
status := t.Status.InitContainerStatuses[0] status := t.Status.InitContainerStatuses[0]
if status.RestartCount < 3 { if status.RestartCount < 3 {
return false, nil return false, nil
@ -259,12 +282,15 @@ var _ = framework.KubeDescribe("InitContainer", func() {
) )
Expect(err).To(BeNil()) Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*api.Pod) endPod := event.Object.(*v1.Pod)
if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
Expect(err).To(BeNil())
}
Expect(endPod.Status.Phase).To(Equal(api.PodPending)) Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
_, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil()) Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(api.ConditionFalse)) Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized")) Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]")) Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
@ -276,17 +302,17 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod") By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID()) name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
InitContainers: []api.Container{ InitContainers: []v1.Container{
{ {
Name: "init1", Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@ -298,24 +324,27 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/false"}, Command: []string{"/bin/false"},
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "run1", Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"}, Command: []string{"/bin/true"},
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Limits: api.ResourceList{ Limits: v1.ResourceList{
api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
api.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI), v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
}, },
}, },
}, },
}, },
}, },
} }
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
Expect(err).To(BeNil())
}
startedPod := podClient.Create(pod) startedPod := podClient.Create(pod)
w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta)) w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod") Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w) wr := watch.NewRecorder(w)
@ -324,7 +353,10 @@ var _ = framework.KubeDescribe("InitContainer", func() {
// check for the second container to fail at least once // check for the second container to fail at least once
func(evt watch.Event) (bool, error) { func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) { switch t := evt.Object.(type) {
case *api.Pod: case *v1.Pod:
if err := podutil.SetInitContainersAndStatuses(t); err != nil {
Expect(err).To(BeNil())
}
for _, status := range t.Status.ContainerStatuses { for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil { if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status) return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
@ -358,16 +390,16 @@ var _ = framework.KubeDescribe("InitContainer", func() {
return false, fmt.Errorf("unexpected object: %#v", t) return false, fmt.Errorf("unexpected object: %#v", t)
} }
}, },
client.PodCompleted, conditions.PodCompleted,
) )
Expect(err).To(BeNil()) Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant) framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
endPod := event.Object.(*api.Pod) endPod := event.Object.(*v1.Pod)
Expect(endPod.Status.Phase).To(Equal(api.PodFailed)) Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
_, init := api.GetPodCondition(&endPod.Status, api.PodInitialized) _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil()) Expect(init).NotTo(BeNil())
Expect(init.Status).To(Equal(api.ConditionFalse)) Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized")) Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init2]")) Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2)) Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
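Editor's note: a recurring change in this file is the pair of podutil calls wrapped around every create and read. The release_1_5 typed client of this era carries init containers through pod annotations, so the tests serialize them before Create and rehydrate the typed fields after each read. A hedged sketch of that round trip (podutil is whatever helper package this file imports under that alias; the constructor is hypothetical):

pod := newInitContainerTestPod() // hypothetical constructor for the specs above
if err := podutil.SetInitContainersAnnotations(pod); err != nil {
	framework.Failf("encoding init containers into annotations: %v", err)
}
startedPod := podClient.Create(pod)
got, err := podClient.Get(startedPod.Name)
if err != nil {
	framework.Failf("getting pod %q: %v", startedPod.Name, err)
}
if err := podutil.SetInitContainersAndStatuses(got); err != nil {
	framework.Failf("decoding init container statuses: %v", err)
}
// got.Spec.InitContainers and got.Status.InitContainerStatuses are now populated.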

View File

@ -22,7 +22,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
api "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -34,8 +34,8 @@ const (
) )
type KubeletManagedHostConfig struct { type KubeletManagedHostConfig struct {
hostNetworkPod *api.Pod hostNetworkPod *v1.Pod
pod *api.Pod pod *v1.Pod
f *framework.Framework f *framework.Framework
} }
@ -128,17 +128,17 @@ func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerNam
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts") return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
} }
func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod { func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "busybox-1", Name: "busybox-1",
Image: etcHostsImageName, Image: etcHostsImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"sleep", "sleep",
"900", "900",
@ -147,7 +147,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
{ {
Name: "busybox-2", Name: "busybox-2",
Image: etcHostsImageName, Image: etcHostsImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"sleep", "sleep",
"900", "900",
@ -156,12 +156,12 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
{ {
Name: "busybox-3", Name: "busybox-3",
Image: etcHostsImageName, Image: etcHostsImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"sleep", "sleep",
"900", "900",
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "host-etc-hosts", Name: "host-etc-hosts",
MountPath: "/etc/hosts", MountPath: "/etc/hosts",
@ -169,11 +169,11 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
}, },
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "host-etc-hosts", Name: "host-etc-hosts",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: &api.HostPathVolumeSource{ HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts", Path: "/etc/hosts",
}, },
}, },
@ -184,20 +184,19 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
return pod return pod
} }
func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *api.Pod { func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
SecurityContext: &api.PodSecurityContext{ HostNetwork: true,
HostNetwork: true, SecurityContext: &v1.PodSecurityContext{},
}, Containers: []v1.Container{
Containers: []api.Container{
{ {
Name: "busybox-1", Name: "busybox-1",
Image: etcHostsImageName, Image: etcHostsImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"sleep", "sleep",
"900", "900",
@ -206,7 +205,7 @@ func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName str
{ {
Name: "busybox-2", Name: "busybox-2",
Image: etcHostsImageName, Image: etcHostsImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"sleep", "sleep",
"900", "900",

View File

@ -26,7 +26,7 @@ import (
"golang.org/x/net/websocket" "golang.org/x/net/websocket"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet" "k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
@ -46,7 +46,7 @@ var (
) )
// testHostIP tests that a pod gets a host IP // testHostIP tests that a pod gets a host IP
func testHostIP(podClient *framework.PodClient, pod *api.Pod) { func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
By("creating pod") By("creating pod")
podClient.CreateSync(pod) podClient.CreateSync(pod)
@ -69,7 +69,7 @@ func testHostIP(podClient *framework.PodClient, pod *api.Pod) {
} }
} }
func startPodAndGetBackOffs(podClient *framework.PodClient, pod *api.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) { func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod) podClient.CreateSync(pod)
time.Sleep(sleepAmount) time.Sleep(sleepAmount)
Expect(pod.Spec.Containers).NotTo(BeEmpty()) Expect(pod.Spec.Containers).NotTo(BeEmpty())
@ -102,7 +102,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
time.Sleep(time.Second) time.Sleep(time.Second)
pod, err := podClient.Get(podName) pod, err := podClient.Get(podName)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
status, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, containerName) status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok { if !ok {
framework.Logf("getRestartDelay: status missing") framework.Logf("getRestartDelay: status missing")
continue continue
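Editor's note: getRestartDelay polls rather than watches, and the v1 flavor of its lookup helper takes the status slice plus a container name. One polling step, sketched under the assumption that podClient, podName, and containerName come from the surrounding test:

pod, err := podClient.Get(podName)
if err != nil {
	framework.Failf("getting pod %s: %v", podName, err)
}
status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
	framework.Logf("getRestartDelay: status missing") // matches the hunk above; keep polling
} else {
	framework.Logf("container %q has restarted %d times", containerName, status.RestartCount)
}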
@ -127,12 +127,12 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should get a host IP [Conformance]", func() { It("should get a host IP [Conformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID()) name := "pod-hostip-" + string(uuid.NewUUID())
testHostIP(podClient, &api.Pod{ testHostIP(podClient, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "test", Name: "test",
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
@ -146,16 +146,16 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod") By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID()) name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7", Image: "gcr.io/google_containers/nginx-slim:0.7",
@ -166,12 +166,12 @@ var _ = framework.KubeDescribe("Pods", func() {
By("setting up watch") By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0)) Expect(len(pods.Items)).To(Equal(0))
options = api.ListOptions{ options = v1.ListOptions{
LabelSelector: selector, LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion, ResourceVersion: pods.ListMeta.ResourceVersion,
} }
w, err := podClient.Watch(options) w, err := podClient.Watch(options)
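Editor's note: the one-line change repeated throughout this file is that v1.ListOptions carries the label selector as a string, so the typed selector must be rendered with .String(). The watch setup above, condensed (value and pods come from earlier in the test):

selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
// Internal API accepted the typed selector directly:
//   options := api.ListOptions{LabelSelector: selector}
// v1 carries its string form:
options := v1.ListOptions{
	LabelSelector:   selector.String(),
	ResourceVersion: pods.ListMeta.ResourceVersion, // start the watch where the list left off
}
w, err := podClient.Watch(options)
if err != nil {
	framework.Failf("failed to set up watch: %v", err)
}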
@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = api.ListOptions{LabelSelector: selector} options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options) pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -206,7 +206,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Logf("running pod: %#v", pod) framework.Logf("running pod: %#v", pod)
By("deleting the pod gracefully") By("deleting the pod gracefully")
err = podClient.Delete(pod.Name, api.NewDeleteOptions(30)) err = podClient.Delete(pod.Name, v1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod") Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
By("verifying the kubelet observed the termination notice") By("verifying the kubelet observed the termination notice")
@ -233,13 +233,13 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying pod deletion was observed") By("verifying pod deletion was observed")
deleted := false deleted := false
timeout := false timeout := false
var lastPod *api.Pod var lastPod *v1.Pod
timer := time.After(30 * time.Second) timer := time.After(30 * time.Second)
for !deleted && !timeout { for !deleted && !timeout {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted { if event.Type == watch.Deleted {
lastPod = event.Object.(*api.Pod) lastPod = event.Object.(*v1.Pod)
deleted = true deleted = true
} }
case <-timer: case <-timer:
@ -254,7 +254,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = api.ListOptions{LabelSelector: selector} options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options) pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0)) Expect(len(pods.Items)).To(Equal(0))
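Editor's note: the grace-period checks above reduce to one shape: delete with v1.NewDeleteOptions, then drain the watch until the Deleted event arrives carrying the pod's final state. A condensed sketch (w is the watch opened earlier in the test; framework.Failf aborts the test, so the loop cannot spin forever):

err = podClient.Delete(pod.Name, v1.NewDeleteOptions(30)) // 30s grace period
if err != nil {
	framework.Failf("failed to delete pod: %v", err)
}
var lastPod *v1.Pod
timer := time.After(30 * time.Second)
for lastPod == nil {
	select {
	case event := <-w.ResultChan():
		if event.Type == watch.Deleted {
			lastPod = event.Object.(*v1.Pod) // final state rides on the event
		}
	case <-timer:
		framework.Failf("timed out waiting for pod deletion")
	}
}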
@ -264,16 +264,16 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod") By("creating the pod")
name := "pod-update-" + string(uuid.NewUUID()) name := "pod-update-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7", Image: "gcr.io/google_containers/nginx-slim:0.7",
@ -287,13 +287,13 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
By("updating the pod") By("updating the pod")
podClient.Update(name, func(pod *api.Pod) { podClient.Update(name, func(pod *v1.Pod) {
value = strconv.Itoa(time.Now().Nanosecond()) value = strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value pod.Labels["time"] = value
}) })
@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the updated pod is in kubernetes") By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = api.ListOptions{LabelSelector: selector} options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options) pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -313,16 +313,16 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod") By("creating the pod")
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID()) name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7", Image: "gcr.io/google_containers/nginx-slim:0.7",
@ -336,13 +336,13 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
By("updating the pod") By("updating the pod")
podClient.Update(name, func(pod *api.Pod) { podClient.Update(name, func(pod *v1.Pod) {
newDeadline := int64(5) newDeadline := int64(5)
pod.Spec.ActiveDeadlineSeconds = &newDeadline pod.Spec.ActiveDeadlineSeconds = &newDeadline
}) })
@ -354,17 +354,17 @@ var _ = framework.KubeDescribe("Pods", func() {
// Make a pod that will be a service. // Make a pod that will be a service.
// This pod serves its hostname via HTTP. // This pod serves its hostname via HTTP.
serverName := "server-envvars-" + string(uuid.NewUUID()) serverName := "server-envvars-" + string(uuid.NewUUID())
serverPod := &api.Pod{ serverPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: serverName, Name: serverName,
Labels: map[string]string{"name": serverName}, Labels: map[string]string{"name": serverName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "srv", Name: "srv",
Image: "gcr.io/google_containers/serve_hostname:v1.4", Image: "gcr.io/google_containers/serve_hostname:v1.4",
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
}, },
@ -379,15 +379,15 @@ var _ = framework.KubeDescribe("Pods", func() {
// to match the service. Another is to rethink environment variable names and possibly // to match the service. Another is to rethink environment variable names and possibly
// allow overriding the prefix in the service manifest. // allow overriding the prefix in the service manifest.
svcName := "fooservice" svcName := "fooservice"
svc := &api.Service{ svc := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: svcName, Name: svcName,
Labels: map[string]string{ Labels: map[string]string{
"name": svcName, "name": svcName,
}, },
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 8765, Port: 8765,
TargetPort: intstr.FromInt(8080), TargetPort: intstr.FromInt(8080),
}}, }},
@ -402,20 +402,20 @@ var _ = framework.KubeDescribe("Pods", func() {
// Make a client pod that verifies that it has the service environment variables. // Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID()) podName := "client-envvars-" + string(uuid.NewUUID())
const containerName = "env3cont" const containerName = "env3cont"
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: containerName, Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"}, Command: []string{"sh", "-c", "env"},
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@ -442,12 +442,12 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod") By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID()) name := "pod-exec-websocket-" + string(uuid.NewUUID())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "main", Name: "main",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@ -512,12 +512,12 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod") By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID()) name := "pod-logs-websocket-" + string(uuid.NewUUID())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "main", Name: "main",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@ -566,13 +566,13 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should have their auto-restart back-off timer reset on image update [Slow]", func() { It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
podName := "pod-back-off-image" podName := "pod-back-off-image"
containerName := "back-off" containerName := "back-off"
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"test": "back-off-image"}, Labels: map[string]string{"test": "back-off-image"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: containerName, Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@ -585,7 +585,7 @@ var _ = framework.KubeDescribe("Pods", func() {
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration) delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
By("updating the image") By("updating the image")
podClient.Update(podName, func(pod *api.Pod) { podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.7" pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.7"
}) })
@ -607,13 +607,13 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should cap back-off at MaxContainerBackOff [Slow]", func() { It("should cap back-off at MaxContainerBackOff [Slow]", func() {
podName := "back-off-cap" podName := "back-off-cap"
containerName := "back-off-cap" containerName := "back-off-cap"
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"test": "liveness"}, Labels: map[string]string{"test": "liveness"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: containerName, Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",

View File

@ -23,7 +23,7 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -40,9 +40,9 @@ const (
) )
type PrivilegedPodTestConfig struct { type PrivilegedPodTestConfig struct {
privilegedPod *api.Pod privilegedPod *v1.Pod
f *framework.Framework f *framework.Framework
hostExecPod *api.Pod hostExecPod *v1.Pod
} }
var _ = framework.KubeDescribe("PrivilegedPod", func() { var _ = framework.KubeDescribe("PrivilegedPod", func() {
@ -96,21 +96,21 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con
return output return output
} }
func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *api.Pod { func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *v1.Pod {
isPrivileged := true isPrivileged := true
notPrivileged := false notPrivileged := false
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: privilegedPodName, Name: privilegedPodName,
Namespace: config.f.Namespace.Name, Namespace: config.f.Namespace.Name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: privilegedContainerName, Name: privilegedContainerName,
Image: privilegedContainerImage, Image: privilegedContainerImage,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &api.SecurityContext{Privileged: &isPrivileged}, SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{ Command: []string{
"/netexec", "/netexec",
fmt.Sprintf("--http-port=%d", privilegedHttpPort), fmt.Sprintf("--http-port=%d", privilegedHttpPort),
@ -120,8 +120,8 @@ func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *api.Pod {
{ {
Name: notPrivilegedContainerName, Name: notPrivilegedContainerName,
Image: privilegedContainerImage, Image: privilegedContainerImage,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: &api.SecurityContext{Privileged: &notPrivileged}, SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{ Command: []string{
"/netexec", "/netexec",
fmt.Sprintf("--http-port=%d", notPrivilegedHttpPort), fmt.Sprintf("--http-port=%d", notPrivilegedHttpPort),

View File

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"os" "os"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() { It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
var ( var (
namespace2 *api.Namespace namespace2 *v1.Namespace
err error err error
secret2Name = "secret-test-" + string(uuid.NewUUID()) secret2Name = "secret-test-" + string(uuid.NewUUID())
) )
@ -88,37 +88,37 @@ var _ = framework.KubeDescribe("Secrets", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()), Name: "pod-secrets-" + string(uuid.NewUUID()),
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
Secret: &api.SecretVolumeSource{ Secret: &v1.SecretVolumeSource{
SecretName: name, SecretName: name,
}, },
}, },
}, },
{ {
Name: volumeName2, Name: volumeName2,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
Secret: &api.SecretVolumeSource{ Secret: &v1.SecretVolumeSource{
SecretName: name, SecretName: name,
}, },
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "secret-volume-test", Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{ Args: []string{
"--file_content=/etc/secret-volume/data-1", "--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"}, "--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: volumeMountPath, MountPath: volumeMountPath,
@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@ -152,22 +152,22 @@ var _ = framework.KubeDescribe("Secrets", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()), Name: "pod-secrets-" + string(uuid.NewUUID()),
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "secret-env-test", Name: "secret-env-test",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"}, Command: []string{"sh", "-c", "env"},
Env: []api.EnvVar{ Env: []v1.EnvVar{
{ {
Name: "SECRET_DATA", Name: "SECRET_DATA",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
SecretKeyRef: &api.SecretKeySelector{ SecretKeyRef: &v1.SecretKeySelector{
LocalObjectReference: api.LocalObjectReference{ LocalObjectReference: v1.LocalObjectReference{
Name: name, Name: name,
}, },
Key: "data-1", Key: "data-1",
@ -177,7 +177,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@ -187,9 +187,9 @@ var _ = framework.KubeDescribe("Secrets", func() {
}) })
}) })
func secretForTest(namespace, name string) *api.Secret { func secretForTest(namespace, name string) *v1.Secret {
return &api.Secret{ return &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: namespace, Namespace: namespace,
Name: name, Name: name,
}, },
@ -214,30 +214,30 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()), Name: "pod-secrets-" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
Secret: &api.SecretVolumeSource{ Secret: &v1.SecretVolumeSource{
SecretName: secretName, SecretName: secretName,
}, },
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "secret-volume-test", Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{ Args: []string{
"--file_content=/etc/secret-volume/data-1", "--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"}, "--file_mode=/etc/secret-volume/data-1"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: volumeMountPath, MountPath: volumeMountPath,
@ -245,7 +245,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
@ -279,18 +279,18 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
framework.Failf("unable to create test secret %s: %v", secret.Name, err) framework.Failf("unable to create test secret %s: %v", secret.Name, err)
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()), Name: "pod-secrets-" + string(uuid.NewUUID()),
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
Secret: &api.SecretVolumeSource{ Secret: &v1.SecretVolumeSource{
SecretName: name, SecretName: name,
Items: []api.KeyToPath{ Items: []v1.KeyToPath{
{ {
Key: "data-1", Key: "data-1",
Path: "new-path-data-1", Path: "new-path-data-1",
@ -300,14 +300,14 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "secret-volume-test", Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{ Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1", "--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"}, "--file_mode=/etc/secret-volume/new-path-data-1"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: volumeMountPath, MountPath: volumeMountPath,
@ -315,7 +315,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
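Editor's note: beyond volume mounts, this file's env-var test shows the v1 shape for referencing a single secret key. Factored out as a sketch (the helper name is ours, not the file's):

func secretEnvVar(secretName string) v1.EnvVar {
	return v1.EnvVar{
		Name: "SECRET_DATA",
		ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: secretName},
				Key:                  "data-1", // key inside the secret built by secretForTest
			},
		},
	}
}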

View File

@ -19,7 +19,7 @@ package common
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl" "k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -34,29 +34,29 @@ var _ = framework.KubeDescribe("Sysctls", func() {
f := framework.NewDefaultFramework("sysctl") f := framework.NewDefaultFramework("sysctl")
var podClient *framework.PodClient var podClient *framework.PodClient
testPod := func() *api.Pod { testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID()) podName := "sysctl-" + string(uuid.NewUUID())
pod := api.Pod{ pod := v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Annotations: map[string]string{}, Annotations: map[string]string{},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "test-container", Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
return &pod return &pod
} }
waitForPodErrorEventOrStarted := func(pod *api.Pod) (*api.Event, error) { waitForPodErrorEventOrStarted := func(pod *v1.Pod) (*v1.Event, error) {
var ev *api.Event var ev *v1.Event
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) { err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := f.ClientSet.Core().Events(pod.Namespace).Search(pod) evnts, err := f.ClientSet.Core().Events(pod.Namespace).Search(pod)
if err != nil { if err != nil {
@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should support sysctls", func() { It("should support sysctls", func() {
pod := testPod() pod := testPod()
pod.Annotations[api.SysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{ pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{ {
Name: "kernel.shm_rmid_forced", Name: "kernel.shm_rmid_forced",
Value: "1", Value: "1",
@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded") By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(api.PodSucceeded)) Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod") By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should support unsafe sysctls which are actually whitelisted", func() { It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod() pod := testPod()
pod.Annotations[api.UnsafeSysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{ pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{ {
Name: "kernel.shm_rmid_forced", Name: "kernel.shm_rmid_forced",
Value: "1", Value: "1",
@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded") By("Checking that the pod succeeded")
Expect(pod.Status.Phase).To(Equal(api.PodSucceeded)) Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod") By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name) log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should reject invalid sysctls", func() { It("should reject invalid sysctls", func() {
pod := testPod() pod := testPod()
pod.Annotations[api.SysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{ pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{ {
Name: "foo-", Name: "foo-",
Value: "bar", Value: "bar",
@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Value: "100000000", Value: "100000000",
}, },
}) })
pod.Annotations[api.UnsafeSysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{ pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{ {
Name: "kernel.shmall", Name: "kernel.shmall",
Value: "100000000", Value: "100000000",
@ -206,7 +206,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() { It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod() pod := testPod()
pod.Annotations[api.SysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{ pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{ {
Name: "kernel.msgmax", Name: "kernel.msgmax",
Value: "10000000000", Value: "10000000000",
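Editor's note: all four specs in this file drive the kubelet through the same annotation mechanism: a typed []v1.Sysctl list is rendered into a string annotation, with separate keys for safe and whitelisted-unsafe sysctls. Condensed from the hunks above (testPod is the file's own helper):

pod := testPod()
pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
	{Name: "kernel.shm_rmid_forced", Value: "1"}, // safe sysctl
})
pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
	{Name: "kernel.shmall", Value: "100000000"}, // unsafe; must be whitelisted on the node
})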

View File

@ -47,10 +47,10 @@ import (
"strings" "strings"
"time" "time"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog" "github.com/golang/glog"
@ -79,31 +79,31 @@ type VolumeTestConfig struct {
// Starts a container specified by config.serverImage and exports all // Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server // config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource. // IP address and create appropriate VolumeSource.
func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *api.Pod { func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
podClient := f.PodClient() podClient := f.PodClient()
portCount := len(config.serverPorts) portCount := len(config.serverPorts)
serverPodPorts := make([]api.ContainerPort, portCount) serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ { for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.prefix, i) portName := fmt.Sprintf("%s-%d", config.prefix, i)
serverPodPorts[i] = api.ContainerPort{ serverPodPorts[i] = v1.ContainerPort{
Name: portName, Name: portName,
ContainerPort: int32(config.serverPorts[i]), ContainerPort: int32(config.serverPorts[i]),
Protocol: api.ProtocolTCP, Protocol: v1.ProtocolTCP,
} }
} }
volumeCount := len(config.volumes) volumeCount := len(config.volumes)
volumes := make([]api.Volume, volumeCount) volumes := make([]v1.Volume, volumeCount)
mounts := make([]api.VolumeMount, volumeCount) mounts := make([]v1.VolumeMount, volumeCount)
i := 0 i := 0
for src, dst := range config.volumes { for src, dst := range config.volumes {
mountName := fmt.Sprintf("path%d", i) mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName volumes[i].Name = mountName
volumes[i].VolumeSource.HostPath = &api.HostPathVolumeSource{ volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src, Path: src,
} }
@ -117,24 +117,24 @@ func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *api.Pod
By(fmt.Sprint("creating ", config.prefix, " server pod")) By(fmt.Sprint("creating ", config.prefix, " server pod"))
privileged := new(bool) privileged := new(bool)
*privileged = true *privileged = true
serverPod := &api.Pod{ serverPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: "v1", APIVersion: "v1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server", Name: config.prefix + "-server",
Labels: map[string]string{ Labels: map[string]string{
"role": config.prefix + "-server", "role": config.prefix + "-server",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: config.prefix + "-server", Name: config.prefix + "-server",
Image: config.serverImage, Image: config.serverImage,
SecurityContext: &api.SecurityContext{ SecurityContext: &v1.SecurityContext{
Privileged: privileged, Privileged: privileged,
}, },
Args: config.serverArgs, Args: config.serverArgs,
@ -191,21 +191,21 @@ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
// Start a client pod using given VolumeSource (exported by startVolumeServer()) // Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod. // and check that the pod sees the data from the server pod.
func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume api.VolumeSource, fsGroup *int64, expectedContent string) { func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
By(fmt.Sprint("starting ", config.prefix, " client")) By(fmt.Sprint("starting ", config.prefix, " client"))
clientPod := &api.Pod{ clientPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: "v1", APIVersion: "v1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-client", Name: config.prefix + "-client",
Labels: map[string]string{ Labels: map[string]string{
"role": config.prefix + "-client", "role": config.prefix + "-client",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: config.prefix + "-client", Name: config.prefix + "-client",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
@ -218,7 +218,7 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume ap
"-c", "-c",
"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ", "while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: config.prefix + "-volume", Name: config.prefix + "-volume",
MountPath: "/opt/", MountPath: "/opt/",
@ -226,12 +226,12 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume ap
}, },
}, },
}, },
SecurityContext: &api.PodSecurityContext{ SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &api.SELinuxOptions{ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1", Level: "s0:c0,c1",
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: config.prefix + "-volume", Name: config.prefix + "-volume",
VolumeSource: volume, VolumeSource: volume,
@ -265,29 +265,29 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume ap
// Insert index.html with given content into given volume. It does so by // Insert index.html with given content into given volume. It does so by
// starting an auxiliary pod which writes the file there. // starting an auxiliary pod which writes the file there.
// The volume must be writable. // The volume must be writable.
func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.VolumeSource, content string) { func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.prefix, " injector")) By(fmt.Sprint("starting ", config.prefix, " injector"))
podClient := client.Core().Pods(config.namespace) podClient := client.Core().Pods(config.namespace)
injectPod := &api.Pod{ injectPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: "v1", APIVersion: "v1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-injector", Name: config.prefix + "-injector",
Labels: map[string]string{ Labels: map[string]string{
"role": config.prefix + "-injector", "role": config.prefix + "-injector",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: config.prefix + "-injector", Name: config.prefix + "-injector",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"}, Command: []string{"/bin/sh"},
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"}, Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: config.prefix + "-volume", Name: config.prefix + "-volume",
MountPath: "/mnt", MountPath: "/mnt",
@ -295,13 +295,13 @@ func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.
}, },
}, },
}, },
SecurityContext: &api.PodSecurityContext{ SecurityContext: &v1.PodSecurityContext{
SELinuxOptions: &api.SELinuxOptions{ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1", Level: "s0:c0,c1",
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: config.prefix + "-volume", Name: config.prefix + "-volume",
VolumeSource: volume, VolumeSource: volume,
@ -350,7 +350,7 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
// note that namespace deletion is handled by delete-namespace flag // note that namespace deletion is handled by delete-namespace flag
clean := true clean := true
// filled in BeforeEach // filled in BeforeEach
var namespace *api.Namespace var namespace *v1.Namespace
BeforeEach(func() { BeforeEach(func() {
if !isTestEnabled(f.ClientSet) { if !isTestEnabled(f.ClientSet) {
@ -381,8 +381,8 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
serverIP := pod.Status.PodIP serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP) framework.Logf("NFS server IP address: %v", serverIP)
volume := api.VolumeSource{ volume := v1.VolumeSource{
NFS: &api.NFSVolumeSource{ NFS: &v1.NFSVolumeSource{
Server: serverIP, Server: serverIP,
Path: "/", Path: "/",
ReadOnly: true, ReadOnly: true,
@ -416,26 +416,26 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
framework.Logf("Gluster server IP address: %v", serverIP) framework.Logf("Gluster server IP address: %v", serverIP)
// create Endpoints for the server // create Endpoints for the server
endpoints := api.Endpoints{ endpoints := v1.Endpoints{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Endpoints", Kind: "Endpoints",
APIVersion: "v1", APIVersion: "v1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server", Name: config.prefix + "-server",
}, },
Subsets: []api.EndpointSubset{ Subsets: []v1.EndpointSubset{
{ {
Addresses: []api.EndpointAddress{ Addresses: []v1.EndpointAddress{
{ {
IP: serverIP, IP: serverIP,
}, },
}, },
Ports: []api.EndpointPort{ Ports: []v1.EndpointPort{
{ {
Name: "gluster", Name: "gluster",
Port: 24007, Port: 24007,
Protocol: api.ProtocolTCP, Protocol: v1.ProtocolTCP,
}, },
}, },
}, },
@ -454,8 +454,8 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
framework.Failf("Failed to create endpoints for Gluster server: %v", err) framework.Failf("Failed to create endpoints for Gluster server: %v", err)
} }
volume := api.VolumeSource{ volume := v1.VolumeSource{
Glusterfs: &api.GlusterfsVolumeSource{ Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server", EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh // 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol", Path: "test_vol",
View File
@ -23,10 +23,11 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/job" "k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -62,7 +63,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring at least two running jobs exists by listing jobs explicitly") By("Ensuring at least two running jobs exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs) activeJobs := filterActiveJobs(jobs)
Expect(len(activeJobs) >= 2).To(BeTrue()) Expect(len(activeJobs) >= 2).To(BeTrue())
@ -85,7 +86,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
By("Ensuring no job exists by listing jobs explicitly") By("Ensuring no job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(jobs.Items).To(HaveLen(0)) Expect(jobs.Items).To(HaveLen(0))
@ -111,7 +112,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(cronJob.Status.Active).Should(HaveLen(1)) Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly") By("Ensuring exaclty one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs) activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1)) Expect(activeJobs).To(HaveLen(1))
@ -142,7 +143,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(cronJob.Status.Active).Should(HaveLen(1)) Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly") By("Ensuring exaclty one running job exists by listing jobs explicitly")
jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{}) jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs) activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1)) Expect(activeJobs).To(HaveLen(1))
@ -184,7 +185,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
parallelism := int32(1) parallelism := int32(1)
completions := int32(1) completions := int32(1)
sj := &batch.CronJob{ sj := &batch.CronJob{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: batch.CronJobSpec{ Spec: batch.CronJobSpec{
@ -194,22 +195,22 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
Spec: batch.JobSpec{ Spec: batch.JobSpec{
Parallelism: &parallelism, Parallelism: &parallelism,
Completions: &completions, Completions: &completions,
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: api.PodSpec{ Spec: v1.PodSpec{
RestartPolicy: api.RestartPolicyOnFailure, RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "data", Name: "data",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{}, EmptyDir: &v1.EmptyDirVolumeSource{},
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "c", Name: "c",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
MountPath: "/data", MountPath: "/data",
Name: "data", Name: "data",
@ -230,21 +231,21 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
} }
func createCronJob(c clientset.Interface, ns string, cronJob *batch.CronJob) (*batch.CronJob, error) { func createCronJob(c clientset.Interface, ns string, cronJob *batch.CronJob) (*batch.CronJob, error) {
return c.Batch().CronJobs(ns).Create(cronJob) return c.BatchV2alpha1().CronJobs(ns).Create(cronJob)
} }
func getCronJob(c clientset.Interface, ns, name string) (*batch.CronJob, error) { func getCronJob(c clientset.Interface, ns, name string) (*batch.CronJob, error) {
return c.Batch().CronJobs(ns).Get(name) return c.BatchV2alpha1().CronJobs(ns).Get(name)
} }
func deleteCronJob(c clientset.Interface, ns, name string) error { func deleteCronJob(c clientset.Interface, ns, name string) error {
return c.Batch().CronJobs(ns).Delete(name, nil) return c.BatchV2alpha1().CronJobs(ns).Delete(name, nil)
} }
// Wait for at least a given number of active jobs. // Wait for at least a given number of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error { func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.Batch().CronJobs(ns).Get(cronJobName) curr, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
if err != nil { if err != nil {
return false, err return false, err
} }
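The hunk cuts off inside the poll closure; restated in full for readability, with a plausible completion assuming the condition simply compares the CronJob's active-job count against the requested minimum:

func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
	return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
		curr, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
		if err != nil {
			return false, err
		}
		// done once the controller reports enough active jobs in status
		return len(curr.Status.Active) >= active, nil
	})
}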
@ -255,7 +256,7 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
// Wait for no jobs to appear. // Wait for no jobs to appear.
func waitForNoJobs(c clientset.Interface, ns, jobName string) error { func waitForNoJobs(c clientset.Interface, ns, jobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
curr, err := c.Batch().CronJobs(ns).Get(jobName) curr, err := c.BatchV2alpha1().CronJobs(ns).Get(jobName)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -267,7 +268,7 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string) error {
// Wait for a job to be replaced with a new one. // Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error { func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{}) jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@ -284,7 +285,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
// waitForJobsAtLeast waits for at least a number of jobs to appear. // waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error { func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{}) jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@ -295,7 +296,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
// waitForAnyFinishedJob waits for any completed job to appear. // waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error { func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) { return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{}) jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@ -311,7 +312,7 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
// checkNoUnexpectedEvents checks unexpected events didn't happen. // checkNoUnexpectedEvents checks unexpected events didn't happen.
// Currently only "UnexpectedJob" is checked. // Currently only "UnexpectedJob" is checked.
func checkNoUnexpectedEvents(c clientset.Interface, ns, cronJobName string) error { func checkNoUnexpectedEvents(c clientset.Interface, ns, cronJobName string) error {
sj, err := c.Batch().CronJobs(ns).Get(cronJobName) sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
if err != nil { if err != nil {
return fmt.Errorf("error in getting cronjob %s/%s: %v", ns, cronJobName, err) return fmt.Errorf("error in getting cronjob %s/%s: %v", ns, cronJobName, err)
} }
@ -327,7 +328,7 @@ func checkNoUnexpectedEvents(c clientset.Interface, ns, cronJobName string) erro
return nil return nil
} }
func filterActiveJobs(jobs *batch.JobList) (active []*batch.Job) { func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job) {
for i := range jobs.Items { for i := range jobs.Items {
j := jobs.Items[i] j := jobs.Items[i]
if !job.IsJobFinished(&j) { if !job.IsJobFinished(&j) {
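filterActiveJobs is likewise cut off by the page; a plausible completion under the obvious reading, collecting every job the controller has not yet marked finished:

func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job) {
	for i := range jobs.Items {
		j := jobs.Items[i]
		if !job.IsJobFinished(&j) {
			active = append(active, &j)
		}
	}
	return
}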
View File
@ -21,9 +21,9 @@ import (
"strconv" "strconv"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -134,8 +134,8 @@ type podTracker struct {
cache.ThreadSafeStore cache.ThreadSafeStore
} }
func (p *podTracker) remember(pod *api.Pod, eventType string) { func (p *podTracker) remember(pod *v1.Pod, eventType string) {
if eventType == UPDATE && pod.Status.Phase == api.PodRunning { if eventType == UPDATE && pod.Status.Phase == v1.PodRunning {
return return
} }
p.Add(fmt.Sprintf("[%v] %v: %v", time.Now(), eventType, pod.Name), pod) p.Add(fmt.Sprintf("[%v] %v: %v", time.Now(), eventType, pod.Name), pod)
@ -147,7 +147,7 @@ func (p *podTracker) String() (msg string) {
if !exists { if !exists {
continue continue
} }
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
msg += fmt.Sprintf("%v Phase %v Host %v\n", k, pod.Status.Phase, pod.Spec.NodeName) msg += fmt.Sprintf("%v Phase %v Host %v\n", k, pod.Status.Phase, pod.Spec.NodeName)
} }
return return
@ -159,7 +159,7 @@ func newPodTracker() *podTracker {
} }
// replacePods replaces content of the store with the given pods. // replacePods replaces content of the store with the given pods.
func replacePods(pods []*api.Pod, store cache.Store) { func replacePods(pods []*v1.Pod, store cache.Store) {
found := make([]interface{}, 0, len(pods)) found := make([]interface{}, 0, len(pods))
for i := range pods { for i := range pods {
found = append(found, pods[i]) found = append(found, pods[i])
@ -170,7 +170,7 @@ func replacePods(pods []*api.Pod, store cache.Store) {
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector, // getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of nodenames across which these containers restarted. // and a list of nodenames across which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) { func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
options := api.ListOptions{LabelSelector: labelSelector} options := v1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := c.Core().Pods(ns).List(options) pods, err := c.Core().Pods(ns).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
failedContainers := 0 failedContainers := 0
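A hedged sketch of how the tally above is typically finished (assumptions: restart counts are summed from each pod's container statuses, and node names are collected with the util/sets package):

nodes := sets.NewString()
for _, p := range pods.Items {
	for _, cs := range p.Status.ContainerStatuses {
		if cs.RestartCount > 0 {
			failedContainers += int(cs.RestartCount)
			nodes.Insert(p.Spec.NodeName)
		}
	}
}
return failedContainers, nodes.List()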
@ -205,12 +205,13 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
// All the restart tests need an rc and a watch on pods of the rc. // All the restart tests need an rc and a watch on pods of the rc.
// Additionally some of them might scale the rc during the test. // Additionally some of them might scale the rc during the test.
config = testutils.RCConfig{ config = testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
Name: rcName, InternalClient: f.InternalClientset,
Namespace: ns, Name: rcName,
Image: framework.GetPauseImageName(f.ClientSet), Namespace: ns,
Replicas: numPods, Image: framework.GetPauseImageName(f.ClientSet),
CreatedPods: &[]*api.Pod{}, Replicas: numPods,
CreatedPods: &[]*v1.Pod{},
} }
Expect(framework.RunRC(config)).NotTo(HaveOccurred()) Expect(framework.RunRC(config)).NotTo(HaveOccurred())
replacePods(*config.CreatedPods, existingPods) replacePods(*config.CreatedPods, existingPods)
@ -219,27 +220,27 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
tracker = newPodTracker() tracker = newPodTracker()
newPods, controller = cache.NewInformer( newPods, controller = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector.String()
obj, err := f.ClientSet.Core().Pods(ns).List(options) obj, err := f.ClientSet.Core().Pods(ns).List(options)
return runtime.Object(obj), err return runtime.Object(obj), err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labelSelector options.LabelSelector = labelSelector.String()
return f.ClientSet.Core().Pods(ns).Watch(options) return f.ClientSet.Core().Pods(ns).Watch(options)
}, },
}, },
&api.Pod{}, &v1.Pod{},
0, 0,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
tracker.remember(obj.(*api.Pod), ADD) tracker.remember(obj.(*v1.Pod), ADD)
}, },
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
tracker.remember(newObj.(*api.Pod), UPDATE) tracker.remember(newObj.(*v1.Pod), UPDATE)
}, },
DeleteFunc: func(obj interface{}) { DeleteFunc: func(obj interface{}) {
tracker.remember(obj.(*api.Pod), DEL) tracker.remember(obj.(*v1.Pod), DEL)
}, },
}, },
) )
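The recurring mechanical change in this informer, and throughout the commit, is that the versioned v1.ListOptions carries selectors as plain strings, so labels.Selector values must be serialized explicitly. An illustrative fragment using the test's rcName label:

sel := labels.SelectorFromSet(labels.Set{"name": rcName})
options := v1.ListOptions{LabelSelector: sel.String()} // a string, no longer a labels.Selector
pods, err := f.ClientSet.Core().Pods(ns).List(options)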
@ -263,7 +264,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC // that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number // to the same size achieves this, because the scale operation advances the RC's sequence number
// and waits for it to be observed and reported back in the RC's status. // and waits for it to be observed and reported back in the RC's status.
framework.ScaleRC(f.ClientSet, ns, rcName, numPods, true) framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)
// Only check the keys, the pods can be different if the kubelet updated it. // Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really? // TODO: Can it really?
@ -294,9 +295,9 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill() restarter.kill()
// This is a best-effort attempt to create pods while the scheduler is down, // This is a best-effort attempt to create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal. // since we don't know exactly when it is restarted after the kill signal.
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, false)) framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
restarter.waitUp() restarter.waitUp()
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, true)) framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
}) })
It("Kubelet should not restart containers across restart", func() { It("Kubelet should not restart containers across restart", func() {
View File
@ -25,9 +25,11 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -58,12 +60,12 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
var f *framework.Framework var f *framework.Framework
AfterEach(func() { AfterEach(func() {
if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil { if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets)) framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets))
} else { } else {
framework.Logf("unable to dump daemonsets: %v", err) framework.Logf("unable to dump daemonsets: %v", err)
} }
if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil { if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods)) framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods))
} else { } else {
framework.Logf("unable to dump pods: %v", err) framework.Logf("unable to dump pods: %v", err)
@ -93,20 +95,20 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating simple daemon set %s", dsName) framework.Logf("Creating simple daemon set %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{ _, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: dsName, Name: dsName,
}, },
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: label, Labels: label,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: dsName, Name: dsName,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
}, },
@ -116,7 +118,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defer func() { defer func() {
framework.Logf("Check that reaper kills all daemon pods for %s", dsName) framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), f.ClientSet) dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = dsReaper.Stop(ns, dsName, 0, nil) err = dsReaper.Stop(ns, dsName, 0, nil)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -135,7 +137,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
podClient := c.Core().Pods(ns) podClient := c.Core().Pods(ns)
selector := labels.Set(label).AsSelector() selector := labels.Set(label).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := podClient.List(options) podList, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items)).To(BeNumerically(">", 0)) Expect(len(podList.Items)).To(BeNumerically(">", 0))
@ -152,22 +154,22 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
nodeSelector := map[string]string{daemonsetColorLabel: "blue"} nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node selector %s", dsName) framework.Logf("Creating daemon with a node selector %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{ _, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: dsName, Name: dsName,
}, },
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: complexLabel}, Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: complexLabel, Labels: complexLabel,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeSelector: nodeSelector, NodeSelector: nodeSelector,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: dsName, Name: dsName,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
}, },
@ -208,7 +210,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
nodeSelector := map[string]string{daemonsetColorLabel: "blue"} nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node affinity %s", dsName) framework.Logf("Creating daemon with a node affinity %s", dsName)
affinity := map[string]string{ affinity := map[string]string{
api.AffinityAnnotationKey: fmt.Sprintf(` v1.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{ "nodeSelectorTerms": [{
"matchExpressions": [{ "matchExpressions": [{
@ -220,22 +222,22 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
}}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]), }}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
} }
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{ _, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: dsName, Name: dsName,
}, },
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: complexLabel}, Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: complexLabel, Labels: complexLabel,
Annotations: affinity, Annotations: affinity,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: dsName, Name: dsName,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
}, },
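The affinity hunk elides the middle of the JSON; since in this release node affinity on a pod template is still expressed as an annotation under v1.AffinityAnnotationKey, a plausible full form matching the visible head and tail is:

affinity := map[string]string{
	v1.AffinityAnnotationKey: fmt.Sprintf(`
		{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
			"nodeSelectorTerms": [{
				"matchExpressions": [{
					"key": "%s",
					"operator": "In",
					"values": ["%s"]
				}]
			}]
		}}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
}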
@ -296,9 +298,9 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil return nil
} }
func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*api.Node, error) { func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.Core().Nodes() nodeClient := c.Core().Nodes()
var newNode *api.Node var newNode *v1.Node
var newLabels map[string]string var newLabels map[string]string
err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) { err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
node, err := nodeClient.Get(nodeName) node, err := nodeClient.Get(nodeName)
@ -339,7 +341,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) { func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
selector := labels.Set(selector).AsSelector() selector := labels.Set(selector).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options) podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options)
if err != nil { if err != nil {
return false, nil return false, nil
@ -368,7 +370,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n
func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) { func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
return func() (bool, error) { return func() (bool, error) {
nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
nodeNames := make([]string, 0) nodeNames := make([]string, 0)
for _, node := range nodeList.Items { for _, node := range nodeList.Items {
@ -385,7 +387,7 @@ func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) f
func checkDaemonStatus(f *framework.Framework, dsName string) error { func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName) ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName)
if err != nil { if err != nil {
return fmt.Errorf("Could not get daemon set from api.") return fmt.Errorf("Could not get daemon set from v1.")
} }
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready { if desired != scheduled && desired != ready {
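checkDaemonStatus is cut off mid-branch; restated in full, with a plausible completion assuming a mismatch is surfaced as an error and agreement returns nil:

func checkDaemonStatus(f *framework.Framework, dsName string) error {
	ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName)
	if err != nil {
		return fmt.Errorf("Could not get daemon set from API")
	}
	desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
	if desired != scheduled && desired != ready {
		return fmt.Errorf("error in daemon status. DesiredScheduled: %d, CurrentScheduled: %d, Ready: %d", desired, scheduled, ready)
	}
	return nil
}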
View File
@ -28,8 +28,10 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -54,10 +56,11 @@ const (
var MaxContainerFailures = 0 var MaxContainerFailures = 0
type DensityTestConfig struct { type DensityTestConfig struct {
Configs []testutils.RCConfig Configs []testutils.RCConfig
ClientSet internalclientset.Interface ClientSet clientset.Interface
PollInterval time.Duration InternalClientset internalclientset.Interface
PodCount int PollInterval time.Duration
PodCount int
} }
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint { func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
@ -159,9 +162,9 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
return constraints return constraints
} }
func logPodStartupStatus(c internalclientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) { func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels)) label := labels.SelectorFromSet(labels.Set(observedLabels))
podStore := testutils.NewPodStore(c, api.NamespaceAll, label, fields.Everything()) podStore := testutils.NewPodStore(c, v1.NamespaceAll, label, fields.Everything())
defer podStore.Stop() defer podStore.Stop()
ticker := time.NewTicker(period) ticker := time.NewTicker(period)
defer ticker.Stop() defer ticker.Stop()
@ -209,7 +212,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
// Print some data about Pod to Node allocation // Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data") By("Printing Pod to Node allocation data")
podList, err := dtc.ClientSet.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int) pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string) systemPodAllocation := make(map[string][]string)
@ -238,14 +241,14 @@ func cleanupDensityTest(dtc DensityTestConfig) {
for i := range dtc.Configs { for i := range dtc.Configs {
rcName := dtc.Configs[i].Name rcName := dtc.Configs[i].Name
rc, err := dtc.ClientSet.Core().ReplicationControllers(dtc.Configs[i].Namespace).Get(rcName) rc, err := dtc.ClientSet.Core().ReplicationControllers(dtc.Configs[i].Namespace).Get(rcName)
if err == nil && rc.Spec.Replicas != 0 { if err == nil && *(rc.Spec.Replicas) != 0 {
if framework.TestContext.GarbageCollectorEnabled { if framework.TestContext.GarbageCollectorEnabled {
By("Cleaning up only the replication controller, garbage collector will clean up the pods") By("Cleaning up only the replication controller, garbage collector will clean up the pods")
err := framework.DeleteRCAndWaitForGC(dtc.ClientSet, dtc.Configs[i].Namespace, rcName) err := framework.DeleteRCAndWaitForGC(dtc.ClientSet, dtc.Configs[i].Namespace, rcName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} else { } else {
By("Cleaning up the replication controller and pods") By("Cleaning up the replication controller and pods")
err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.Configs[i].Namespace, rcName) err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.InternalClientset, dtc.Configs[i].Namespace, rcName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
} }
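The cleanup branch above, condensed: with the garbage collector enabled only the RC object is deleted and its pods are left to the GC; otherwise pods are reaped explicitly, which after this commit also needs the internal clientset (apparently because the kubectl reaper still speaks the internal API, as the ReaperFor call sites in this commit suggest):

if framework.TestContext.GarbageCollectorEnabled {
	// GC deletes the pods asynchronously once the RC is gone
	framework.ExpectNoError(framework.DeleteRCAndWaitForGC(dtc.ClientSet, ns, rcName))
} else {
	framework.ExpectNoError(framework.DeleteRCAndPods(dtc.ClientSet, dtc.InternalClientset, ns, rcName))
}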
@ -260,7 +263,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
// results will not be representative for control-plane performance as we'll start hitting // results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup. // limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() { var _ = framework.KubeDescribe("Density", func() {
var c internalclientset.Interface var c clientset.Interface
var nodeCount int var nodeCount int
var RCName string var RCName string
var additionalPodsPrefix string var additionalPodsPrefix string
@ -270,7 +273,7 @@ var _ = framework.KubeDescribe("Density", func() {
var totalPods int var totalPods int
var nodeCpuCapacity int64 var nodeCpuCapacity int64
var nodeMemCapacity int64 var nodeMemCapacity int64
var nodes *api.NodeList var nodes *v1.NodeList
var masters sets.String var masters sets.String
// Gathers data prior to framework namespace teardown // Gathers data prior to framework namespace teardown
@ -332,10 +335,10 @@ var _ = framework.KubeDescribe("Density", func() {
for _, node := range nodes.Items { for _, node := range nodes.Items {
var internalIP, externalIP string var internalIP, externalIP string
for _, address := range node.Status.Addresses { for _, address := range node.Status.Addresses {
if address.Type == api.NodeInternalIP { if address.Type == v1.NodeInternalIP {
internalIP = address.Address internalIP = address.Address
} }
if address.Type == api.NodeExternalIP { if address.Type == v1.NodeExternalIP {
externalIP = address.Address externalIP = address.Address
} }
} }
@ -399,12 +402,13 @@ var _ = framework.KubeDescribe("Density", func() {
podThroughput := 20 podThroughput := 20
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go // createClients is defined in load.go
clients, err := createClients(numberOfRCs) clients, internalClients, err := createClients(numberOfRCs)
for i := 0; i < numberOfRCs; i++ { for i := 0; i < numberOfRCs; i++ {
RCName := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid) RCName := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
nsName := namespaces[i].Name nsName := namespaces[i].Name
RCConfigs[i] = testutils.RCConfig{ RCConfigs[i] = testutils.RCConfig{
Client: clients[i], Client: clients[i],
InternalClient: internalClients[i],
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
Name: RCName, Name: RCName,
Namespace: nsName, Namespace: nsName,
@ -421,10 +425,11 @@ var _ = framework.KubeDescribe("Density", func() {
} }
dConfig := DensityTestConfig{ dConfig := DensityTestConfig{
ClientSet: f.ClientSet, ClientSet: f.ClientSet,
Configs: RCConfigs, InternalClientset: f.InternalClientset,
PodCount: totalPods, Configs: RCConfigs,
PollInterval: DensityPollInterval, PodCount: totalPods,
PollInterval: DensityPollInterval,
} }
e2eStartupTime = runDensityTest(dConfig) e2eStartupTime = runDensityTest(dConfig)
if itArg.runLatencyTest { if itArg.runLatencyTest {
@ -437,12 +442,12 @@ var _ = framework.KubeDescribe("Density", func() {
watchTimes := make(map[string]unversioned.Time, 0) watchTimes := make(map[string]unversioned.Time, 0)
var mutex sync.Mutex var mutex sync.Mutex
checkPod := func(p *api.Pod) { checkPod := func(p *v1.Pod) {
mutex.Lock() mutex.Lock()
defer mutex.Unlock() defer mutex.Unlock()
defer GinkgoRecover() defer GinkgoRecover()
if p.Status.Phase == api.PodRunning { if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found { if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = unversioned.Now() watchTimes[p.Name] = unversioned.Now()
createTimes[p.Name] = p.CreationTimestamp createTimes[p.Name] = p.CreationTimestamp
@ -472,31 +477,31 @@ var _ = framework.KubeDescribe("Density", func() {
nsName := namespaces[i].Name nsName := namespaces[i].Name
latencyPodsStore, controller := cache.NewInformer( latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}) options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
obj, err := c.Core().Pods(nsName).List(options) obj, err := c.Core().Pods(nsName).List(options)
return runtime.Object(obj), err return runtime.Object(obj), err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}) options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
return c.Core().Pods(nsName).Watch(options) return c.Core().Pods(nsName).Watch(options)
}, },
}, },
&api.Pod{}, &v1.Pod{},
0, 0,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
p, ok := obj.(*api.Pod) p, ok := obj.(*v1.Pod)
if !ok { if !ok {
framework.Logf("Failed to cast observed object to *api.Pod.") framework.Logf("Failed to cast observed object to *v1.Pod.")
} }
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
go checkPod(p) go checkPod(p)
}, },
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
p, ok := newObj.(*api.Pod) p, ok := newObj.(*v1.Pod)
if !ok { if !ok {
framework.Logf("Failed to cast observed object to *api.Pod.") framework.Logf("Failed to cast observed object to *v1.Pod.")
} }
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
go checkPod(p) go checkPod(p)
@ -545,7 +550,7 @@ var _ = framework.KubeDescribe("Density", func() {
nodeToLatencyPods := make(map[string]int) nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores { for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() { for _, item := range latencyPodStores[i].List() {
pod := item.(*api.Pod) pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++ nodeToLatencyPods[pod.Spec.NodeName]++
} }
for node, count := range nodeToLatencyPods { for node, count := range nodeToLatencyPods {
@ -560,9 +565,9 @@ var _ = framework.KubeDescribe("Density", func() {
selector := fields.Set{ selector := fields.Set{
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.namespace": nsName, "involvedObject.namespace": nsName,
"source": api.DefaultSchedulerName, "source": v1.DefaultSchedulerName,
}.AsSelector() }.AsSelector().String()
options := api.ListOptions{FieldSelector: selector} options := v1.ListOptions{FieldSelector: selector}
schedEvents, err := c.Core().Events(nsName).List(options) schedEvents, err := c.Core().Events(nsName).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for k := range createTimes { for k := range createTimes {
@ -683,39 +688,39 @@ var _ = framework.KubeDescribe("Density", func() {
}) })
}) })
func createRunningPodFromRC(wg *sync.WaitGroup, c internalclientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) { func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover() defer GinkgoRecover()
defer wg.Done() defer wg.Done()
labels := map[string]string{ labels := map[string]string{
"type": podType, "type": podType,
"name": name, "name": name,
} }
rc := &api.ReplicationController{ rc := &v1.ReplicationController{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: labels, Labels: labels,
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: 1, Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
Selector: labels, Selector: labels,
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: name, Name: name,
Image: image, Image: image,
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceCPU: cpuRequest, v1.ResourceCPU: cpuRequest,
api.ResourceMemory: memRequest, v1.ResourceMemory: memRequest,
}, },
}, },
}, },
}, },
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
}, },
}, },
}, },
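The inline closure for Replicas is this commit's workaround for versioned specs taking *int32 rather than int32. A hypothetical named helper expressing the same pattern:

// int32Ptr is a hypothetical helper equivalent to the inline
// func(i int) *int32 { x := int32(i); return &x } closures above.
func int32Ptr(i int32) *int32 { return &i }

// usage: rc.Spec.Replicas = int32Ptr(1)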
View File
@ -28,9 +28,12 @@ import (
"k8s.io/kubernetes/pkg/api/annotations" "k8s.io/kubernetes/pkg/api/annotations"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
client "k8s.io/kubernetes/pkg/client/unversioned" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl" "k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@ -109,23 +112,23 @@ var _ = framework.KubeDescribe("Deployment", func() {
func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int32) *extensions.Deployment { func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int32) *extensions.Deployment {
zero := int64(0) zero := int64(0)
return &extensions.Deployment{ return &extensions.Deployment{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: deploymentName, Name: deploymentName,
}, },
Spec: extensions.DeploymentSpec{ Spec: extensions.DeploymentSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: &unversioned.LabelSelector{MatchLabels: podLabels}, Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{ Strategy: extensions.DeploymentStrategy{
Type: strategyType, Type: strategyType,
}, },
RevisionHistoryLimit: revisionHistoryLimit, RevisionHistoryLimit: revisionHistoryLimit,
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: podLabels, Labels: podLabels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero, TerminationGracePeriodSeconds: &zero,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: imageName, Name: imageName,
Image: image, Image: image,
@ -168,20 +171,20 @@ func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision
return deployment, newRS return deployment, newRS
} }
func stopDeploymentOverlap(c clientset.Interface, ns, deploymentName, overlapWith string) { func stopDeploymentOverlap(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName, overlapWith string) {
stopDeploymentMaybeOverlap(c, ns, deploymentName, overlapWith) stopDeploymentMaybeOverlap(c, internalClient, ns, deploymentName, overlapWith)
} }
func stopDeployment(c clientset.Interface, ns, deploymentName string) { func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
stopDeploymentMaybeOverlap(c, ns, deploymentName, "") stopDeploymentMaybeOverlap(c, internalClient, ns, deploymentName, "")
} }
func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overlapWith string) { func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName, overlapWith string) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName) deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.Logf("Deleting deployment %s", deploymentName) framework.Logf("Deleting deployment %s", deploymentName)
reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), c) reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute timeout := 1 * time.Minute
err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0)) err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
@ -194,7 +197,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overl
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName) framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
rss, err := c.Extensions().ReplicaSets(ns).List(options) rss, err := c.Extensions().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// RSes may be created by overlapping deployments right after this deployment is deleted, ignore them // RSes may be created by overlapping deployments right after this deployment is deleted, ignore them
@ -210,7 +213,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overl
Expect(noOverlapRSes).Should(HaveLen(0)) Expect(noOverlapRSes).Should(HaveLen(0))
} }
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName) framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
var pods *api.PodList var pods *v1.PodList
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) { if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = c.Core().Pods(ns).List(options) pods, err = c.Core().Pods(ns).List(options)
if err != nil { if err != nil {
@ -220,7 +223,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overl
if len(overlapWith) == 0 && len(pods.Items) == 0 { if len(overlapWith) == 0 && len(pods.Items) == 0 {
return true, nil return true, nil
} else if len(overlapWith) != 0 { } else if len(overlapWith) != 0 {
noOverlapPods := []api.Pod{} noOverlapPods := []v1.Pod{}
for _, pod := range pods.Items { for _, pod := range pods.Items {
if !strings.HasPrefix(pod.Name, overlapWith) { if !strings.HasPrefix(pod.Name, overlapWith) {
noOverlapPods = append(noOverlapPods, pod) noOverlapPods = append(noOverlapPods, pod)
@ -270,6 +273,7 @@ func testNewDeployment(f *framework.Framework) {
func testDeleteDeployment(f *framework.Framework) { func testDeleteDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
internalClient := f.InternalClientset
deploymentName := "test-new-deployment" deploymentName := "test-new-deployment"
podLabels := map[string]string{"name": nginxImageName} podLabels := map[string]string{"name": nginxImageName}
@ -295,7 +299,7 @@ func testDeleteDeployment(f *framework.Framework) {
err = fmt.Errorf("expected a replica set, got nil") err = fmt.Errorf("expected a replica set, got nil")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
stopDeployment(c, ns, deploymentName) stopDeployment(c, internalClient, ns, deploymentName)
} }
func testRollingUpdateDeployment(f *framework.Framework) { func testRollingUpdateDeployment(f *framework.Framework) {
@ -481,11 +485,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
deploymentName := "test-cleanup-deployment" deploymentName := "test-cleanup-deployment"
framework.Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
pods, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err) Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
} }
options := api.ListOptions{ options := v1.ListOptions{
ResourceVersion: pods.ListMeta.ResourceVersion, ResourceVersion: pods.ListMeta.ResourceVersion,
} }
stopCh := make(chan struct{}) stopCh := make(chan struct{})
@ -504,7 +508,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
if numPodCreation < 0 { if numPodCreation < 0 {
framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event) framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
} }
pod, ok := event.Object.(*api.Pod) pod, ok := event.Object.(*v1.Pod)
if !ok { if !ok {
Fail("Expect event Object to be a pod") Fail("Expect event Object to be a pod")
} }
@ -556,8 +560,8 @@ func testRolloverDeployment(f *framework.Framework) {
framework.Logf("Creating deployment %s", deploymentName) framework.Logf("Creating deployment %s", deploymentName)
newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil) newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
MaxUnavailable: intstr.FromInt(1), MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
MaxSurge: intstr.FromInt(1), MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
} }
_, err = c.Extensions().Deployments(ns).Create(newDeployment) _, err = c.Extensions().Deployments(ns).Create(newDeployment)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
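The same pointer-field pattern applies to rolling-update bounds: MaxUnavailable and MaxSurge are *intstr.IntOrString in the versioned extensions API, hence the closures. A hypothetical helper:

// intstrPtr is a hypothetical helper matching the inline closures above.
func intstrPtr(i int) *intstr.IntOrString {
	x := intstr.FromInt(i)
	return &x
}

// usage:
//   MaxUnavailable: intstrPtr(1),
//   MaxSurge:       intstrPtr(1),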
@ -571,7 +575,7 @@ func testRolloverDeployment(f *framework.Framework) {
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage) _, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Before the deployment finishes, update the deployment to rollover the above 2 ReplicaSets and bring up redis pods. // Before the deployment finishes, update the deployment to rollover the above 2 ReplicaSets and bring up redis pods.
Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas)) Expect(*newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
@ -629,7 +633,7 @@ func testPausedDeployment(f *framework.Framework) {
if err != nil { if err != nil {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
opts := api.ListOptions{LabelSelector: selector} opts := v1.ListOptions{LabelSelector: selector.String()}
w, err := c.Extensions().ReplicaSets(ns).Watch(opts) w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -973,7 +977,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods // All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := c.Core().Pods(ns).List(options) pods, err := c.Core().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = framework.CheckPodHashLabel(pods) err = framework.CheckPodHashLabel(pods)
@ -1015,7 +1019,7 @@ func testScalePausedDeployment(f *framework.Framework) {
framework.Logf("Scaling up the paused deployment %q", deploymentName) framework.Logf("Scaling up the paused deployment %q", deploymentName)
newReplicas := int32(5) newReplicas := int32(5)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = newReplicas update.Spec.Replicas = &newReplicas
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -1025,8 +1029,8 @@ func testScalePausedDeployment(f *framework.Framework) {
rs, err = deploymentutil.GetNewReplicaSet(deployment, c) rs, err = deploymentutil.GetNewReplicaSet(deployment, c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if rs.Spec.Replicas != newReplicas { if *(rs.Spec.Replicas) != newReplicas {
err = fmt.Errorf("Expected %d replicas for the new replica set, got %d", newReplicas, rs.Spec.Replicas) err = fmt.Errorf("Expected %d replicas for the new replica set, got %d", newReplicas, *(rs.Spec.Replicas))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
} }
@ -1042,8 +1046,8 @@ func testScaledRolloutDeployment(f *framework.Framework) {
deploymentName := "nginx" deploymentName := "nginx"
d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil) d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment) d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
d.Spec.Strategy.RollingUpdate.MaxSurge = intstr.FromInt(3) d.Spec.Strategy.RollingUpdate.MaxSurge = func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(3)
d.Spec.Strategy.RollingUpdate.MaxUnavailable = intstr.FromInt(2) d.Spec.Strategy.RollingUpdate.MaxUnavailable = func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(2)
By(fmt.Sprintf("Creating deployment %q", deploymentName)) By(fmt.Sprintf("Creating deployment %q", deploymentName))
deployment, err := c.Extensions().Deployments(ns).Create(d) deployment, err := c.Extensions().Deployments(ns).Create(d)
@ -1054,7 +1058,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
// Verify that the required pods have come up. // Verify that the required pods have come up.
By("Waiting for all required pods to come up") By("Waiting for all required pods to come up")
err = framework.VerifyPods(f.ClientSet, ns, nginxImageName, false, deployment.Spec.Replicas) err = framework.VerifyPods(f.ClientSet, ns, nginxImageName, false, *(deployment.Spec.Replicas))
if err != nil { if err != nil {
framework.Logf("error in waiting for pods to come up: %s", err) framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -1090,18 +1094,18 @@ func testScaledRolloutDeployment(f *framework.Framework) {
first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name) first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
firstCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), first) firstCond := replicaSetHasDesiredReplicas(c.Extensions(), first)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond) err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
secondCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), second) secondCond := replicaSetHasDesiredReplicas(c.Extensions(), second)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond) err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Updating the size (up) and template at the same time for deployment %q", deploymentName)) By(fmt.Sprintf("Updating the size (up) and template at the same time for deployment %q", deploymentName))
newReplicas := int32(20) newReplicas := int32(20)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = newReplicas update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = nautilusImage update.Spec.Template.Spec.Containers[0].Image = nautilusImage
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -1118,7 +1122,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
for _, rs := range append(oldRSs, rs) { for _, rs := range append(oldRSs, rs) {
By(fmt.Sprintf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)) By(fmt.Sprintf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name))
desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
if !ok || desired == deployment.Spec.Replicas { if !ok || desired == *(deployment.Spec.Replicas) {
continue continue
} }
err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name) err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
@ -1150,18 +1154,18 @@ func testScaledRolloutDeployment(f *framework.Framework) {
newRs, err := deploymentutil.GetNewReplicaSet(deployment, c) newRs, err := deploymentutil.GetNewReplicaSet(deployment, c)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
oldCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), oldRs) oldCond := replicaSetHasDesiredReplicas(c.Extensions(), oldRs)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond) err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
newCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), newRs) newCond := replicaSetHasDesiredReplicas(c.Extensions(), newRs)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond) err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Updating the size (down) and template at the same time for deployment %q", deploymentName)) By(fmt.Sprintf("Updating the size (down) and template at the same time for deployment %q", deploymentName))
newReplicas = int32(5) newReplicas = int32(5)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
update.Spec.Replicas = newReplicas update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = kittenImage update.Spec.Template.Spec.Containers[0].Image = kittenImage
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -1178,7 +1182,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
for _, rs := range append(oldRSs, rs) { for _, rs := range append(oldRSs, rs) {
By(fmt.Sprintf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name)) By(fmt.Sprintf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name))
desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs) desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
if !ok || desired == deployment.Spec.Replicas { if !ok || desired == *(deployment.Spec.Replicas) {
continue continue
} }
err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name) err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
@ -1189,6 +1193,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
func testOverlappingDeployment(f *framework.Framework) { func testOverlappingDeployment(f *framework.Framework) {
ns := f.Namespace.Name ns := f.Namespace.Name
c := f.ClientSet c := f.ClientSet
internalClient := f.InternalClientset
deploymentName := "first-deployment" deploymentName := "first-deployment"
podLabels := map[string]string{"name": redisImageName} podLabels := map[string]string{"name": redisImageName}
@ -1219,7 +1224,7 @@ func testOverlappingDeployment(f *framework.Framework) {
// Only the first deployment is synced // Only the first deployment is synced
By("Checking only the first overlapping deployment is synced") By("Checking only the first overlapping deployment is synced")
options := api.ListOptions{} options := v1.ListOptions{}
rsList, err := c.Extensions().ReplicaSets(ns).List(options) rsList, err := c.Extensions().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns) Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns)
Expect(rsList.Items).To(HaveLen(int(replicas))) Expect(rsList.Items).To(HaveLen(int(replicas)))
@ -1227,7 +1232,7 @@ func testOverlappingDeployment(f *framework.Framework) {
Expect(rsList.Items[0].Spec.Template.Spec.Containers[0].Image).To(Equal(deploy.Spec.Template.Spec.Containers[0].Image)) Expect(rsList.Items[0].Spec.Template.Spec.Containers[0].Image).To(Equal(deploy.Spec.Template.Spec.Containers[0].Image))
By("Deleting the first deployment") By("Deleting the first deployment")
stopDeploymentOverlap(c, ns, deploy.Name, deployOverlapping.Name) stopDeploymentOverlap(c, internalClient, ns, deploy.Name, deployOverlapping.Name)
// Wait for overlapping annotation cleared // Wait for overlapping annotation cleared
By("Waiting for the second deployment to clear overlapping annotation") By("Waiting for the second deployment to clear overlapping annotation")
@ -1335,11 +1340,11 @@ func randomScale(d *extensions.Deployment, i int) {
switch r := rand.Float32(); { switch r := rand.Float32(); {
case r < 0.3: case r < 0.3:
framework.Logf("%02d: scaling up", i) framework.Logf("%02d: scaling up", i)
d.Spec.Replicas++ *(d.Spec.Replicas)++
case r < 0.6: case r < 0.6:
if d.Spec.Replicas > 1 { if *(d.Spec.Replicas) > 1 {
framework.Logf("%02d: scaling down", i) framework.Logf("%02d: scaling down", i)
d.Spec.Replicas-- *(d.Spec.Replicas)--
} }
} }
} }
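
randomScale above shows the knock-on effect of Spec.Replicas moving from int32 to *int32 in the versioned types: every read and mutation now goes through a dereference, and writes take the address of a local. A self-contained sketch of the pattern with a stand-in spec type (int32Ptr is illustrative):

package main

import "fmt"

// deploymentSpec stands in for the versioned spec: optional numeric fields
// become pointers so "unset" and "zero" stay distinguishable.
type deploymentSpec struct {
	Replicas *int32
}

func int32Ptr(i int32) *int32 { return &i }

func main() {
	d := deploymentSpec{Replicas: int32Ptr(2)}
	*(d.Replicas)++ // scale up, as randomScale does
	if *(d.Replicas) > 1 {
		*(d.Replicas)-- // scale down
	}
	fmt.Println(*d.Replicas) // prints 2
}
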
@ -1375,7 +1380,7 @@ func testIterativeDeployments(f *framework.Framework) {
// trigger a new deployment // trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name) framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) { deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
newEnv := api.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)} newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv) update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i) randomScale(update, i)
}) })
@ -1421,7 +1426,7 @@ func testIterativeDeployments(f *framework.Framework) {
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name) framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
opts := api.ListOptions{LabelSelector: selector} opts := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(ns).List(opts) podList, err := c.Core().Pods(ns).List(opts)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
if len(podList.Items) == 0 { if len(podList.Items) == 0 {
@ -1460,3 +1465,14 @@ func testIterativeDeployments(f *framework.Framework) {
framework.Logf("Checking deployment %q for a complete condition", deploymentName) framework.Logf("Checking deployment %q for a complete condition", deploymentName)
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred()) Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
} }
func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
desiredGeneration := replicaSet.Generation
return func() (bool, error) {
rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name)
if err != nil {
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(rs.Spec.Replicas), nil
}
}
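
The new replicaSetHasDesiredReplicas helper above re-creates a condition these tests previously borrowed from the client package: a replica set has converged once the controller has observed the latest spec generation and the status replica count matches the spec. A self-contained sketch of the same pattern, with stand-in types and a stand-in for wait.PollImmediate:

package main

import (
	"errors"
	"fmt"
	"time"
)

// replicaSet carries only the fields the condition reads.
type replicaSet struct {
	Generation         int64 // bumped on every spec update
	SpecReplicas       int32
	ObservedGeneration int64 // last generation the controller acted on
	StatusReplicas     int32
}

// pollImmediate is a tiny stand-in for wait.PollImmediate: run the condition
// right away, then on every interval tick until done, error, or timeout.
func pollImmediate(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for ; ; time.Sleep(interval) {
		if done, err := cond(); err != nil || done {
			return err
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for the condition")
		}
	}
}

func hasDesiredReplicas(get func() replicaSet, desiredGeneration int64) func() (bool, error) {
	return func() (bool, error) {
		rs := get()
		// Both checks matter: status can transiently equal spec while the
		// controller is still acting on an older spec.
		return rs.ObservedGeneration >= desiredGeneration &&
			rs.StatusReplicas == rs.SpecReplicas, nil
	}
}

func main() {
	rs := replicaSet{Generation: 2, SpecReplicas: 3, ObservedGeneration: 2, StatusReplicas: 3}
	err := pollImmediate(10*time.Millisecond, time.Second,
		hasDesiredReplicas(func() replicaSet { return rs }, rs.Generation))
	fmt.Println("converged:", err == nil)
}
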
View File
@ -24,7 +24,7 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/unversioned" "k8s.io/client-go/pkg/api/unversioned"
api "k8s.io/client-go/pkg/api/v1" "k8s.io/client-go/pkg/api/v1"
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1" extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
policy "k8s.io/client-go/pkg/apis/policy/v1beta1" policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
"k8s.io/client-go/pkg/util/intstr" "k8s.io/client-go/pkg/util/intstr"
@ -127,15 +127,15 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
} }
// Locate a running pod. // Locate a running pod.
var pod api.Pod var pod v1.Pod
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
podList, err := cs.Pods(ns).List(api.ListOptions{}) podList, err := cs.Pods(ns).List(v1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
for i := range podList.Items { for i := range podList.Items {
if podList.Items[i].Status.Phase == api.PodRunning { if podList.Items[i].Status.Phase == v1.PodRunning {
pod = podList.Items[i] pod = podList.Items[i]
return true, nil return true, nil
} }
@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
e := &policy.Eviction{ e := &policy.Eviction{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: pod.Name, Name: pod.Name,
Namespace: ns, Namespace: ns,
}, },
@ -184,7 +184,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) { func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) {
pdb := policy.PodDisruptionBudget{ pdb := policy.PodDisruptionBudget{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
}, },
@ -199,20 +199,20 @@ func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvai
func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) { func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod-%d", i), Name: fmt.Sprintf("pod-%d", i),
Namespace: ns, Namespace: ns,
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "busybox", Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.4", Image: "gcr.io/google_containers/echoserver:1.4",
}, },
}, },
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
}, },
} }
@ -224,7 +224,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) { func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
By("Waiting for all pods to be running") By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) { err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
pods, err := cs.Core().Pods(ns).List(api.ListOptions{LabelSelector: "foo=bar"}) pods, err := cs.Core().Pods(ns).List(v1.ListOptions{LabelSelector: "foo=bar"})
if err != nil { if err != nil {
return false, err return false, err
} }
@ -237,7 +237,7 @@ func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
} }
ready := 0 ready := 0
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
if pods.Items[i].Status.Phase == api.PodRunning { if pods.Items[i].Status.Phase == v1.PodRunning {
ready++ ready++
} }
} }
@ -251,18 +251,18 @@ func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
} }
func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, exclusive bool) { func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, exclusive bool) {
container := api.Container{ container := v1.Container{
Name: "busybox", Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.4", Image: "gcr.io/google_containers/echoserver:1.4",
} }
if exclusive { if exclusive {
container.Ports = []api.ContainerPort{ container.Ports = []v1.ContainerPort{
{HostPort: 5555, ContainerPort: 5555}, {HostPort: 5555, ContainerPort: 5555},
} }
} }
rs := &extensions.ReplicaSet{ rs := &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "rs", Name: "rs",
Namespace: ns, Namespace: ns,
}, },
@ -271,12 +271,12 @@ func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, excl
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"}, MatchLabels: map[string]string{"foo": "bar"},
}, },
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{container}, Containers: []v1.Container{container},
}, },
}, },
}, },
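
For context on the test above: posting an Eviction is admitted or refused against the PodDisruptionBudget, and a refusal surfaces as HTTP 429. The core budget arithmetic reduces to the check below; this is a deliberately simplified stand-in, not the disruption controller's actual code:

package main

import "fmt"

// canEvict captures the budget check behind the eviction subresource: an
// eviction is admitted only if healthy pods would not drop below minAvailable.
func canEvict(healthy, minAvailable int) bool {
	return healthy-1 >= minAvailable
}

func main() {
	fmt.Println(canEvict(3, 2)) // true:  3 running, budget keeps 2 -> admitted
	fmt.Println(canEvict(2, 2)) // false: would violate the budget -> HTTP 429
}
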
View File
@ -24,10 +24,11 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -42,37 +43,37 @@ var dnsServiceLabelSelector = labels.Set{
"kubernetes.io/cluster-service": "true", "kubernetes.io/cluster-service": "true",
}.AsSelector() }.AsSelector()
func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotation bool) *api.Pod { func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotation bool) *v1.Pod {
dnsPod := &api.Pod{ dnsPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "dns-test-" + string(uuid.NewUUID()), Name: "dns-test-" + string(uuid.NewUUID()),
Namespace: namespace, Namespace: namespace,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "results", Name: "results",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{}, EmptyDir: &v1.EmptyDirVolumeSource{},
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
// TODO: Consider scraping logs instead of running a webserver. // TODO: Consider scraping logs instead of running a webserver.
{ {
Name: "webserver", Name: "webserver",
Image: "gcr.io/google_containers/test-webserver:e2e", Image: "gcr.io/google_containers/test-webserver:e2e",
Ports: []api.ContainerPort{ Ports: []v1.ContainerPort{
{ {
Name: "http", Name: "http",
ContainerPort: 80, ContainerPort: 80,
}, },
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "results", Name: "results",
MountPath: "/results", MountPath: "/results",
@ -83,7 +84,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotatio
Name: "querier", Name: "querier",
Image: "gcr.io/google_containers/dnsutils:e2e", Image: "gcr.io/google_containers/dnsutils:e2e",
Command: []string{"sh", "-c", wheezyProbeCmd}, Command: []string{"sh", "-c", wheezyProbeCmd},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "results", Name: "results",
MountPath: "/results", MountPath: "/results",
@ -94,7 +95,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotatio
Name: "jessie-querier", Name: "jessie-querier",
Image: "gcr.io/google_containers/jessie-dnsutils:e2e", Image: "gcr.io/google_containers/jessie-dnsutils:e2e",
Command: []string{"sh", "-c", jessieProbeCmd}, Command: []string{"sh", "-c", jessieProbeCmd},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "results", Name: "results",
MountPath: "/results", MountPath: "/results",
@ -171,11 +172,11 @@ func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePre
return probeCmd, fileName return probeCmd, fileName
} }
func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface) { func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
assertFilesContain(fileNames, fileDir, pod, client, false, "") assertFilesContain(fileNames, fileDir, pod, client, false, "")
} }
func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface, check bool, expected string) { func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
var failed []string var failed []string
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
@ -220,14 +221,14 @@ func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client
Expect(len(failed)).To(Equal(0)) Expect(len(failed)).To(Equal(0))
} }
func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) { func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name) podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
defer func() { defer func() {
By("deleting the pod") By("deleting the pod")
defer GinkgoRecover() defer GinkgoRecover()
podClient.Delete(pod.Name, api.NewDeleteOptions(0)) podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err) framework.Failf("Failed to create %s pod: %v", pod.Name, err)
@ -249,14 +250,14 @@ func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string
framework.Logf("DNS probes using %s succeeded\n", pod.Name) framework.Logf("DNS probes using %s succeeded\n", pod.Name)
} }
func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames []string, value string) { func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
By("submitting the pod to kubernetes") By("submitting the pod to kubernetes")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name) podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
defer func() { defer func() {
By("deleting the pod") By("deleting the pod")
defer GinkgoRecover() defer GinkgoRecover()
podClient.Delete(pod.Name, api.NewDeleteOptions(0)) podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}() }()
if _, err := podClient.Create(pod); err != nil { if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err) framework.Failf("Failed to create %s pod: %v", pod.Name, err)
@ -279,7 +280,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames
func verifyDNSPodIsRunning(f *framework.Framework) { func verifyDNSPodIsRunning(f *framework.Framework) {
systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem) systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem)
By("Waiting for DNS Service to be Running") By("Waiting for DNS Service to be Running")
options := api.ListOptions{LabelSelector: dnsServiceLabelSelector} options := v1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()}
dnsPods, err := systemClient.List(options) dnsPods, err := systemClient.List(options)
if err != nil { if err != nil {
framework.Failf("Failed to list all dns service pods") framework.Failf("Failed to list all dns service pods")
@ -291,20 +292,20 @@ func verifyDNSPodIsRunning(f *framework.Framework) {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod))
} }
func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *api.Service { func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
headlessService := &api.Service{ headlessService := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: serviceName, Name: serviceName,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: selector, Selector: selector,
}, },
} }
if externalName != "" { if externalName != "" {
headlessService.Spec.Type = api.ServiceTypeExternalName headlessService.Spec.Type = v1.ServiceTypeExternalName
headlessService.Spec.ExternalName = externalName headlessService.Spec.ExternalName = externalName
} else { } else {
headlessService.Spec.Ports = []api.ServicePort{ headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"}, {Port: 80, Name: "http", Protocol: "TCP"},
} }
} }
@ -463,7 +464,7 @@ var _ = framework.KubeDescribe("DNS", func() {
// Test changing the externalName field // Test changing the externalName field
By("changing the externalName to bar.example.com") By("changing the externalName to bar.example.com")
_, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) { _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com" s.Spec.ExternalName = "bar.example.com"
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -480,10 +481,10 @@ var _ = framework.KubeDescribe("DNS", func() {
// Test changing type from ExternalName to ClusterIP // Test changing type from ExternalName to ClusterIP
By("changing the service to type=ClusterIP") By("changing the service to type=ClusterIP")
_, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) { _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.Type = api.ServiceTypeClusterIP s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.ClusterIP = "127.1.2.3" s.Spec.ClusterIP = "127.1.2.3"
s.Spec.Ports = []api.ServicePort{ s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"}, {Port: 80, Name: "http", Protocol: "TCP"},
} }
}) })
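
The deferred cleanups above delete pods with v1.NewDeleteOptions(0), i.e. a zero-second grace period. A stand-in sketch of what that constructor builds; the field name mirrors the real DeleteOptions, but the type here is local:

package main

import "fmt"

// deleteOptions is a local stand-in; in the real v1.DeleteOptions a nil
// GracePeriodSeconds means "use the object's default grace period".
type deleteOptions struct {
	GracePeriodSeconds *int64
}

// newDeleteOptions mirrors v1.NewDeleteOptions: point at the given value.
func newDeleteOptions(grace int64) *deleteOptions {
	return &deleteOptions{GracePeriodSeconds: &grace}
}

func main() {
	opts := newDeleteOptions(0) // zero grace period: delete immediately
	fmt.Println(*opts.GracePeriodSeconds)
}
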
View File
@ -23,8 +23,8 @@ import (
"strings" "strings"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
}) })
}) })
func fetchDNSScalingConfigMap(c clientset.Interface) (*api.ConfigMap, error) { func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.Core().ConfigMaps(DNSNamespace).Get(DNSAutoscalerLabelName) cm, err := c.Core().ConfigMaps(DNSNamespace).Get(DNSAutoscalerLabelName)
if err != nil { if err != nil {
return nil, err return nil, err
@ -160,15 +160,15 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
return nil return nil
} }
func packDNSScalingConfigMap(params map[string]string) *api.ConfigMap { func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
configMap := api.ConfigMap{} configMap := v1.ConfigMap{}
configMap.ObjectMeta.Name = DNSAutoscalerLabelName configMap.ObjectMeta.Name = DNSAutoscalerLabelName
configMap.ObjectMeta.Namespace = DNSNamespace configMap.ObjectMeta.Namespace = DNSNamespace
configMap.Data = params configMap.Data = params
return &configMap return &configMap
} }
func updateDNSScalingConfigMap(c clientset.Interface, configMap *api.ConfigMap) error { func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.Core().ConfigMaps(DNSNamespace).Update(configMap) _, err := c.Core().ConfigMaps(DNSNamespace).Update(configMap)
if err != nil { if err != nil {
return err return err
@ -179,7 +179,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *api.ConfigMap)
func getDNSReplicas(c clientset.Interface) (int, error) { func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: KubeDNSLabelName})) label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: KubeDNSLabelName}))
listOpts := api.ListOptions{LabelSelector: label} listOpts := v1.ListOptions{LabelSelector: label.String()}
deployments, err := c.Extensions().Deployments(DNSNamespace).List(listOpts) deployments, err := c.Extensions().Deployments(DNSNamespace).List(listOpts)
if err != nil { if err != nil {
return 0, err return 0, err
@ -187,12 +187,12 @@ func getDNSReplicas(c clientset.Interface) (int, error) {
Expect(len(deployments.Items)).Should(Equal(1)) Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0] deployment := deployments.Items[0]
return int(deployment.Spec.Replicas), nil return int(*(deployment.Spec.Replicas)), nil
} }
func deleteDNSAutoscalerPod(c clientset.Interface) error { func deleteDNSAutoscalerPod(c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName})) label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
listOpts := api.ListOptions{LabelSelector: label} listOpts := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(DNSNamespace).List(listOpts) pods, err := c.Core().Pods(DNSNamespace).List(listOpts)
if err != nil { if err != nil {
return err return err
@ -227,7 +227,7 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, expected int, timeout ti
return nil return nil
} }
func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *api.ConfigMap, err error) { func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout) framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) { condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(c) configMap, err = fetchDNSScalingConfigMap(c)
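
A pattern that recurs throughout this commit: the internal ListOptions carried a typed labels.Selector, while v1.ListOptions carries its serialized string, hence all the .String() calls. A minimal sketch using the same labels package this file imports (the key/value pair is illustrative, not taken from the commit):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/labels"
)

func main() {
	label := labels.SelectorFromSet(labels.Set(map[string]string{
		"k8s-app": "kube-dns", // illustrative pair
	}))
	// v1.ListOptions{LabelSelector: label.String()} is the new pattern;
	// the internal clientset accepted the Selector value directly.
	fmt.Println(label.String()) // k8s-app=kube-dns
}
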
View File
@ -31,6 +31,7 @@ import (
"github.com/onsi/gomega" "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/logs" "k8s.io/kubernetes/pkg/util/logs"
"k8s.io/kubernetes/pkg/util/runtime" "k8s.io/kubernetes/pkg/util/runtime"
@ -94,19 +95,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
framework.Failf("Failed to setup provider config: %v", err) framework.Failf("Failed to setup provider config: %v", err)
} }
c, err := framework.LoadInternalClientset() c, err := framework.LoadClientset()
if err != nil { if err != nil {
glog.Fatal("Error loading client: ", err) glog.Fatal("Error loading client: ", err)
} }
clientset, err := framework.LoadClientset()
if err != nil {
glog.Fatal("Error loading clientset: ", err)
}
// Delete any namespaces except default and kube-system. This ensures no // Delete any namespaces except default and kube-system. This ensures no
// lingering resources are left over from a previous test run. // lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart { if framework.TestContext.CleanStart {
deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault}) deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, v1.NamespaceDefault})
if err != nil { if err != nil {
framework.Failf("Error deleting orphaned namespaces: %v", err) framework.Failf("Error deleting orphaned namespaces: %v", err)
} }
@ -127,9 +124,9 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// ready will fail). // ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil { if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil {
framework.DumpAllNamespaceInfo(c, clientset, api.NamespaceSystem) framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf) framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf)
framework.RunKubernetesServiceTestContainer(c, api.NamespaceDefault) framework.RunKubernetesServiceTestContainer(c, v1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err) framework.Failf("Error waiting for all pods to be running and ready: %v", err)
} }
View File
@ -17,8 +17,8 @@ limitations under the License.
package e2e package e2e
import ( import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -57,8 +57,8 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
volumeName := "secret-volume" volumeName := "secret-volume"
volumeMountPath := "/etc/secret-volume" volumeMountPath := "/etc/secret-volume"
secret := &api.Secret{ secret := &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Name: name, Name: name,
}, },
@ -77,35 +77,35 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
gitURL, gitRepo, gitCleanup := createGitServer(f) gitURL, gitRepo, gitCleanup := createGitServer(f)
defer gitCleanup() defer gitCleanup()
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()), Name: "pod-secrets-" + string(uuid.NewUUID()),
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
Secret: &api.SecretVolumeSource{ Secret: &v1.SecretVolumeSource{
SecretName: name, SecretName: name,
}, },
}, },
}, },
{ {
Name: gitVolumeName, Name: gitVolumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{ GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL, Repository: gitURL,
Directory: gitRepo, Directory: gitRepo,
}, },
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "secret-test", Name: "secret-test",
Image: "gcr.io/google_containers/test-webserver:e2e", Image: "gcr.io/google_containers/test-webserver:e2e",
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: volumeMountPath, MountPath: volumeMountPath,
@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
framework.Failf("unable to delete secret %v: %v", secret.Name, err) framework.Failf("unable to delete secret %v: %v", secret.Name, err)
} }
By("Cleaning up the git vol pod") By("Cleaning up the git vol pod")
if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil { if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err) framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
} }
}() }()
@ -177,18 +177,18 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
labels := map[string]string{"name": gitServerPodName} labels := map[string]string{"name": gitServerPodName}
gitServerPod := &api.Pod{ gitServerPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: gitServerPodName, Name: gitServerPodName,
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "git-repo", Name: "git-repo",
Image: "gcr.io/google_containers/fakegitserver:0.1", Image: "gcr.io/google_containers/fakegitserver:0.1",
ImagePullPolicy: "IfNotPresent", ImagePullPolicy: "IfNotPresent",
Ports: []api.ContainerPort{ Ports: []v1.ContainerPort{
{ContainerPort: int32(containerPort)}, {ContainerPort: int32(containerPort)},
}, },
}, },
@ -200,13 +200,13 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
// Portal IP and port // Portal IP and port
httpPort := 2345 httpPort := 2345
gitServerSvc := &api.Service{ gitServerSvc := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "git-server-svc", Name: "git-server-svc",
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: labels, Selector: labels,
Ports: []api.ServicePort{ Ports: []v1.ServicePort{
{ {
Name: "http-portal", Name: "http-portal",
Port: int32(httpPort), Port: int32(httpPort),
@ -222,7 +222,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() { return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
By("Cleaning up the git server pod") By("Cleaning up the git server pod")
if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil { if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err) framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
} }
By("Cleaning up the git server svc") By("Cleaning up the git server svc")
@ -232,19 +232,19 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
} }
} }
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []api.Volume, volumeMounts []api.VolumeMount) { func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ { for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
volumeName := fmt.Sprintf("racey-git-repo-%d", i) volumeName := fmt.Sprintf("racey-git-repo-%d", i)
volumes = append(volumes, api.Volume{ volumes = append(volumes, v1.Volume{
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
GitRepo: &api.GitRepoVolumeSource{ GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL, Repository: gitURL,
Directory: gitRepo, Directory: gitRepo,
}, },
}, },
}) })
volumeMounts = append(volumeMounts, api.VolumeMount{ volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName, Name: volumeName,
MountPath: fmt.Sprintf("/etc/git-volume-%d", i), MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
}) })
@ -257,8 +257,8 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ { for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
configMapName := fmt.Sprintf("racey-configmap-%d", i) configMapName := fmt.Sprintf("racey-configmap-%d", i)
configMapNames = append(configMapNames, configMapName) configMapNames = append(configMapNames, configMapName)
configMap := &api.ConfigMap{ configMap := &v1.ConfigMap{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name, Namespace: f.Namespace.Name,
Name: configMapName, Name: configMapName,
}, },
@ -280,17 +280,17 @@ func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
} }
} }
func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volumeMounts []api.VolumeMount) { func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i, configMapName := range configMapNames { for i, configMapName := range configMapNames {
volumeName := fmt.Sprintf("racey-configmap-%d", i) volumeName := fmt.Sprintf("racey-configmap-%d", i)
volumes = append(volumes, api.Volume{ volumes = append(volumes, v1.Volume{
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{ LocalObjectReference: v1.LocalObjectReference{
Name: configMapName, Name: configMapName,
}, },
Items: []api.KeyToPath{ Items: []v1.KeyToPath{
{ {
Key: "data-1", Key: "data-1",
Path: "data-1", Path: "data-1",
@ -299,7 +299,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume
}, },
}, },
}) })
volumeMounts = append(volumeMounts, api.VolumeMount{ volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName, Name: volumeName,
MountPath: fmt.Sprintf("/etc/config-%d", i), MountPath: fmt.Sprintf("/etc/config-%d", i),
}) })
@ -307,7 +307,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume
return return
} }
func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volumeMounts []api.VolumeMount, podCount int32) { func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID()) rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet) nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0)) Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
@ -315,7 +315,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
By("Creating RC which spawns configmap-volume pods") By("Creating RC which spawns configmap-volume pods")
affinity := map[string]string{ affinity := map[string]string{
api.AffinityAnnotationKey: fmt.Sprintf(` v1.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{ "nodeSelectorTerms": [{
"matchExpressions": [{ "matchExpressions": [{
@ -327,35 +327,35 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
}}}`, targetNode.Name), }}}`, targetNode.Name),
} }
rc := &api.ReplicationController{ rc := &v1.ReplicationController{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: rcName, Name: rcName,
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: podCount, Replicas: &podCount,
Selector: map[string]string{ Selector: map[string]string{
"name": rcName, "name": rcName,
}, },
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Annotations: affinity, Annotations: affinity,
Labels: map[string]string{"name": rcName}, Labels: map[string]string{"name": rcName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "test-container", Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sleep", "10000"}, Command: []string{"sleep", "10000"},
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceCPU: resource.MustParse("10m"), v1.ResourceCPU: resource.MustParse("10m"),
}, },
}, },
VolumeMounts: volumeMounts, VolumeMounts: volumeMounts,
}, },
}, },
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
Volumes: volumes, Volumes: volumes,
}, },
}, },
@ -365,7 +365,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
Expect(err).NotTo(HaveOccurred(), "error creating replication controller") Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
defer func() { defer func() {
err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName) err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
}() }()
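
As a side note on the RC above, its CPU request is parsed from the shorthand "10m" (10 millicores). A small sketch with the resource package these tests already import:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("10m") // 10 millicores = 0.01 of a core
	fmt.Println(cpu.String())        // "10m"
	fmt.Println(cpu.MilliValue())    // 10
}
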
View File
@ -19,7 +19,7 @@ package e2e
import ( import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -106,7 +106,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
By("deleting pods from existing replication controller") By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := api.ListOptions{LabelSelector: rcSelector} options := v1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err) framework.Logf("apiserver returned error, as expected before recovery: %v", err)
@ -116,7 +116,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
return false, nil return false, nil
} }
for _, pod := range pods.Items { for _, pod := range pods.Items {
err = podClient.Delete(pod.Name, api.NewDeleteOptions(0)) err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
} }
framework.Logf("apiserver has recovered") framework.Logf("apiserver has recovered")
@ -125,11 +125,11 @@ func checkExistingRCRecovers(f *framework.Framework) {
By("waiting for replication controller to recover") By("waiting for replication controller to recover")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
options := api.ListOptions{LabelSelector: rcSelector} options := v1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items { for _, pod := range pods.Items {
if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) { if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
return true, nil return true, nil
} }
} }
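
The recovery check above keys on v1.IsPodReady, which is true exactly when the pod reports a Ready condition with status True. A self-contained sketch of that predicate with stand-in types (the real ones live in pkg/api/v1):

package main

import "fmt"

type conditionType string
type conditionStatus string

const (
	podReady      conditionType   = "Ready"
	conditionTrue conditionStatus = "True"
)

type podCondition struct {
	Type   conditionType
	Status conditionStatus
}

// isPodReady mirrors v1.IsPodReady: find the Ready condition, require True.
func isPodReady(conditions []podCondition) bool {
	for _, c := range conditions {
		if c.Type == podReady {
			return c.Status == conditionTrue
		}
	}
	return false
}

func main() {
	fmt.Println(isPodReady([]podCondition{{Type: podReady, Status: conditionTrue}})) // true
	fmt.Println(isPodReady(nil))                                                     // false
}
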
View File
@ -21,7 +21,7 @@ import (
"strconv" "strconv"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -42,20 +42,20 @@ var _ = framework.KubeDescribe("Events", func() {
By("creating the pod") By("creating the pod")
name := "send-events-" + string(uuid.NewUUID()) name := "send-events-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "p", Name: "p",
Image: "gcr.io/google_containers/serve_hostname:v1.4", Image: "gcr.io/google_containers/serve_hostname:v1.4",
Ports: []api.ContainerPort{{ContainerPort: 80}}, Ports: []v1.ContainerPort{{ContainerPort: 80}},
}, },
}, },
}, },
@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("Events", func() {
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("Events", func() {
framework.Failf("Failed to get pod: %v", err) framework.Failf("Failed to get pod: %v", err)
} }
fmt.Printf("%+v\n", podWithUid) fmt.Printf("%+v\n", podWithUid)
var events *api.EventList var events *v1.EventList
// Check for scheduler event about the pod. // Check for scheduler event about the pod.
By("checking for scheduler event about the pod") By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
@ -92,9 +92,9 @@ var _ = framework.KubeDescribe("Events", func() {
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID), "involvedObject.uid": string(podWithUid.UID),
"involvedObject.namespace": f.Namespace.Name, "involvedObject.namespace": f.Namespace.Name,
"source": api.DefaultSchedulerName, "source": v1.DefaultSchedulerName,
}.AsSelector() }.AsSelector().String()
options := api.ListOptions{FieldSelector: selector} options := v1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
if err != nil { if err != nil {
return false, err return false, err
@ -113,8 +113,8 @@ var _ = framework.KubeDescribe("Events", func() {
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.namespace": f.Namespace.Name, "involvedObject.namespace": f.Namespace.Name,
"source": "kubelet", "source": "kubelet",
}.AsSelector() }.AsSelector().String()
options := api.ListOptions{FieldSelector: selector} options := v1.ListOptions{FieldSelector: selector}
events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options) events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options)
if err != nil { if err != nil {
return false, err return false, err
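
Field selectors for the event queries above are built the same way as label selectors elsewhere in this commit: a fields.Set rendered to a string for v1.ListOptions. A minimal sketch with the fields package this file imports (the namespace value is illustrative):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fields"
)

func main() {
	selector := fields.Set{
		"involvedObject.kind":      "Pod",
		"involvedObject.namespace": "default", // illustrative namespace
		"source":                   "kubelet",
	}.AsSelector().String()
	// Comma-joined key=value terms, ready for v1.ListOptions{FieldSelector: selector}.
	fmt.Println(selector)
}
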
View File
@ -21,8 +21,8 @@ import (
"path/filepath" "path/filepath"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -73,7 +73,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
// we need two namespaces anyway, so let's forget about // we need two namespaces anyway, so let's forget about
// the one created in BeforeEach and create two new ones. // the one created in BeforeEach and create two new ones.
namespaces := []*api.Namespace{nil, nil} namespaces := []*v1.Namespace{nil, nil}
for i := range namespaces { for i := range namespaces {
var err error var err error
namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil) namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
// the application itself may have not been initialized. Just query the application. // the application itself may have not been initialized. Just query the application.
for _, ns := range namespaces { for _, ns := range namespaces {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
options := api.ListOptions{LabelSelector: label} options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns.Name).List(options) pods, err := c.Core().Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods) err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
@ -117,7 +117,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
// dns error or timeout. // dns error or timeout.
// This code is probably unnecessary, but let's stay on the safe side. // This code is probably unnecessary, but let's stay on the safe side.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
options := api.ListOptions{LabelSelector: label} options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(namespaces[0].Name).List(options) pods, err := c.Core().Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 { if err != nil || pods == nil || len(pods.Items) == 0 {
@ -151,6 +151,6 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
}) })
}) })
func getNsCmdFlag(ns *api.Namespace) string { func getNsCmdFlag(ns *v1.Namespace) string {
return fmt.Sprintf("--namespace=%v", ns.Name) return fmt.Sprintf("--namespace=%v", ns.Name)
} }
View File
@ -25,7 +25,7 @@ import (
"syscall" "syscall"
"time" "time"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
View File
@ -26,8 +26,8 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -49,11 +49,11 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
return f.NewClusterVerification( return f.NewClusterVerification(
framework.PodStateVerification{ framework.PodStateVerification{
Selectors: map[string]string{selectorKey: selectorValue}, Selectors: map[string]string{selectorKey: selectorValue},
ValidPhases: []api.PodPhase{api.PodRunning}, ValidPhases: []v1.PodPhase{v1.PodRunning},
}) })
} }
// Customized ForEach wrapper for this test. // Customized ForEach wrapper for this test.
forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) { forEachPod := func(selectorKey string, selectorValue string, fn func(v1.Pod)) {
clusterState(selectorKey, selectorValue).ForEach(fn) clusterState(selectorKey, selectorValue).ForEach(fn)
} }
var c clientset.Interface var c clientset.Interface
@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label) err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod api.Pod) { forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName { if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue})) label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label) err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod api.Pod) { forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName { if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.Logf("Now polling for Master startup...") framework.Logf("Now polling for Master startup...")
// Only one master pod: But it's a natural way to look up pod names. // Only one master pod: But it's a natural way to look up pod names.
forEachPod(selectorKey, selectorValue, func(pod api.Pod) { forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
framework.Logf("Now waiting for master to startup in %v", pod.Name) framework.Logf("Now waiting for master to startup in %v", pod.Name)
_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
By("waiting for master endpoint") By("waiting for master endpoint")
err = framework.WaitForEndpoint(c, ns, "spark-master") err = framework.WaitForEndpoint(c, ns, "spark-master")
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod(selectorKey, selectorValue, func(pod api.Pod) { forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout) _, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
if maErr != nil { if maErr != nil {
framework.Failf("Didn't find target string. error:", maErr) framework.Failf("Didn't find target string. error:", maErr)
@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.Logf("Now polling for worker startup...") framework.Logf("Now polling for worker startup...")
forEachPod(selectorKey, selectorValue, forEachPod(selectorKey, selectorValue,
func(pod api.Pod) { func(pod v1.Pod) {
_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout) _, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
Expect(slaveErr).NotTo(HaveOccurred()) Expect(slaveErr).NotTo(HaveOccurred())
}) })
@ -226,7 +226,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label) err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod("app", "cassandra", func(pod api.Pod) { forEachPod("app", "cassandra", func(pod v1.Pod) {
framework.Logf("Verifying pod %v ", pod.Name) framework.Logf("Verifying pod %v ", pod.Name)
// TODO how do we do this better? Ready Probe? // TODO how do we do this better? Ready Probe?
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout) _, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout)
@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
}) })
By("Finding each node in the nodetool status lines") By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod api.Pod) { forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status") output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output) matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if matched != true { if matched != true {
@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout, err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
func() (bool, error) { func() (bool, error) {
podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: label}) podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()})
if err != nil { if err != nil {
return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label) return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
} }
@ -294,9 +294,9 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items)) return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
} }
for _, p := range podList.Items { for _, p := range podList.Items {
isReady := api.IsPodReady(&p) isReady := v1.IsPodReady(&p)
if p.Status.Phase != api.PodRunning || !isReady { if p.Status.Phase != v1.PodRunning || !isReady {
framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady) framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady)
return false, nil return false, nil
} }
} }
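The statefulset poll above combines the versioned phase constant with the versioned readiness helper. A condensed sketch of the same Running-and-Ready wait, assuming caller-supplied interval and timeout (hypothetical helper name):

package e2e

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForPodsRunningReady polls until every pod matching selector is Running
// and Ready, the same check the hunk above performs for the cassandra pods.
func waitForPodsRunningReady(c clientset.Interface, ns, selector string, interval, timeout time.Duration) error {
	return wait.PollImmediate(interval, timeout, func() (bool, error) {
		podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: selector})
		if err != nil {
			return false, fmt.Errorf("unable to list pods: %v", err)
		}
		for i := range podList.Items {
			p := &podList.Items[i]
			if p.Status.Phase != v1.PodRunning || !v1.IsPodReady(p) {
				return false, nil // not there yet; keep polling
			}
		}
		return true, nil
	})
}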
@ -305,7 +305,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Finding each node in the nodetool status lines") By("Finding each node in the nodetool status lines")
forEachPod("app", "cassandra", func(pod api.Pod) { forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status") output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output) matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if matched != true { if matched != true {
@ -357,7 +357,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "storm-worker"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "storm-worker"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label) err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod("name", "storm-worker", func(pod api.Pod) { forEachPod("name", "storm-worker", func(pod v1.Pod) {
//do nothing, just wait for the pod to be running //do nothing, just wait for the pod to be running
}) })
// TODO: Add logging configuration to nimbus & workers images and then // TODO: Add logging configuration to nimbus & workers images and then
@ -398,7 +398,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) { for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
pod, err := c.Core().Pods(ns).Get(podName) pod, err := c.Core().Pods(ns).Get(podName)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName)) framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName) stat := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount) framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
if stat.RestartCount > 0 { if stat.RestartCount > 0 {
framework.Logf("Saw %v restart, succeeded...", podName) framework.Logf("Saw %v restart, succeeded...", podName)
@ -494,7 +494,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
err := testutils.WaitForPodsWithLabelRunning(c, ns, label) err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
checkDbInstances := func() { checkDbInstances := func() {
forEachPod("db", "rethinkdb", func(pod api.Pod) { forEachPod("db", "rethinkdb", func(pod v1.Pod) {
_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout) _, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -504,7 +504,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("scaling rethinkdb") By("scaling rethinkdb")
framework.ScaleRC(f.ClientSet, ns, "rethinkdb-rc", 2, true) framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true)
checkDbInstances() checkDbInstances()
By("starting admin") By("starting admin")
@ -536,7 +536,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label) err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
forEachPod("name", "hazelcast", func(pod api.Pod) { forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout) _, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout)
@ -547,8 +547,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("scaling hazelcast") By("scaling hazelcast")
framework.ScaleRC(f.ClientSet, ns, "hazelcast", 2, true) framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true)
forEachPod("name", "hazelcast", func(pod api.Pod) { forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout) _, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
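ScaleRC now threads both clients through: reads go via the versioned clientset while the scaler still needs internal objects. The full signature isn't visible in this hunk, so the sketch below only mirrors the call shape shown above:

package e2e

import "k8s.io/kubernetes/test/e2e/framework"

// scaleHazelcast repeats the post-migration call shape: versioned clientset
// first, internal clientset second; the trailing true waits for the pods.
func scaleHazelcast(f *framework.Framework, ns string) {
	framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true)
}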


@ -23,8 +23,8 @@ import (
"time" "time"
clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/typed/core/v1" clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
api_v1 "k8s.io/kubernetes/pkg/api/v1" api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
// Create resources in the namespace. // Create resources in the namespace.
event := api_v1.Event{ event := api_v1.Event{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: api_v1.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName(eventNamePrefix), Name: v1.SimpleNameGenerator.GenerateName(eventNamePrefix),
Namespace: nsName, Namespace: nsName,
}, },
InvolvedObject: api_v1.ObjectReference{ InvolvedObject: api_v1.ObjectReference{
@ -185,7 +185,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters m
func createNamespace(nsClient clientset.NamespaceInterface) string { func createNamespace(nsClient clientset.NamespaceInterface) string {
ns := api_v1.Namespace{ ns := api_v1.Namespace{
ObjectMeta: api_v1.ObjectMeta{ ObjectMeta: api_v1.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName(namespacePrefix), Name: v1.SimpleNameGenerator.GenerateName(namespacePrefix),
}, },
} }
By(fmt.Sprintf("Creating namespace %s", ns.Name)) By(fmt.Sprintf("Creating namespace %s", ns.Name))


@ -25,7 +25,6 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5" fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util" "k8s.io/kubernetes/federation/pkg/federation-controller/util"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@ -156,7 +155,7 @@ func createSecretOrFail(clientset *fedclientset.Clientset, nsName string) *v1.Se
secret := &v1.Secret{ secret := &v1.Secret{
ObjectMeta: v1.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName(secretNamePrefix), Name: v1.SimpleNameGenerator.GenerateName(secretNamePrefix),
Namespace: nsName, Namespace: nsName,
}, },
} }


@ -347,7 +347,7 @@ func cleanupServiceShardLoadBalancer(clusterName string, service *v1.Service, ti
return fmt.Errorf("cloud provider undefined") return fmt.Errorf("cloud provider undefined")
} }
internalSvc := &api.Service{} internalSvc := &v1.Service{}
err := api.Scheme.Convert(service, internalSvc, nil) err := api.Scheme.Convert(service, internalSvc, nil)
if err != nil { if err != nil {
return fmt.Errorf("failed to convert versioned service object to internal type: %v", err) return fmt.Errorf("failed to convert versioned service object to internal type: %v", err)
@ -415,19 +415,19 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s
command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)} command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)}
By(fmt.Sprintf("Looking up %q", name)) By(fmt.Sprintf("Looking up %q", name))
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "federated-service-discovery-container", Name: "federated-service-discovery-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: command, Command: command,
}, },
}, },
RestartPolicy: api.RestartPolicyOnFailure, RestartPolicy: v1.RestartPolicyOnFailure,
}, },
} }
@ -438,7 +438,7 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s
By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName)) By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName))
defer func() { defer func() {
By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName)) By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName))
err := f.ClientSet.Core().Pods(nsName).Delete(podName, api.NewDeleteOptions(0)) err := f.ClientSet.Core().Pods(nsName).Delete(podName, v1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName) framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName)
By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName)) By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName))
}() }()
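Grace periods also come from the versioned helpers: v1.NewDeleteOptions(0) requests immediate deletion. A sketch of the create-then-deferred-delete pattern used by discoverService, with a hypothetical helper name:

package e2e

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// createTransientPod creates the pod and hands back a cleanup func that
// deletes it with a zero grace period, as the deferred block above does.
func createTransientPod(c clientset.Interface, nsName string, pod *v1.Pod) (func(), error) {
	if _, err := c.Core().Pods(nsName).Create(pod); err != nil {
		return nil, err
	}
	return func() {
		_ = c.Core().Pods(nsName).Delete(pod.Name, v1.NewDeleteOptions(0))
	}, nil
}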


@ -23,6 +23,7 @@ import (
"strings" "strings"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand" "k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand" remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
@ -62,7 +63,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
Namespace(options.Namespace). Namespace(options.Namespace).
SubResource("exec"). SubResource("exec").
Param("container", options.ContainerName) Param("container", options.ContainerName)
req.VersionedParams(&api.PodExecOptions{ req.VersionedParams(&v1.PodExecOptions{
Container: options.ContainerName, Container: options.ContainerName,
Command: options.Command, Command: options.Command,
Stdin: options.Stdin != nil, Stdin: options.Stdin != nil,
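The exec subresource now takes versioned options. The hunk is truncated, so this sketch only builds the options struct with the fields visible above plus the standard stdout/stderr booleans (an assumption):

package framework

import "k8s.io/kubernetes/pkg/api/v1"

// execOptionsFor assembles the versioned exec options passed to
// req.VersionedParams in the hunk above. Stdout/Stderr are assumed here.
func execOptionsFor(container string, command []string, interactive bool) *v1.PodExecOptions {
	return &v1.PodExecOptions{
		Container: container,
		Command:   command,
		Stdin:     interactive, // attach stdin only for interactive runs
		Stdout:    true,
		Stderr:    true,
	}
}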


@ -36,7 +36,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
@ -60,15 +60,15 @@ const (
type Framework struct { type Framework struct {
BaseName string BaseName string
// ClientSet uses internal objects, you should use ClientSet_1_5 where possible. // ClientSet uses versioned objects and should be preferred over InternalClientset where possible.
ClientSet internalclientset.Interface ClientSet clientset.Interface
ClientSet_1_5 *release_1_5.Clientset InternalClientset *internalclientset.Clientset
StagingClient *staging.Clientset StagingClient *staging.Clientset
ClientPool dynamic.ClientPool ClientPool dynamic.ClientPool
Namespace *api.Namespace // Every test has at least one namespace Namespace *v1.Namespace // Every test has at least one namespace
namespacesToDelete []*api.Namespace // Some tests have more than one. namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration NamespaceDeletionTimeout time.Duration
gatherer *containerResourceGatherer gatherer *containerResourceGatherer
@ -130,7 +130,7 @@ func NewDefaultGroupVersionFramework(baseName string, groupVersion unversioned.G
return f return f
} }
func NewFramework(baseName string, options FrameworkOptions, client internalclientset.Interface) *Framework { func NewFramework(baseName string, options FrameworkOptions, client clientset.Interface) *Framework {
f := &Framework{ f := &Framework{
BaseName: baseName, BaseName: baseName,
AddonResourceConstraints: make(map[string]ResourceConstraint), AddonResourceConstraints: make(map[string]ResourceConstraint),
@ -193,9 +193,9 @@ func (f *Framework) BeforeEach() {
if TestContext.KubeAPIContentType != "" { if TestContext.KubeAPIContentType != "" {
config.ContentType = TestContext.KubeAPIContentType config.ContentType = TestContext.KubeAPIContentType
} }
f.ClientSet, err = internalclientset.NewForConfig(config) f.ClientSet, err = clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
f.ClientSet_1_5, err = release_1_5.NewForConfig(config) f.InternalClientset, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
clientRepoConfig := getClientRepoConfig(config) clientRepoConfig := getClientRepoConfig(config)
f.StagingClient, err = staging.NewForConfig(clientRepoConfig) f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
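Both client flavors are now built from the same rest config, with the versioned clientset as the primary handle. A minimal sketch of that construction (hypothetical helper name):

package framework

import (
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newClients builds the versioned clientset for everyday use and the internal
// clientset for the few callers that still need internal types.
func newClients(config *restclient.Config) (clientset.Interface, *internalclientset.Clientset, error) {
	c, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	ic, err := internalclientset.NewForConfig(config)
	if err != nil {
		return nil, nil, err
	}
	return c, ic, nil
}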
@ -369,7 +369,7 @@ func (f *Framework) AfterEach() {
// Print events if the test failed. // Print events if the test failed.
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure { if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
// Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client. // Pass both unversioned client and versioned clientset, till we have removed all uses of the unversioned client.
DumpAllNamespaceInfo(f.ClientSet, f.ClientSet_1_5, f.Namespace.Name) DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
By(fmt.Sprintf("Dumping a list of prepulled images on each node")) By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf) LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
if f.federated { if f.federated {
@ -439,7 +439,7 @@ func (f *Framework) AfterEach() {
} }
} }
func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) { func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS createTestingNS := TestContext.CreateTestingNS
if createTestingNS == nil { if createTestingNS == nil {
createTestingNS = CreateTestingNS createTestingNS = CreateTestingNS
@ -507,14 +507,14 @@ func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
// TestContainerOutput runs the given pod in the given namespace and waits // TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests // for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher. // the specified container log against the given expected output using a substring matcher.
func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) { func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring) f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring)
} }
// TestContainerOutputRegexp runs the given pod in the given namespace and waits // TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests // for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher. // the specified container log against the given expected output using a regexp matcher.
func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) { func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp) f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
} }
@ -524,13 +524,13 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
for { for {
// TODO: Endpoints client should take a field selector so we // TODO: Endpoints client should take a field selector so we
// don't have to list everything. // don't have to list everything.
list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(api.ListOptions{}) list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(v1.ListOptions{})
if err != nil { if err != nil {
return err return err
} }
rv := list.ResourceVersion rv := list.ResourceVersion
isOK := func(e *api.Endpoints) bool { isOK := func(e *v1.Endpoints) bool {
return e.Name == serviceName && len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 return e.Name == serviceName && len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0
} }
for i := range list.Items { for i := range list.Items {
@ -539,8 +539,8 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
} }
} }
options := api.ListOptions{ options := v1.ListOptions{
FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector(), FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector().String(),
ResourceVersion: rv, ResourceVersion: rv,
} }
w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options) w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
@ -555,7 +555,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
// reget and re-watch // reget and re-watch
break break
} }
if e, ok := val.Object.(*api.Endpoints); ok { if e, ok := val.Object.(*v1.Endpoints); ok {
if isOK(e) { if isOK(e) {
return nil return nil
} }
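Field selectors follow the same serialization rule as label selectors. A sketch of the list-then-watch shape from WaitForAnEndpoint, assuming the versioned endpoints client (hypothetical helper name):

package framework

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/watch"
)

// watchEndpointsByName lists once to obtain a resource version, then watches
// only the named object through a serialized field selector.
func watchEndpointsByName(c clientset.Interface, ns, name string) (watch.Interface, error) {
	list, err := c.Core().Endpoints(ns).List(v1.ListOptions{})
	if err != nil {
		return nil, err
	}
	options := v1.ListOptions{
		FieldSelector:   fields.Set{"metadata.name": name}.AsSelector().String(),
		ResourceVersion: list.ResourceVersion,
	}
	return c.Core().Endpoints(ns).Watch(options)
}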
@ -604,7 +604,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin
} }
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once. // CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n api.Node) api.PodSpec, count int, block bool) (error, *api.Service) { func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (error, *v1.Service) {
var err error = nil var err error = nil
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName) theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count) f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
@ -615,7 +615,7 @@ func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int,
} }
// CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label. // CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *api.Service { func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
if appName == "" { if appName == "" {
panic(fmt.Sprintf("no app name provided")) panic(fmt.Sprintf("no app name provided"))
} }
@ -625,11 +625,11 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
} }
// For convenience, user sending ports are optional. // For convenience, user sending ports are optional.
portsFunc := func() []api.ServicePort { portsFunc := func() []v1.ServicePort {
if contPort < 1 || svcPort < 1 { if contPort < 1 || svcPort < 1 {
return nil return nil
} else { } else {
return []api.ServicePort{{ return []v1.ServicePort{{
Protocol: "TCP", Protocol: "TCP",
Port: int32(svcPort), Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort), TargetPort: intstr.FromInt(contPort),
@ -637,14 +637,14 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
} }
} }
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName) Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{ service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "service-for-" + appName, Name: "service-for-" + appName,
Labels: map[string]string{ Labels: map[string]string{
"app": appName + "-service", "app": appName + "-service",
}, },
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Ports: portsFunc(), Ports: portsFunc(),
Selector: serviceSelector, Selector: serviceSelector,
}, },
@ -654,7 +654,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
} }
// CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking. // CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string { func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes := GetReadySchedulableNodesOrDie(f.ClientSet) nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
labels := map[string]string{ labels := map[string]string{
"app": appName + "-pod", "app": appName + "-pod",
@ -663,8 +663,8 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
// one per node, but no more than maxCount. // one per node, but no more than maxCount.
if i <= maxCount { if i <= maxCount {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName) Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&api.Pod{ _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i), Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: labels, Labels: labels,
}, },
@ -834,22 +834,22 @@ type PodStateVerification struct {
Selectors map[string]string Selectors map[string]string
// Required: The phases which are valid for your pod. // Required: The phases which are valid for your pod.
ValidPhases []api.PodPhase ValidPhases []v1.PodPhase
// Optional: only pods passing this function will pass the filter // Optional: only pods passing this function will pass the filter
// Verify a pod. // Verify a pod.
// As an optimization, in addition to specifying filter (boolean), // As an optimization, in addition to specifying filter (boolean),
// this function allows specifying an error as well. // this function allows specifying an error as well.
// The error indicates that the polling of the pod spectrum should stop. // The error indicates that the polling of the pod spectrum should stop.
Verify func(api.Pod) (bool, error) Verify func(v1.Pod) (bool, error)
// Optional: only pods with this name will pass the filter. // Optional: only pods with this name will pass the filter.
PodName string PodName string
} }
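With the hooks retyped to v1.Pod, custom filters are written against the versioned objects. A small sketch of filling in this struct, with a hypothetical IP-assigned verifier:

package framework

import "k8s.io/kubernetes/pkg/api/v1"

// runningWithIP builds a PodStateVerification that keeps Running pods and,
// through Verify, additionally requires an assigned pod IP.
func runningWithIP(selector map[string]string) PodStateVerification {
	return PodStateVerification{
		Selectors:   selector,
		ValidPhases: []v1.PodPhase{v1.PodRunning},
		Verify: func(p v1.Pod) (bool, error) {
			return p.Status.PodIP != "", nil // filter only; never abort the poll
		},
	}
}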
type ClusterVerification struct { type ClusterVerification struct {
client internalclientset.Interface client clientset.Interface
namespace *api.Namespace // pointer rather than string, since ns isn't created until before each. namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
podState PodStateVerification podState PodStateVerification
} }
@ -861,11 +861,11 @@ func (f *Framework) NewClusterVerification(filter PodStateVerification) *Cluster
} }
} }
func passesPodNameFilter(pod api.Pod, name string) bool { func passesPodNameFilter(pod v1.Pod, name string) bool {
return name == "" || strings.Contains(pod.Name, name) return name == "" || strings.Contains(pod.Name, name)
} }
func passesVerifyFilter(pod api.Pod, verify func(p api.Pod) (bool, error)) (bool, error) { func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
if verify == nil { if verify == nil {
return true, nil return true, nil
} else { } else {
@ -879,7 +879,7 @@ func passesVerifyFilter(pod api.Pod, verify func(p api.Pod) (bool, error)) (bool
} }
} }
func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool { func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
passesPhaseFilter := false passesPhaseFilter := false
for _, phase := range validPhases { for _, phase := range validPhases {
if pod.Status.Phase == phase { if pod.Status.Phase == phase {
@ -890,18 +890,18 @@ func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
} }
// filterLabels returns a list of pods which have labels. // filterLabels returns a list of pods which have labels.
func filterLabels(selectors map[string]string, cli internalclientset.Interface, ns string) (*api.PodList, error) { func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
var err error var err error
var selector labels.Selector var selector labels.Selector
var pl *api.PodList var pl *v1.PodList
// List pods based on selectors. This might be a tiny optimization rather than filtering // List pods based on selectors. This might be a tiny optimization rather than filtering
// everything manually. // everything manually.
if len(selectors) > 0 { if len(selectors) > 0 {
selector = labels.SelectorFromSet(labels.Set(selectors)) selector = labels.SelectorFromSet(labels.Set(selectors))
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pl, err = cli.Core().Pods(ns).List(options) pl, err = cli.Core().Pods(ns).List(options)
} else { } else {
pl, err = cli.Core().Pods(ns).List(api.ListOptions{}) pl, err = cli.Core().Pods(ns).List(v1.ListOptions{})
} }
return pl, err return pl, err
} }
@ -909,20 +909,20 @@ func filterLabels(selectors map[string]string, cli internalclientset.Interface,
// filter filters pods which pass a filter. It can be used to compose // filter filters pods which pass a filter. It can be used to compose
// the more useful abstractions like ForEach, WaitFor, and so on, which // the more useful abstractions like ForEach, WaitFor, and so on, which
// can be used directly by tests. // can be used directly by tests.
func (p *PodStateVerification) filter(c internalclientset.Interface, namespace *api.Namespace) ([]api.Pod, error) { func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
if len(p.ValidPhases) == 0 || namespace == nil { if len(p.ValidPhases) == 0 || namespace == nil {
panic(fmt.Errorf("Need to specify valid pod phases (%v) and a namespace (%v). ", p.ValidPhases, namespace)) panic(fmt.Errorf("Need to specify valid pod phases (%v) and a namespace (%v). ", p.ValidPhases, namespace))
} }
ns := namespace.Name ns := namespace.Name
pl, err := filterLabels(p.Selectors, c, ns) // Build an api.PodList to operate against. pl, err := filterLabels(p.Selectors, c, ns) // Build a v1.PodList to operate against.
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors) Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil { if len(pl.Items) == 0 || err != nil {
return pl.Items, err return pl.Items, err
} }
unfilteredPods := pl.Items unfilteredPods := pl.Items
filteredPods := []api.Pod{} filteredPods := []v1.Pod{}
ReturnPodsSoFar: ReturnPodsSoFar:
// Next: Pod must match at least one of the states that the user specified // Next: Pod must match at least one of the states that the user specified
for _, pod := range unfilteredPods { for _, pod := range unfilteredPods {
@ -943,8 +943,8 @@ ReturnPodsSoFar:
// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification // WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
// definition. // definition.
func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]api.Pod, error) { func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
pods := []api.Pod{} pods := []v1.Pod{}
var returnedErr error var returnedErr error
err := wait.Poll(1*time.Second, timeout, func() (bool, error) { err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
@ -983,7 +983,7 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration)
// //
// For example, if you require at least 5 pods to be running before your test will pass, // For example, if you require at least 5 pods to be running before your test will pass,
// it's smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach. // it's smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
func (cl *ClusterVerification) ForEach(podFunc func(api.Pod)) error { func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace) pods, err := cl.podState.filter(cl.client, cl.namespace)
if err == nil { if err == nil {
if len(pods) == 0 { if len(pods) == 0 {


@ -29,8 +29,8 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/prometheus/common/model" "github.com/prometheus/common/model"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics" kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats" kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@ -157,7 +157,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor
client: c, client: c,
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate), nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
} }
nodes, err := m.client.Core().Nodes().List(api.ListOptions{}) nodes, err := m.client.Core().Nodes().List(v1.ListOptions{})
if err != nil { if err != nil {
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err) Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
} }
@ -695,7 +695,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
func (r *ResourceMonitor) Start() { func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes // It should be OK to monitor unschedulable Nodes
nodes, err := r.client.Core().Nodes().List(api.ListOptions{}) nodes, err := r.client.Core().Nodes().List(v1.ListOptions{})
if err != nil { if err != nil {
Failf("ResourceMonitor: unable to get list of nodes: %v", err) Failf("ResourceMonitor: unable to get list of nodes: %v", err)
} }


@ -25,7 +25,7 @@ import (
"text/tabwriter" "text/tabwriter"
"time" "time"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
) )
const ( const (


@ -28,7 +28,8 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/master/ports" "k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@ -323,7 +324,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
result := SchedulingLatency{} result := SchedulingLatency{}
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Core().Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(v1.ListOptions{})
ExpectNoError(err) ExpectNoError(err)
var data string var data string


@ -24,10 +24,10 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
api "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/rand" "k8s.io/kubernetes/pkg/util/rand"
@ -87,23 +87,23 @@ func getServiceSelector() map[string]string {
type NetworkingTestConfig struct { type NetworkingTestConfig struct {
// TestContainerPod is a test pod running the netexec image. It is capable // TestContainerPod is a test pod running the netexec image. It is capable
// of executing tcp/udp requests against ip:port. // of executing tcp/udp requests against ip:port.
TestContainerPod *api.Pod TestContainerPod *v1.Pod
// HostTestContainerPod is a pod running with hostNetworking=true, and the // HostTestContainerPod is a pod running with hostNetworking=true, and the
// hostexec image. // hostexec image.
HostTestContainerPod *api.Pod HostTestContainerPod *v1.Pod
// EndpointPods are the pods belonging to the Service created by this // EndpointPods are the pods belonging to the Service created by this
// test config. Each invocation of `setup` creates a service with // test config. Each invocation of `setup` creates a service with
// 1 pod per node running the netexecImage. // 1 pod per node running the netexecImage.
EndpointPods []*api.Pod EndpointPods []*v1.Pod
f *Framework f *Framework
podClient *PodClient podClient *PodClient
// NodePortService is a Service with Type=NodePort spanning over all // NodePortService is a Service with Type=NodePort spanning over all
// endpointPods. // endpointPods.
NodePortService *api.Service NodePortService *v1.Service
// ExternalAddrs is a list of external IPs of nodes in the cluster. // ExternalAddrs is a list of external IPs of nodes in the cluster.
ExternalAddrs []string ExternalAddrs []string
// Nodes is a list of nodes in the cluster. // Nodes is a list of nodes in the cluster.
Nodes []api.Node Nodes []v1.Node
// MaxTries is the number of retries tolerated for tests run against // MaxTries is the number of retries tolerated for tests run against
// endpoints and services created by this config. // endpoints and services created by this config.
MaxTries int MaxTries int
@ -298,41 +298,41 @@ func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) {
} }
} }
func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod { func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node string) *v1.Pod {
probe := &api.Probe{ probe := &v1.Probe{
InitialDelaySeconds: 10, InitialDelaySeconds: 10,
TimeoutSeconds: 30, TimeoutSeconds: 30,
PeriodSeconds: 10, PeriodSeconds: 10,
SuccessThreshold: 1, SuccessThreshold: 1,
FailureThreshold: 3, FailureThreshold: 3,
Handler: api.Handler{ Handler: v1.Handler{
HTTPGet: &api.HTTPGetAction{ HTTPGet: &v1.HTTPGetAction{
Path: "/healthz", Path: "/healthz",
Port: intstr.IntOrString{IntVal: EndpointHttpPort}, Port: intstr.IntOrString{IntVal: EndpointHttpPort},
}, },
}, },
} }
pod := &api.Pod{ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: config.Namespace, Namespace: config.Namespace,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "webserver", Name: "webserver",
Image: NetexecImageName, Image: NetexecImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"/netexec", "/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort), fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort), fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
}, },
Ports: []api.ContainerPort{ Ports: []v1.ContainerPort{
{ {
Name: "http", Name: "http",
ContainerPort: EndpointHttpPort, ContainerPort: EndpointHttpPort,
@ -340,7 +340,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node s
{ {
Name: "udp", Name: "udp",
ContainerPort: EndpointUdpPort, ContainerPort: EndpointUdpPort,
Protocol: api.ProtocolUDP, Protocol: v1.ProtocolUDP,
}, },
}, },
LivenessProbe: probe, LivenessProbe: probe,
@ -355,28 +355,28 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node s
return pod return pod
} }
func (config *NetworkingTestConfig) createTestPodSpec() *api.Pod { func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: testPodName, Name: testPodName,
Namespace: config.Namespace, Namespace: config.Namespace,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "webserver", Name: "webserver",
Image: NetexecImageName, Image: NetexecImageName,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{ Command: []string{
"/netexec", "/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort), fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort), fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
}, },
Ports: []api.ContainerPort{ Ports: []v1.ContainerPort{
{ {
Name: "http", Name: "http",
ContainerPort: TestContainerHttpPort, ContainerPort: TestContainerHttpPort,
@ -390,15 +390,15 @@ func (config *NetworkingTestConfig) createTestPodSpec() *api.Pod {
} }
func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) { func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) {
serviceSpec := &api.Service{ serviceSpec := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: nodePortServiceName, Name: nodePortServiceName,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeNodePort, Type: v1.ServiceTypeNodePort,
Ports: []api.ServicePort{ Ports: []v1.ServicePort{
{Port: ClusterHttpPort, Name: "http", Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)}, {Port: ClusterHttpPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)},
{Port: ClusterUdpPort, Name: "udp", Protocol: api.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)}, {Port: ClusterUdpPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)},
}, },
Selector: selector, Selector: selector,
}, },
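The NodePort service spec above is built entirely from versioned types now. A self-contained sketch of the same shape with hypothetical port numbers:

package framework

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/intstr"
)

// nodePortServiceSpec mirrors createNodePortService: one TCP and one UDP port,
// both exposed through a NodePort service. Port values here are placeholders.
func nodePortServiceSpec(name string, selector map[string]string) *v1.Service {
	return &v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.ServiceSpec{
			Type: v1.ServiceTypeNodePort,
			Ports: []v1.ServicePort{
				{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(8080)},
				{Port: 90, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(8081)},
			},
			Selector: selector,
		},
	}
}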
@ -434,7 +434,7 @@ func (config *NetworkingTestConfig) createTestPods() {
} }
} }
func (config *NetworkingTestConfig) createService(serviceSpec *api.Service) *api.Service { func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
_, err := config.getServiceClient().Create(serviceSpec) _, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err)) Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
@ -468,10 +468,10 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
By("Getting node addresses") By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute)) ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet) nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP) config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
if len(config.ExternalAddrs) < 2 { if len(config.ExternalAddrs) < 2 {
// fall back to legacy IPs // fall back to legacy IPs
config.ExternalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP) config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeLegacyHostIP)
} }
Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP")) Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
config.Nodes = nodeList.Items config.Nodes = nodeList.Items
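Address lookup keeps its fallback: prefer v1.NodeExternalIP entries, and if fewer than two are found, retry with the legacy host-IP address type. A minimal sketch (hypothetical helper name):

package framework

import "k8s.io/kubernetes/pkg/api/v1"

// externalAddrs prefers ExternalIP addresses and falls back to the legacy
// host IP type when fewer than two nodes report one, as setup() does above.
func externalAddrs(nodes *v1.NodeList) []string {
	addrs := NodeAddresses(nodes, v1.NodeExternalIP)
	if len(addrs) < 2 {
		addrs = NodeAddresses(nodes, v1.NodeLegacyHostIP)
	}
	return addrs
}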
@ -481,9 +481,9 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
for _, p := range config.NodePortService.Spec.Ports { for _, p := range config.NodePortService.Spec.Ports {
switch p.Protocol { switch p.Protocol {
case api.ProtocolUDP: case v1.ProtocolUDP:
config.NodeUdpPort = int(p.NodePort) config.NodeUdpPort = int(p.NodePort)
case api.ProtocolTCP: case v1.ProtocolTCP:
config.NodeHttpPort = int(p.NodePort) config.NodeHttpPort = int(p.NodePort)
default: default:
continue continue
@ -495,7 +495,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
func (config *NetworkingTestConfig) cleanup() { func (config *NetworkingTestConfig) cleanup() {
nsClient := config.getNamespacesClient() nsClient := config.getNamespacesClient()
nsList, err := nsClient.List(api.ListOptions{}) nsList, err := nsClient.List(v1.ListOptions{})
if err == nil { if err == nil {
for _, ns := range nsList.Items { for _, ns := range nsList.Items {
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace { if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
@ -507,8 +507,8 @@ func (config *NetworkingTestConfig) cleanup() {
// shuffleNodes copies nodes from the specified slice into a copy in random // shuffleNodes copies nodes from the specified slice into a copy in random
// order. It returns a new slice. // order. It returns a new slice.
func shuffleNodes(nodes []api.Node) []api.Node { func shuffleNodes(nodes []v1.Node) []v1.Node {
shuffled := make([]api.Node, len(nodes)) shuffled := make([]v1.Node, len(nodes))
perm := rand.Perm(len(nodes)) perm := rand.Perm(len(nodes))
for i, j := range perm { for i, j := range perm {
shuffled[j] = nodes[i] shuffled[j] = nodes[i]
@ -516,7 +516,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {
return shuffled return shuffled
} }
func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod { func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute)) ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet) nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
@ -529,7 +529,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
} }
// create pods, one for each node // create pods, one for each node
createdPods := make([]*api.Pod, 0, len(nodes)) createdPods := make([]*v1.Pod, 0, len(nodes))
for i, n := range nodes { for i, n := range nodes {
podName := fmt.Sprintf("%s-%d", podName, i) podName := fmt.Sprintf("%s-%d", podName, i)
pod := config.createNetShellPodSpec(podName, n.Name) pod := config.createNetShellPodSpec(podName, n.Name)
@ -539,7 +539,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
} }
// wait until all of them are up // wait until all of them are up
runningPods := make([]*api.Pod, 0, len(nodes)) runningPods := make([]*v1.Pod, 0, len(nodes))
for _, p := range createdPods { for _, p := range createdPods {
ExpectNoError(config.f.WaitForPodReady(p.Name)) ExpectNoError(config.f.WaitForPodReady(p.Name))
rp, err := config.getPodClient().Get(p.Name) rp, err := config.getPodClient().Get(p.Name)
@ -552,7 +552,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
func (config *NetworkingTestConfig) DeleteNetProxyPod() { func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0] pod := config.EndpointPods[0]
config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0)) config.getPodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:] config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted. // wait for pod being deleted.
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout) err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
@ -568,7 +568,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
time.Sleep(5 * time.Second) time.Sleep(5 * time.Second)
} }
func (config *NetworkingTestConfig) createPod(pod *api.Pod) *api.Pod { func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
return config.getPodClient().Create(pod) return config.getPodClient().Create(pod)
} }


@ -22,8 +22,8 @@ import (
"strings" "strings"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
) )
@ -141,15 +141,15 @@ func nodeUpgradeGKE(v string, img string) error {
// nodes it finds. // nodes it finds.
func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) { func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
// First, keep getting all of the nodes until we get the number we expect. // First, keep getting all of the nodes until we get the number we expect.
var nodeList *api.NodeList var nodeList *v1.NodeList
var errLast error var errLast error
start := time.Now() start := time.Now()
found := wait.Poll(Poll, nt, func() (bool, error) { found := wait.Poll(Poll, nt, func() (bool, error) {
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver // A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
// knows about all of the nodes. Thus, we retry the list nodes call // knows about all of the nodes. Thus, we retry the list nodes call
// until we get the expected number of nodes. // until we get the expected number of nodes.
nodeList, errLast = c.Core().Nodes().List(api.ListOptions{ nodeList, errLast = c.Core().Nodes().List(v1.ListOptions{
FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()}) FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
if errLast != nil { if errLast != nil {
return false, nil return false, nil
} }
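Because a rolling update can finish before the apiserver knows about every node, the list call itself is retried. A condensed sketch of that retry, with hypothetical interval and timeout values:

package framework

import (
	"fmt"
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/util/wait"
)

// listSchedulableNodes retries the node list until it succeeds, filtering out
// unschedulable nodes server-side, as CheckNodesReady does above.
func listSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	var nodes *v1.NodeList
	var lastErr error
	if err := wait.Poll(2*time.Second, time.Minute, func() (bool, error) {
		nodes, lastErr = c.Core().Nodes().List(v1.ListOptions{
			FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
		})
		return lastErr == nil, nil // swallow the error and retry
	}); err != nil {
		return nil, fmt.Errorf("could not list nodes: %v (last error: %v)", err, lastErr)
	}
	return nodes, nil
}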


@@ -22,9 +22,9 @@ import (
   "sync"
   "time"
-  "k8s.io/kubernetes/pkg/api"
   "k8s.io/kubernetes/pkg/api/errors"
-  unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+  "k8s.io/kubernetes/pkg/api/v1"
+  v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
   "k8s.io/kubernetes/pkg/labels"
   "k8s.io/kubernetes/pkg/util/sets"
   "k8s.io/kubernetes/pkg/util/wait"
@@ -50,11 +50,11 @@ func (f *Framework) PodClient() *PodClient {
 type PodClient struct {
   f *Framework
-  unversionedcore.PodInterface
+  v1core.PodInterface
 }
 // Create creates a new pod according to the framework specifications (don't wait for it to start).
-func (c *PodClient) Create(pod *api.Pod) *api.Pod {
+func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
   c.mungeSpec(pod)
   p, err := c.PodInterface.Create(pod)
   ExpectNoError(err, "Error creating Pod")
@@ -62,7 +62,7 @@ func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
 }
 // CreateSync creates a new pod according to the framework specifications, and wait for it to start.
-func (c *PodClient) CreateSync(pod *api.Pod) *api.Pod {
+func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
   p := c.Create(pod)
   ExpectNoError(c.f.WaitForPodRunning(p.Name))
   // Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
@@ -72,12 +72,12 @@ func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
 }
 // CreateBatch create a batch of pods. All pods are created before waiting.
-func (c *PodClient) CreateBatch(pods []*api.Pod) []*api.Pod {
-  ps := make([]*api.Pod, len(pods))
+func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
+  ps := make([]*v1.Pod, len(pods))
   var wg sync.WaitGroup
   for i, pod := range pods {
     wg.Add(1)
-    go func(i int, pod *api.Pod) {
+    go func(i int, pod *v1.Pod) {
       defer wg.Done()
       defer GinkgoRecover()
       ps[i] = c.CreateSync(pod)
@@ -90,7 +90,7 @@ func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
 // Update updates the pod object. It retries if there is a conflict, throw out error if
 // there is any other errors. name is the pod name, updateFn is the function updating the
 // pod object.
-func (c *PodClient) Update(name string, updateFn func(pod *api.Pod)) {
+func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
   ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
     pod, err := c.PodInterface.Get(name)
     if err != nil {
@@ -112,7 +112,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
 // DeleteSync deletes the pod and wait for the pod to disappear for `timeout`. If the pod doesn't
 // disappear before the timeout, it will fail the test.
-func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout time.Duration) {
+func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout time.Duration) {
   err := c.Delete(name, options)
   if err != nil && !errors.IsNotFound(err) {
     Failf("Failed to delete pod %q: %v", name, err)
@@ -122,7 +122,7 @@ func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout
 }
 // mungeSpec apply test-suite specific transformations to the pod spec.
-func (c *PodClient) mungeSpec(pod *api.Pod) {
+func (c *PodClient) mungeSpec(pod *v1.Pod) {
   if !TestContext.NodeE2E {
     return
   }
@@ -131,7 +131,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
   pod.Spec.NodeName = TestContext.NodeName
   // Node e2e does not support the default DNSClusterFirst policy. Set
   // the policy to DNSDefault, which is configured per node.
-  pod.Spec.DNSPolicy = api.DNSDefault
+  pod.Spec.DNSPolicy = v1.DNSDefault
   // PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
   // we should not munge ImagePullPolicy for cluster e2e pods.
@@ -142,7 +142,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
   // during the test.
   for i := range pod.Spec.Containers {
     c := &pod.Spec.Containers[i]
-    if c.ImagePullPolicy == api.PullAlways {
+    if c.ImagePullPolicy == v1.PullAlways {
       // If the image pull policy is PullAlways, the image doesn't need to be in
       // the white list or pre-pulled, because the image is expected to be pulled
       // in the test anyway.
@@ -153,7 +153,7 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
     Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
     // Do not pull images during the tests because the images in white list should have
    // been prepulled.
-    c.ImagePullPolicy = api.PullNever
+    c.ImagePullPolicy = v1.PullNever
   }
 }
@@ -162,11 +162,11 @@ func (c *PodClient) mungeSpec(pod *v1.Pod) {
 func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
   f := c.f
   Expect(waitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
-    func(pod *api.Pod) (bool, error) {
+    func(pod *v1.Pod) (bool, error) {
       switch pod.Status.Phase {
-      case api.PodFailed:
+      case v1.PodFailed:
         return true, fmt.Errorf("pod %q failed with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
-      case api.PodSucceeded:
+      case v1.PodSucceeded:
         return true, nil
       default:
         return false, nil
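PodClient.Update above retries on write conflicts. A minimal sketch of that retry loop against the v1 pod interface, assuming the names pi and mutate (both hypothetical):

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/api/errors"
    "k8s.io/kubernetes/pkg/api/v1"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
    "k8s.io/kubernetes/pkg/util/wait"
)

// updatePodWithRetry re-reads the pod and reapplies mutate until the
// Update either succeeds or fails with something other than a conflict.
func updatePodWithRetry(pi v1core.PodInterface, name string, mutate func(*v1.Pod)) error {
    return wait.Poll(500*time.Millisecond, 30*time.Second, func() (bool, error) {
        pod, err := pi.Get(name)
        if err != nil {
            return false, fmt.Errorf("failed to get pod %q: %v", name, err)
        }
        mutate(pod)
        if _, err = pi.Update(pod); err == nil {
            return true, nil
        }
        if errors.IsConflict(err) {
            // Another writer raced us; poll again with a fresh copy.
            return false, nil
        }
        return false, fmt.Errorf("failed to update pod %q: %v", name, err)
    })
}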

View File

@@ -29,8 +29,8 @@ import (
   "time"
   . "github.com/onsi/gomega"
-  "k8s.io/kubernetes/pkg/api"
+  "k8s.io/kubernetes/pkg/api/v1"
-  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
   utilruntime "k8s.io/kubernetes/pkg/util/runtime"
   "k8s.io/kubernetes/pkg/util/system"
 )
@@ -250,7 +250,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
       finished: false,
     })
   } else {
-    pods, err := c.Core().Pods("kube-system").List(api.ListOptions{})
+    pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
     if err != nil {
       Logf("Error while listing Pods: %v", err)
       return nil, err
@@ -262,14 +262,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
         g.containerIDs = append(g.containerIDs, containerID)
       }
     }
-    nodeList, err := c.Core().Nodes().List(api.ListOptions{})
+    nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
     if err != nil {
       Logf("Error while listing Nodes: %v", err)
       return nil, err
     }
     for _, node := range nodeList.Items {
-      if !options.masterOnly || system.IsMasterNode(&node) {
+      if !options.masterOnly || system.IsMasterNode(node.Name) {
         g.workerWg.Add(1)
         g.workers = append(g.workers, resourceGatherWorker{
           c: c,

File diff suppressed because it is too large

View File

@@ -117,7 +117,7 @@ func gatherMetrics(f *framework.Framework) {
 var _ = framework.KubeDescribe("Garbage collector", func() {
   f := framework.NewDefaultFramework("gc")
   It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
-    clientSet := f.ClientSet_1_5
+    clientSet := f.ClientSet
     rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
     podClient := clientSet.Core().Pods(f.Namespace.Name)
     rcName := "simpletest.rc"
@@ -168,7 +168,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
   })
   It("[Feature:GarbageCollector] should orphan pods created by rc if delete options say so", func() {
-    clientSet := f.ClientSet_1_5
+    clientSet := f.ClientSet
     rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
     podClient := clientSet.Core().Pods(f.Namespace.Name)
     rcName := "simpletest.rc"
@@ -230,7 +230,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
   })
   It("[Feature:GarbageCollector] should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
-    clientSet := f.ClientSet_1_5
+    clientSet := f.ClientSet
     rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
     podClient := clientSet.Core().Pods(f.Namespace.Name)
     rcName := "simpletest.rc"

View File

@@ -121,7 +121,7 @@ func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
 var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
   f := framework.NewDefaultFramework("clientset")
   It("should create pods, delete pods, watch pods", func() {
-    podClient := f.ClientSet_1_5.Core().Pods(f.Namespace.Name)
+    podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
     By("constructing the pod")
     name := "pod" + string(uuid.NewUUID())
     value := strconv.Itoa(time.Now().Nanosecond())
@@ -240,7 +240,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
   f := framework.NewDefaultFramework("clientset")
   It("should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs", func() {
     var enabled bool
-    groupList, err := f.ClientSet_1_5.Discovery().ServerGroups()
+    groupList, err := f.ClientSet.Discovery().ServerGroups()
     ExpectNoError(err)
     for _, group := range groupList.Groups {
       if group.Name == v2alpha1.GroupName {
@@ -256,7 +256,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
       framework.Logf("%s is not enabled, test skipped", v2alpha1.SchemeGroupVersion)
       return
     }
-    cronJobClient := f.ClientSet_1_5.BatchV2alpha1().CronJobs(f.Namespace.Name)
+    cronJobClient := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name)
     By("constructing the cronJob")
     name := "cronjob" + string(uuid.NewUUID())
     value := strconv.Itoa(time.Now().Nanosecond())
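The v2alpha1 guard above generalizes: a test can probe the discovery endpoint before exercising an alpha group. A sketch of that check, assuming the discovery interface import path of this era and a hypothetical helper name:

import (
    "k8s.io/kubernetes/pkg/api/unversioned"
    "k8s.io/kubernetes/pkg/client/typed/discovery"
)

// groupVersionEnabled reports whether the apiserver advertises the given
// group/version pair, mirroring the ServerGroups loop above.
func groupVersionEnabled(d discovery.DiscoveryInterface, gv unversioned.GroupVersion) (bool, error) {
    groupList, err := d.ServerGroups()
    if err != nil {
        return false, err
    }
    for _, group := range groupList.Groups {
        if group.Name != gv.Group {
            continue
        }
        for _, version := range group.Versions {
            if version.GroupVersion == gv.String() {
                return true, nil
            }
        }
    }
    return false, nil
}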

View File

@@ -20,8 +20,8 @@ import (
   "fmt"
   "os/exec"
-  "k8s.io/kubernetes/pkg/api"
   "k8s.io/kubernetes/pkg/api/unversioned"
+  "k8s.io/kubernetes/pkg/api/v1"
   "k8s.io/kubernetes/pkg/apimachinery/registered"
   "k8s.io/kubernetes/pkg/util/uuid"
   "k8s.io/kubernetes/test/e2e/framework"
@@ -65,28 +65,28 @@ func doTestWriteAndReadToLocalSsd(f *framework.Framework) {
   f.TestContainerOutput(msg, pod, 0, out)
 }
-func testPodWithSsd(command string) *api.Pod {
+func testPodWithSsd(command string) *v1.Pod {
   containerName := "test-container"
   volumeName := "test-ssd-volume"
   path := "/mnt/disks/ssd0"
   podName := "pod-" + string(uuid.NewUUID())
   image := "ubuntu:14.04"
-  return &api.Pod{
+  return &v1.Pod{
     TypeMeta: unversioned.TypeMeta{
       Kind: "Pod",
-      APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+      APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
     },
-    ObjectMeta: api.ObjectMeta{
+    ObjectMeta: v1.ObjectMeta{
       Name: podName,
     },
-    Spec: api.PodSpec{
-      Containers: []api.Container{
+    Spec: v1.PodSpec{
+      Containers: []v1.Container{
         {
           Name: containerName,
           Image: image,
           Command: []string{"/bin/sh"},
           Args: []string{"-c", command},
-          VolumeMounts: []api.VolumeMount{
+          VolumeMounts: []v1.VolumeMount{
             {
               Name: volumeName,
               MountPath: path,
@@ -94,12 +94,12 @@ func testPodWithSsd(command string) *v1.Pod {
           },
         },
       },
-      RestartPolicy: api.RestartPolicyNever,
-      Volumes: []api.Volume{
+      RestartPolicy: v1.RestartPolicyNever,
+      Volumes: []v1.Volume{
         {
           Name: volumeName,
-          VolumeSource: api.VolumeSource{
-            HostPath: &api.HostPathVolumeSource{
+          VolumeSource: v1.VolumeSource{
+            HostPath: &v1.HostPathVolumeSource{
               Path: path,
             },
           },

View File

@@ -26,7 +26,7 @@ import (
   "time"
   . "github.com/onsi/ginkgo"
-  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
   "k8s.io/kubernetes/test/e2e/framework"
 )

View File

@@ -19,8 +19,8 @@ package e2e
 import (
   "time"
-  "k8s.io/kubernetes/pkg/api"
-  "k8s.io/kubernetes/pkg/apis/autoscaling"
+  "k8s.io/kubernetes/pkg/api/v1"
+  autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
   "k8s.io/kubernetes/test/e2e/framework"
   . "github.com/onsi/ginkgo"
@@ -177,7 +177,7 @@ func scaleDown(name, kind string, checkStability bool, rc *ResourceConsumer, f *
 func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) {
   hpa := &autoscaling.HorizontalPodAutoscaler{
-    ObjectMeta: api.ObjectMeta{
+    ObjectMeta: v1.ObjectMeta{
       Name: rc.name,
       Namespace: rc.framework.Namespace.Name,
     },
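For context, a complete autoscaling/v1 HPA object has this shape; a minimal sketch whose helper name and arguments are illustrative, not part of the commit:

import (
    "k8s.io/kubernetes/pkg/api/v1"
    autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
)

// newCPUAutoscaler builds an autoscaling/v1 HPA targeting a
// ReplicationController and scaling on average CPU utilization.
func newCPUAutoscaler(name, ns string, minReplicas, maxReplicas, targetCPU int32) *autoscaling.HorizontalPodAutoscaler {
    return &autoscaling.HorizontalPodAutoscaler{
        ObjectMeta: v1.ObjectMeta{Name: name, Namespace: ns},
        Spec: autoscaling.HorizontalPodAutoscalerSpec{
            ScaleTargetRef: autoscaling.CrossVersionObjectReference{
                Kind: "ReplicationController",
                Name: name,
            },
            MinReplicas:                    &minReplicas,
            MaxReplicas:                    maxReplicas,
            TargetCPUUtilizationPercentage: &targetCPU,
        },
    }
}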

View File

@@ -38,12 +38,13 @@ import (
   "time"
   "k8s.io/kubernetes/pkg/api"
+  "k8s.io/kubernetes/pkg/api/v1"
   compute "google.golang.org/api/compute/v1"
   "google.golang.org/api/googleapi"
   apierrs "k8s.io/kubernetes/pkg/api/errors"
-  "k8s.io/kubernetes/pkg/apis/extensions"
+  extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
-  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
   gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
   "k8s.io/kubernetes/pkg/labels"
   "k8s.io/kubernetes/pkg/runtime"
@@ -280,16 +281,16 @@ func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host
   }
   cert := c.Bytes()
   key := k.Bytes()
-  secret := &api.Secret{
-    ObjectMeta: api.ObjectMeta{
+  secret := &v1.Secret{
+    ObjectMeta: v1.ObjectMeta{
       Name: tls.SecretName,
     },
     Data: map[string][]byte{
-      api.TLSCertKey: cert,
-      api.TLSPrivateKeyKey: key,
+      v1.TLSCertKey: cert,
+      v1.TLSPrivateKeyKey: key,
     },
   }
-  var s *api.Secret
+  var s *v1.Secret
   if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName); err == nil {
     // TODO: Retry the update. We don't really expect anything to conflict though.
     framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
@@ -841,8 +842,8 @@ type GCEIngressController struct {
   rcPath string
   UID string
   staticIPName string
-  rc *api.ReplicationController
-  svc *api.Service
+  rc *v1.ReplicationController
+  svc *v1.Service
   c clientset.Interface
   cloud framework.CloudConfig
 }
@@ -854,8 +855,8 @@ func newTestJig(c clientset.Interface) *testJig {
 // NginxIngressController manages implementation details of Ingress on Nginx.
 type NginxIngressController struct {
   ns string
-  rc *api.ReplicationController
-  pod *api.Pod
+  rc *v1.ReplicationController
+  pod *v1.Pod
   c clientset.Interface
   externalIP string
 }
@@ -874,7 +875,7 @@ func (cont *NginxIngressController) init() {
   framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
   sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
   ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
-  pods, err := cont.c.Core().Pods(cont.ns).List(api.ListOptions{LabelSelector: sel})
+  pods, err := cont.c.Core().Pods(cont.ns).List(v1.ListOptions{LabelSelector: sel.String()})
   ExpectNoError(err)
   if len(pods.Items) == 0 {
     framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
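The secret built in createSecret reduces to the standard two-key TLS layout. A sketch, with cert and key assumed to be PEM-encoded bytes:

import "k8s.io/kubernetes/pkg/api/v1"

// newTLSSecret packs a certificate/key pair under the well-known
// v1.TLSCertKey and v1.TLSPrivateKeyKey data keys, as in the diff above.
func newTLSSecret(name string, cert, key []byte) *v1.Secret {
    return &v1.Secret{
        ObjectMeta: v1.ObjectMeta{Name: name},
        Data: map[string][]byte{
            v1.TLSCertKey:       cert,
            v1.TLSPrivateKeyKey: key,
        },
    }
}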

View File

@@ -22,7 +22,7 @@ import (
   . "github.com/onsi/ginkgo"
   . "github.com/onsi/gomega"
-  "k8s.io/kubernetes/pkg/api"
+  "k8s.io/kubernetes/pkg/api/v1"
   "k8s.io/kubernetes/test/e2e/framework"
 )
@@ -51,13 +51,13 @@ var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Fl
   })
 })
-func runPod(f *framework.Framework, name, image string) *api.Pod {
-  pod := &api.Pod{
-    ObjectMeta: api.ObjectMeta{
+func runPod(f *framework.Framework, name, image string) *v1.Pod {
+  pod := &v1.Pod{
+    ObjectMeta: v1.ObjectMeta{
       Name: name,
     },
-    Spec: api.PodSpec{
-      Containers: []api.Container{
+    Spec: v1.PodSpec{
+      Containers: []v1.Container{
        {
          Name: name,
          Image: image,

View File

@@ -21,8 +21,10 @@ import (
   "k8s.io/kubernetes/pkg/api"
   "k8s.io/kubernetes/pkg/api/errors"
-  "k8s.io/kubernetes/pkg/apis/batch"
-  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+  "k8s.io/kubernetes/pkg/api/v1"
+  batchinternal "k8s.io/kubernetes/pkg/apis/batch"
+  batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
   "k8s.io/kubernetes/pkg/kubectl"
   "k8s.io/kubernetes/pkg/labels"
   "k8s.io/kubernetes/pkg/util/wait"
@@ -49,7 +51,7 @@ var _ = framework.KubeDescribe("Job", func() {
   // Simplest case: all pods succeed promptly
   It("should run a job to completion when tasks succeed", func() {
     By("Creating a job")
-    job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
+    job := newTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -68,7 +70,7 @@ var _ = framework.KubeDescribe("Job", func() {
     // up to 5 minutes between restarts, making test timeouts
     // due to successive failures too likely with a reasonable
     // test timeout.
-    job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
+    job := newTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -86,7 +88,7 @@ var _ = framework.KubeDescribe("Job", func() {
     // Worst case analysis: 15 failures, each taking 1 minute to
     // run due to some slowness, 1 in 2^15 chance of happening,
     // causing test flake. Should be very rare.
-    job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
+    job := newTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -97,7 +99,7 @@ var _ = framework.KubeDescribe("Job", func() {
   It("should keep restarting failed pods", func() {
     By("Creating a job")
-    job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
+    job := newTestJob("fail", "all-fail", v1.RestartPolicyNever, parallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -115,7 +117,7 @@ var _ = framework.KubeDescribe("Job", func() {
     startParallelism := int32(1)
     endParallelism := int32(2)
     By("Creating a job")
-    job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
+    job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -124,7 +126,7 @@ var _ = framework.KubeDescribe("Job", func() {
     Expect(err).NotTo(HaveOccurred())
     By("scale job up")
-    scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+    scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
     Expect(err).NotTo(HaveOccurred())
     waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
     waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -140,7 +142,7 @@ var _ = framework.KubeDescribe("Job", func() {
     startParallelism := int32(2)
     endParallelism := int32(1)
     By("Creating a job")
-    job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
+    job := newTestJob("notTerminate", "scale-down", v1.RestartPolicyNever, startParallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -149,7 +151,7 @@ var _ = framework.KubeDescribe("Job", func() {
     Expect(err).NotTo(HaveOccurred())
     By("scale job down")
-    scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+    scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
     Expect(err).NotTo(HaveOccurred())
     waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
     waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -163,7 +165,7 @@ var _ = framework.KubeDescribe("Job", func() {
   It("should delete a job", func() {
     By("Creating a job")
-    job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+    job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
     Expect(err).NotTo(HaveOccurred())
@@ -172,7 +174,7 @@ var _ = framework.KubeDescribe("Job", func() {
     Expect(err).NotTo(HaveOccurred())
     By("delete a job")
-    reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
+    reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
     Expect(err).NotTo(HaveOccurred())
     timeout := 1 * time.Minute
     err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
@@ -186,7 +188,7 @@ var _ = framework.KubeDescribe("Job", func() {
   It("should fail a job", func() {
     By("Creating a job")
-    job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+    job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
     activeDeadlineSeconds := int64(10)
     job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
     job, err := createJob(f.ClientSet, f.Namespace.Name, job)
@@ -211,35 +213,35 @@ var _ = framework.KubeDescribe("Job", func() {
 })
 // newTestJob returns a job which does one of several testing behaviors.
-func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
+func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
   job := &batch.Job{
-    ObjectMeta: api.ObjectMeta{
+    ObjectMeta: v1.ObjectMeta{
       Name: name,
     },
     Spec: batch.JobSpec{
       Parallelism: &parallelism,
       Completions: &completions,
       ManualSelector: newBool(false),
-      Template: api.PodTemplateSpec{
-        ObjectMeta: api.ObjectMeta{
+      Template: v1.PodTemplateSpec{
+        ObjectMeta: v1.ObjectMeta{
          Labels: map[string]string{jobSelectorKey: name},
        },
-        Spec: api.PodSpec{
+        Spec: v1.PodSpec{
          RestartPolicy: rPol,
-          Volumes: []api.Volume{
+          Volumes: []v1.Volume{
            {
              Name: "data",
-              VolumeSource: api.VolumeSource{
-                EmptyDir: &api.EmptyDirVolumeSource{},
+              VolumeSource: v1.VolumeSource{
+                EmptyDir: &v1.EmptyDirVolumeSource{},
              },
            },
          },
-          Containers: []api.Container{
+          Containers: []v1.Container{
            {
              Name: "c",
              Image: "gcr.io/google_containers/busybox:1.24",
              Command: []string{},
-              VolumeMounts: []api.VolumeMount{
+              VolumeMounts: []v1.VolumeMount{
                {
                  MountPath: "/data",
                  Name: "data",
@@ -293,14 +295,14 @@ func deleteJob(c clientset.Interface, ns, name string) error {
 func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
   label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
   return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
-    options := api.ListOptions{LabelSelector: label}
+    options := v1.ListOptions{LabelSelector: label.String()}
     pods, err := c.Core().Pods(ns).List(options)
     if err != nil {
       return false, err
     }
     count := int32(0)
     for _, p := range pods.Items {
-      if p.Status.Phase == api.PodRunning {
+      if p.Status.Phase == v1.PodRunning {
         count++
       }
     }
@@ -327,7 +329,7 @@ func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Dura
       return false, err
     }
     for _, c := range curr.Status.Conditions {
-      if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
+      if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
         return true, nil
       }
     }
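waitForAllPodsRunning above combines the two migrated idioms in one place: string label selectors and v1 pod phases. The inner count, pulled out as a sketch (the function name is illustrative):

import (
    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/labels"
)

// countRunningPods lists pods matching sel and counts those whose
// status phase is v1.PodRunning.
func countRunningPods(c clientset.Interface, ns string, sel labels.Selector) (int32, error) {
    pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: sel.String()})
    if err != nil {
        return 0, err
    }
    count := int32(0)
    for _, p := range pods.Items {
        if p.Status.Phase == v1.PodRunning {
            count++
        }
    }
    return count, nil
}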

View File

@@ -20,6 +20,7 @@ import (
   "time"
   "k8s.io/kubernetes/pkg/api"
+  "k8s.io/kubernetes/pkg/api/v1"
   "k8s.io/kubernetes/pkg/labels"
   "k8s.io/kubernetes/test/e2e/framework"
@@ -69,7 +70,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
   // Wait for the Kibana pod(s) to enter the running state.
   By("Checking to make sure the Kibana pods are running")
   label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
-  options := api.ListOptions{LabelSelector: label}
+  options := v1.ListOptions{LabelSelector: label.String()}
   pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
   Expect(err).NotTo(HaveOccurred())
   for _, pod := range pods.Items {

View File

@@ -24,7 +24,7 @@ import (
   "strings"
   "time"
-  "k8s.io/kubernetes/pkg/api"
+  "k8s.io/kubernetes/pkg/api/v1"
   "k8s.io/kubernetes/test/e2e/framework"
   "k8s.io/kubernetes/test/images/net/nat"
@@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("Network", func() {
   It("should set TCP CLOSE_WAIT timeout", func() {
     nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
-    ips := collectAddresses(nodes, api.NodeInternalIP)
+    ips := collectAddresses(nodes, v1.NodeInternalIP)
     if len(nodes.Items) < 2 {
       framework.Skipf(
@@ -56,7 +56,7 @@ var _ = framework.KubeDescribe("Network", func() {
     }
     type NodeInfo struct {
-      node *api.Node
+      node *v1.Node
       name string
       nodeIp string
     }
@@ -75,15 +75,15 @@ var _ = framework.KubeDescribe("Network", func() {
     zero := int64(0)
-    clientPodSpec := &api.Pod{
-      ObjectMeta: api.ObjectMeta{
+    clientPodSpec := &v1.Pod{
+      ObjectMeta: v1.ObjectMeta{
        Name: "e2e-net-client",
        Namespace: fr.Namespace.Name,
        Labels: map[string]string{"app": "e2e-net-client"},
      },
-      Spec: api.PodSpec{
+      Spec: v1.PodSpec{
        NodeName: clientNodeInfo.name,
-        Containers: []api.Container{
+        Containers: []v1.Container{
          {
            Name: "e2e-net-client",
            Image: kubeProxyE2eImage,
@@ -97,15 +97,15 @@ var _ = framework.KubeDescribe("Network", func() {
      },
    }
-    serverPodSpec := &api.Pod{
-      ObjectMeta: api.ObjectMeta{
+    serverPodSpec := &v1.Pod{
+      ObjectMeta: v1.ObjectMeta{
        Name: "e2e-net-server",
        Namespace: fr.Namespace.Name,
        Labels: map[string]string{"app": "e2e-net-server"},
      },
-      Spec: api.PodSpec{
+      Spec: v1.PodSpec{
        NodeName: serverNodeInfo.name,
-        Containers: []api.Container{
+        Containers: []v1.Container{
          {
            Name: "e2e-net-server",
            Image: kubeProxyE2eImage,
@@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("Network", func() {
              testDaemonTcpPort,
              postFinTimeoutSeconds),
          },
-          Ports: []api.ContainerPort{
+          Ports: []v1.ContainerPort{
            {
              Name: "tcp",
              ContainerPort: testDaemonTcpPort,

View File

@@ -41,12 +41,12 @@ import (
   "github.com/elazarl/goproxy"
   "github.com/ghodss/yaml"
-  "k8s.io/kubernetes/pkg/api"
   "k8s.io/kubernetes/pkg/api/annotations"
   apierrs "k8s.io/kubernetes/pkg/api/errors"
   "k8s.io/kubernetes/pkg/api/resource"
   "k8s.io/kubernetes/pkg/api/unversioned"
-  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+  "k8s.io/kubernetes/pkg/api/v1"
+  clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
   "k8s.io/kubernetes/pkg/controller"
   "k8s.io/kubernetes/pkg/kubectl/cmd/util"
   "k8s.io/kubernetes/pkg/labels"
@@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
     framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
       "--schedule="+schedule, "--image="+busyboxImage, nsFlag)
     By("verifying the ScheduledJob " + sjName + " was created")
-    sj, err := c.Batch().CronJobs(ns).Get(sjName)
+    sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName)
     if err != nil {
       framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
     }
@@ -215,7 +215,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
     if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
       framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
     }
-    if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+    if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
       framework.Failf("Failed creating a ScheduledJob with correct restart policy for --restart=OnFailure")
     }
   })
@@ -241,7 +241,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
     framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
       "--schedule="+schedule, "--image="+busyboxImage, nsFlag)
     By("verifying the CronJob " + cjName + " was created")
-    sj, err := c.Batch().CronJobs(ns).Get(cjName)
+    sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cjName)
     if err != nil {
       framework.Failf("Failed getting CronJob %s: %v", cjName, err)
     }
@@ -252,7 +252,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
     if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
       framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
     }
-    if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+    if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
       framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
     }
   })
@@ -268,10 +268,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     return f.NewClusterVerification(
       framework.PodStateVerification{
         Selectors: map[string]string{"app": "redis"},
-        ValidPhases: []api.PodPhase{api.PodRunning /*api.PodPending*/},
+        ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
       })
   }
-  forEachPod := func(podFunc func(p api.Pod)) {
+  forEachPod := func(podFunc func(p v1.Pod)) {
     clusterState().ForEach(podFunc)
   }
   var c clientset.Interface
@@ -289,7 +289,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
     if err != nil || len(pods) < atLeast {
       // TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
-      framework.DumpAllNamespaceInfo(c, f.ClientSet_1_5, ns)
+      framework.DumpAllNamespaceInfo(f.ClientSet, ns)
       framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
     }
   }
@@ -519,8 +519,8 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
       WithStdinData("abcd1234\n").
       ExecOrDie()
     Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
-    g := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
-    runTestPod, _, err := util.GetFirstPod(f.ClientSet.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
+    g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+    runTestPod, _, err := util.GetFirstPod(f.InternalClientset.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
     if err != nil {
       os.Exit(1)
     }
@@ -646,7 +646,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     waitForOrFailWithDebug(1)
     // Pod
-    forEachPod(func(pod api.Pod) {
+    forEachPod(func(pod v1.Pod) {
       output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag)
       requiredStrings := [][]string{
         {"Name:", "redis-master-"},
@@ -700,7 +700,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     // Node
     // It should be OK to list unschedulable Nodes here.
-    nodes, err := c.Core().Nodes().List(api.ListOptions{})
+    nodes, err := c.Core().Nodes().List(v1.ListOptions{})
     Expect(err).NotTo(HaveOccurred())
     node := nodes.Items[0]
     output = framework.RunKubectlOrDie("describe", "node", node.Name)
@@ -748,7 +748,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     // It may take a while for the pods to get registered in some cases, wait to be sure.
     By("Waiting for Redis master to start.")
     waitForOrFailWithDebug(1)
-    forEachPod(func(pod api.Pod) {
+    forEachPod(func(pod v1.Pod) {
       framework.Logf("wait on redis-master startup in %v ", ns)
       framework.LookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", framework.PodStartTimeout)
     })
@@ -873,7 +873,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     By("Waiting for Redis master to start.")
     waitForOrFailWithDebug(1)
-    forEachPod(func(pod api.Pod) {
+    forEachPod(func(pod v1.Pod) {
       By("checking for a matching strings")
       _, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
       Expect(err).NotTo(HaveOccurred())
@@ -923,12 +923,12 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     By("Waiting for Redis master to start.")
     waitForOrFailWithDebug(1)
     By("patching all pods")
-    forEachPod(func(pod api.Pod) {
+    forEachPod(func(pod v1.Pod) {
       framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
     })
     By("checking annotations")
-    forEachPod(func(pod api.Pod) {
+    forEachPod(func(pod v1.Pod) {
       found := false
       for key, val := range pod.Annotations {
        if key == "x" && val == "y" {
@@ -1082,7 +1082,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     By("rolling-update to same image controller")
-    runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(api.PullIfNotPresent), nsFlag)
+    runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
     framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
   })
 })
@@ -1166,7 +1166,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
       framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
     }
-    if job.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+    if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
       framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
     }
   })
@@ -1199,7 +1199,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
       framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
     }
-    if pod.Spec.RestartPolicy != api.RestartPolicyNever {
+    if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
       framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
     }
   })
@@ -1333,10 +1333,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
 framework.KubeDescribe("Kubectl taint", func() {
   It("should update the taint on a node", func() {
-    testTaint := api.Taint{
+    testTaint := v1.Taint{
       Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
       Value: "testing-taint-value",
-      Effect: api.TaintEffectNoSchedule,
+      Effect: v1.TaintEffectNoSchedule,
     }
     nodeName := getNodeThatCanRunPod(f)
@@ -1364,10 +1364,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
   })
   It("should remove all the taints with the same key off a node", func() {
-    testTaint := api.Taint{
+    testTaint := v1.Taint{
       Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
       Value: "testing-taint-value",
-      Effect: api.TaintEffectNoSchedule,
+      Effect: v1.TaintEffectNoSchedule,
     }
     nodeName := getNodeThatCanRunPod(f)
@@ -1385,10 +1385,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     }
     checkOutput(output, requiredStrings)
-    newTestTaint := api.Taint{
+    newTestTaint := v1.Taint{
       Key: testTaint.Key,
       Value: "another-testing-taint-value",
-      Effect: api.TaintEffectPreferNoSchedule,
+      Effect: v1.TaintEffectPreferNoSchedule,
     }
     By("adding another taint " + newTestTaint.ToString() + " to the node")
     runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString())
@@ -1434,11 +1434,11 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     if len(quota.Spec.Hard) != 2 {
       framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
     }
-    r, found := quota.Spec.Hard[api.ResourcePods]
+    r, found := quota.Spec.Hard[v1.ResourcePods]
     if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
       framework.Failf("Expected pods=1000000, got %v", r)
     }
-    r, found = quota.Spec.Hard[api.ResourceServices]
+    r, found = quota.Spec.Hard[v1.ResourceServices]
     if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
       framework.Failf("Expected services=1000000, got %v", r)
     }
@@ -1461,14 +1461,14 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
     if len(quota.Spec.Scopes) != 2 {
       framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
     }
-    scopes := make(map[api.ResourceQuotaScope]struct{})
+    scopes := make(map[v1.ResourceQuotaScope]struct{})
     for _, scope := range quota.Spec.Scopes {
       scopes[scope] = struct{}{}
     }
-    if _, found := scopes[api.ResourceQuotaScopeBestEffort]; !found {
+    if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
       framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
     }
-    if _, found := scopes[api.ResourceQuotaScopeNotTerminating]; !found {
+    if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
       framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
     }
   })
@@ -1640,8 +1640,8 @@ func readBytesFromFile(filename string) []byte {
   return data
 }
-func readReplicationControllerFromString(contents string) *api.ReplicationController {
-  rc := api.ReplicationController{}
+func readReplicationControllerFromString(contents string) *v1.ReplicationController {
+  rc := v1.ReplicationController{}
   if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
     framework.Failf(err.Error())
   }
@@ -1662,12 +1662,12 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader {
   return bytes.NewReader(data)
 }
-func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) {
-  var rcs *api.ReplicationControllerList
+func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
+  var rcs *v1.ReplicationControllerList
   var err error
   for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
     label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
-    options := api.ListOptions{LabelSelector: label}
+    options := v1.ListOptions{LabelSelector: label.String()}
     rcs, err = c.Core().ReplicationControllers(ns).List(options)
     Expect(err).NotTo(HaveOccurred())
     if len(rcs.Items) > 0 {
@@ -1684,7 +1684,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
   }
 }
-func validateReplicationControllerConfiguration(rc api.ReplicationController) {
+func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
   if rc.Name == "redis-master" {
     if _, ok := rc.Annotations[annotations.LastAppliedConfigAnnotation]; !ok {
       framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
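The quota scope check near the end of this file is a set-membership test over quota.Spec.Scopes. Pulled out as a sketch (the helper name is illustrative):

import "k8s.io/kubernetes/pkg/api/v1"

// hasScopes reports whether the quota declares every scope in want,
// mirroring the scopes-map construction in the test above.
func hasScopes(quota *v1.ResourceQuota, want ...v1.ResourceQuotaScope) bool {
    declared := make(map[v1.ResourceQuotaScope]struct{}, len(quota.Spec.Scopes))
    for _, s := range quota.Spec.Scopes {
        declared[s] = struct{}{}
    }
    for _, w := range want {
        if _, ok := declared[w]; !ok {
            return false
        }
    }
    return true
}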

View File

@ -21,8 +21,8 @@ import (
"strings" "strings"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -98,7 +98,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) { func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
const maxRetries = 5 const maxRetries = 5
for nodeName := range nodeNames { for nodeName := range nodeNames {
var node *api.Node var node *v1.Node
var err error var err error
for i := 0; i < maxRetries; i++ { for i := 0; i < maxRetries; i++ {
node, err = c.Core().Nodes().Get(nodeName) node, err = c.Core().Nodes().Get(nodeName)
@ -189,12 +189,13 @@ var _ = framework.KubeDescribe("kubelet", func() {
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID())) rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
Expect(framework.RunRC(testutils.RCConfig{ Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
Name: rcName, InternalClient: f.InternalClientset,
Namespace: f.Namespace.Name, Name: rcName,
Image: framework.GetPauseImageName(f.ClientSet), Namespace: f.Namespace.Name,
Replicas: totalPods, Image: framework.GetPauseImageName(f.ClientSet),
NodeSelector: nodeLabels, Replicas: totalPods,
NodeSelector: nodeLabels,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
// Perform a sanity check so that we know all desired pods are // Perform a sanity check so that we know all desired pods are
// running on the nodes according to kubelet. The timeout is set to // running on the nodes according to kubelet. The timeout is set to
@ -207,7 +208,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
} }
By("Deleting the RC") By("Deleting the RC")
framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName) framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the // Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its // node. The /runningpods handler checks the container runtime (or its
// cache) and returns a list of running pods. Some possible causes of // cache) and returns a list of running pods. Some possible causes of
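The same file also shows the second recurring change: testutils.RCConfig gains an InternalClient field, and framework.DeleteRCAndPods now takes both clientsets. Reconstructed from the right-hand column above into one readable unit (a sketch; the wrapper function is hypothetical, the fields and calls are those used by these tests):

// runAndCleanupPauseRC is a hypothetical wrapper showing the new wiring.
func runAndCleanupPauseRC(f *framework.Framework, rcName string, totalPods int, nodeLabels map[string]string) {
	Expect(framework.RunRC(testutils.RCConfig{
		Client:         f.ClientSet,         // versioned clientset for API calls
		InternalClient: f.InternalClientset, // internal clientset, still required by some helpers
		Name:           rcName,
		Namespace:      f.Namespace.Name,
		Image:          framework.GetPauseImageName(f.ClientSet),
		Replicas:       totalPods,
		NodeSelector:   nodeLabels,
	})).NotTo(HaveOccurred())
	// Deletion threads the internal clientset through as well.
	framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}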


@ -22,7 +22,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats" "k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -70,11 +70,12 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
// TODO: Use a more realistic workload // TODO: Use a more realistic workload
Expect(framework.RunRC(testutils.RCConfig{ Expect(framework.RunRC(testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
Name: rcName, InternalClient: f.InternalClientset,
Namespace: f.Namespace.Name, Name: rcName,
Image: framework.GetPauseImageName(f.ClientSet), Namespace: f.Namespace.Name,
Replicas: totalPods, Image: framework.GetPauseImageName(f.ClientSet),
Replicas: totalPods,
})).NotTo(HaveOccurred()) })).NotTo(HaveOccurred())
// Log once and flush the stats. // Log once and flush the stats.
@ -116,7 +117,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
verifyCPULimits(expectedCPU, cpuSummary) verifyCPULimits(expectedCPU, cpuSummary)
By("Deleting the RC") By("Deleting the RC")
framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName) framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
} }
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) { func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {


@ -19,8 +19,8 @@ package e2e
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
@ -37,8 +37,8 @@ var _ = framework.KubeDescribe("LimitRange", func() {
max := getResourceList("500m", "500Mi") max := getResourceList("500m", "500Mi")
defaultLimit := getResourceList("500m", "500Mi") defaultLimit := getResourceList("500m", "500Mi")
defaultRequest := getResourceList("100m", "200Mi") defaultRequest := getResourceList("100m", "200Mi")
maxLimitRequestRatio := api.ResourceList{} maxLimitRequestRatio := v1.ResourceList{}
limitRange := newLimitRange("limit-range", api.LimitTypeContainer, limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
min, max, min, max,
defaultLimit, defaultRequest, defaultLimit, defaultRequest,
maxLimitRequestRatio) maxLimitRequestRatio)
@ -47,13 +47,13 @@ var _ = framework.KubeDescribe("LimitRange", func() {
By("Fetching the LimitRange to ensure it has proper values") By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name) limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name)
expected := api.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit} expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
actual := api.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default} actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual) err = equalResourceRequirement(expected, actual)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with no resource requirements") By("Creating a Pod with no resource requirements")
pod := newTestPod(f, "pod-no-resources", api.ResourceList{}, api.ResourceList{}) pod := newTestPod(f, "pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
// This is an interesting case, so it's worth a comment // This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request // If you specify a Limit, and no Request, the Limit will default to the Request
// This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied // This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
expected = api.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")} expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")}
for i := range pod.Spec.Containers { for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources) err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil { if err != nil {
@ -91,19 +91,19 @@ var _ = framework.KubeDescribe("LimitRange", func() {
} }
By("Failing to create a Pod with less than min resources") By("Failing to create a Pod with less than min resources")
pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), api.ResourceList{}) pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources") By("Failing to create a Pod with more than max resources")
pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), api.ResourceList{}) pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
}) })
}) })
func equalResourceRequirement(expected api.ResourceRequirements, actual api.ResourceRequirements) error { func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests) framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests) err := equalResourceList(expected.Requests, actual.Requests)
if err != nil { if err != nil {
@ -117,7 +117,7 @@ func equalResourceRequirement(expected api.ResourceRequirements, actual api.Reso
return nil return nil
} }
func equalResourceList(expected api.ResourceList, actual api.ResourceList) error { func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
for k, v := range expected { for k, v := range expected {
if actualValue, found := actual[k]; !found || (v.Cmp(actualValue) != 0) { if actualValue, found := actual[k]; !found || (v.Cmp(actualValue) != 0) {
return fmt.Errorf("resource %v expected %v actual %v", k, v.String(), actualValue.String()) return fmt.Errorf("resource %v expected %v actual %v", k, v.String(), actualValue.String())
@ -131,28 +131,28 @@ func equalResourceList(expected api.ResourceList, actual api.ResourceList) error
return nil return nil
} }
func getResourceList(cpu, memory string) api.ResourceList { func getResourceList(cpu, memory string) v1.ResourceList {
res := api.ResourceList{} res := v1.ResourceList{}
if cpu != "" { if cpu != "" {
res[api.ResourceCPU] = resource.MustParse(cpu) res[v1.ResourceCPU] = resource.MustParse(cpu)
} }
if memory != "" { if memory != "" {
res[api.ResourceMemory] = resource.MustParse(memory) res[v1.ResourceMemory] = resource.MustParse(memory)
} }
return res return res
} }
// newLimitRange returns a limit range with specified data // newLimitRange returns a limit range with specified data
func newLimitRange(name string, limitType api.LimitType, func newLimitRange(name string, limitType v1.LimitType,
min, max, min, max,
defaultLimit, defaultRequest, defaultLimit, defaultRequest,
maxLimitRequestRatio api.ResourceList) *api.LimitRange { maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
return &api.LimitRange{ return &v1.LimitRange{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.LimitRangeSpec{ Spec: v1.LimitRangeSpec{
Limits: []api.LimitRangeItem{ Limits: []v1.LimitRangeItem{
{ {
Type: limitType, Type: limitType,
Min: min, Min: min,
@ -167,17 +167,17 @@ func newLimitRange(name string, limitType api.LimitType,
} }
// newTestPod returns a pod that has the specified requests and limits // newTestPod returns a pod that has the specified requests and limits
func newTestPod(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod { func newTestPod(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "pause", Name: "pause",
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
}, },


@ -27,8 +27,9 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport" "k8s.io/kubernetes/pkg/client/transport"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@ -62,7 +63,7 @@ const (
// To run this suite you must explicitly ask for it by setting the // To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag. // -t/--test flag or ginkgo.focus flag.
var _ = framework.KubeDescribe("Load capacity", func() { var _ = framework.KubeDescribe("Load capacity", func() {
var clientset internalclientset.Interface var clientset clientset.Interface
var nodeCount int var nodeCount int
var ns string var ns string
var configs []*testutils.RCConfig var configs []*testutils.RCConfig
@ -140,7 +141,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
totalPods := itArg.podsPerNode * nodeCount totalPods := itArg.podsPerNode * nodeCount
configs = generateRCConfigs(totalPods, itArg.image, itArg.command, namespaces) configs = generateRCConfigs(totalPods, itArg.image, itArg.command, namespaces)
var services []*api.Service var services []*v1.Service
// Read the environment variable to see if we want to create services // Read the environment variable to see if we want to create services
createServices := os.Getenv("CREATE_SERVICES") createServices := os.Getenv("CREATE_SERVICES")
if createServices == "true" { if createServices == "true" {
@ -206,8 +207,9 @@ var _ = framework.KubeDescribe("Load capacity", func() {
} }
}) })
func createClients(numberOfClients int) ([]*internalclientset.Clientset, error) { func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclientset.Clientset, error) {
clients := make([]*internalclientset.Clientset, numberOfClients) clients := make([]*clientset.Clientset, numberOfClients)
internalClients := make([]*internalclientset.Clientset, numberOfClients)
for i := 0; i < numberOfClients; i++ { for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig() config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -223,11 +225,11 @@ func createClients(numberOfClients int) ([]*internalclientset.Clientset, error)
// each client here. // each client here.
transportConfig, err := config.TransportConfig() transportConfig, err := config.TransportConfig()
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
tlsConfig, err := transport.TLSConfigFor(transportConfig) tlsConfig, err := transport.TLSConfigFor(transportConfig)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
config.Transport = utilnet.SetTransportDefaults(&http.Transport{ config.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment, Proxy: http.ProxyFromEnvironment,
@ -243,13 +245,18 @@ func createClients(numberOfClients int) ([]*internalclientset.Clientset, error)
// Transport field. // Transport field.
config.TLSClientConfig = restclient.TLSClientConfig{} config.TLSClientConfig = restclient.TLSClientConfig{}
c, err := internalclientset.NewForConfig(config) c, err := clientset.NewForConfig(config)
if err != nil { if err != nil {
return nil, err return nil, nil, err
} }
clients[i] = c clients[i] = c
internalClient, err := internalclientset.NewForConfig(config)
if err != nil {
return nil, nil, err
}
internalClients[i] = internalClient
} }
return clients, nil return clients, internalClients, nil
} }
func computeRCCounts(total int) (int, int, int) { func computeRCCounts(total int) (int, int, int) {
@ -266,7 +273,7 @@ func computeRCCounts(total int) (int, int, int) {
return smallRCCount, mediumRCCount, bigRCCount return smallRCCount, mediumRCCount, bigRCCount
} }
func generateRCConfigs(totalPods int, image string, command []string, nss []*api.Namespace) []*testutils.RCConfig { func generateRCConfigs(totalPods int, image string, command []string, nss []*v1.Namespace) []*testutils.RCConfig {
configs := make([]*testutils.RCConfig, 0) configs := make([]*testutils.RCConfig, 0)
smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods) smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods)
@ -277,49 +284,51 @@ func generateRCConfigs(totalPods int, image string, command []string, nss []*api
// Create a number of clients to better simulate the real use case // Create a number of clients to better simulate the real use case
// where not everyone is using exactly the same client. // where not everyone is using exactly the same client.
rcsPerClient := 20 rcsPerClient := 20
clients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient) clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
framework.ExpectNoError(err) framework.ExpectNoError(err)
for i := 0; i < len(configs); i++ { for i := 0; i < len(configs); i++ {
configs[i].Client = clients[i%len(clients)] configs[i].Client = clients[i%len(clients)]
configs[i].InternalClient = internalClients[i%len(internalClients)]
} }
return configs return configs
} }
func generateRCConfigsForGroup( func generateRCConfigsForGroup(
nss []*api.Namespace, groupName string, size, count int, image string, command []string) []*testutils.RCConfig { nss []*v1.Namespace, groupName string, size, count int, image string, command []string) []*testutils.RCConfig {
configs := make([]*testutils.RCConfig, 0, count) configs := make([]*testutils.RCConfig, 0, count)
for i := 1; i <= count; i++ { for i := 1; i <= count; i++ {
config := &testutils.RCConfig{ config := &testutils.RCConfig{
Client: nil, // this will be overwritten later Client: nil, // this will be overwritten later
Name: groupName + "-" + strconv.Itoa(i), InternalClient: nil, // this will be overwritten later
Namespace: nss[i%len(nss)].Name, Name: groupName + "-" + strconv.Itoa(i),
Timeout: 10 * time.Minute, Namespace: nss[i%len(nss)].Name,
Image: image, Timeout: 10 * time.Minute,
Command: command, Image: image,
Replicas: size, Command: command,
CpuRequest: 10, // 0.01 core Replicas: size,
MemRequest: 26214400, // 25MB CpuRequest: 10, // 0.01 core
MemRequest: 26214400, // 25MB
} }
configs = append(configs, config) configs = append(configs, config)
} }
return configs return configs
} }
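Because two lines were inserted, the columns drift apart in the hunk above and the struct literal becomes hard to read. Reconstructed from the right-hand side, the new config is as follows (note that createClients now returns a versioned clientset slice, an internal clientset slice, and the error):

config := &testutils.RCConfig{
	Client:         nil, // this will be overwritten later
	InternalClient: nil, // this will be overwritten later
	Name:           groupName + "-" + strconv.Itoa(i),
	Namespace:      nss[i%len(nss)].Name,
	Timeout:        10 * time.Minute,
	Image:          image,
	Command:        command,
	Replicas:       size,
	CpuRequest:     10,       // 0.01 core
	MemRequest:     26214400, // 25MB
}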
func generateServicesForConfigs(configs []*testutils.RCConfig) []*api.Service { func generateServicesForConfigs(configs []*testutils.RCConfig) []*v1.Service {
services := make([]*api.Service, 0, len(configs)) services := make([]*v1.Service, 0, len(configs))
for _, config := range configs { for _, config := range configs {
serviceName := config.Name + "-svc" serviceName := config.Name + "-svc"
labels := map[string]string{"name": config.Name} labels := map[string]string{"name": config.Name}
service := &api.Service{ service := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: serviceName, Name: serviceName,
Namespace: config.Namespace, Namespace: config.Namespace,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: labels, Selector: labels,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
TargetPort: intstr.FromInt(80), TargetPort: intstr.FromInt(80),
}}, }},
@ -368,11 +377,11 @@ func scaleRC(wg *sync.WaitGroup, config *testutils.RCConfig, scalingTime time.Du
sleepUpTo(scalingTime) sleepUpTo(scalingTime)
newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2) newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true), framework.ExpectNoError(framework.ScaleRC(config.Client, config.InternalClient, config.Namespace, config.Name, newSize, true),
fmt.Sprintf("scaling rc %s for the first time", config.Name)) fmt.Sprintf("scaling rc %s for the first time", config.Name))
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
options := api.ListOptions{ options := v1.ListOptions{
LabelSelector: selector, LabelSelector: selector.String(),
ResourceVersion: "0", ResourceVersion: "0",
} }
_, err := config.Client.Core().Pods(config.Namespace).List(options) _, err := config.Client.Core().Pods(config.Namespace).List(options)
@ -396,16 +405,16 @@ func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time.
if framework.TestContext.GarbageCollectorEnabled { if framework.TestContext.GarbageCollectorEnabled {
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
} else { } else {
framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name)) framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.InternalClient, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
} }
} }
func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*api.Namespace, error) { func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) {
namespaces := []*api.Namespace{} namespaces := []*v1.Namespace{}
for i := 1; i <= namespaceCount; i++ { for i := 1; i <= namespaceCount; i++ {
namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil) namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)
if err != nil { if err != nil {
return []*api.Namespace{}, err return []*v1.Namespace{}, err
} }
namespaces = append(namespaces, namespace) namespaces = append(namespaces, namespace)
} }


@ -25,7 +25,7 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -95,9 +95,9 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
appName := "logging-soak" + podname appName := "logging-soak" + podname
podlables := f.CreatePodsPerNodeForSimpleApp( podlables := f.CreatePodsPerNodeForSimpleApp(
appName, appName,
func(n api.Node) api.PodSpec { func(n v1.Node) v1.PodSpec {
return api.PodSpec{ return v1.PodSpec{
Containers: []api.Container{{ Containers: []v1.Container{{
Name: "logging-soak", Name: "logging-soak",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Args: []string{ Args: []string{
@ -107,7 +107,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
}, },
}}, }},
NodeName: n.Name, NodeName: n.Name,
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
} }
}, },
totalPods, totalPods,
@ -116,10 +116,10 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
logSoakVerification := f.NewClusterVerification( logSoakVerification := f.NewClusterVerification(
framework.PodStateVerification{ framework.PodStateVerification{
Selectors: podlables, Selectors: podlables,
ValidPhases: []api.PodPhase{api.PodRunning, api.PodSucceeded}, ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
// we don't validate total log data, since there is no guarantee all logs will be stored forever. // we don't validate total log data, since there is no guarantee all logs will be stored forever.
// instead, we just validate that some logs are being created in stdout. // instead, we just validate that some logs are being created in stdout.
Verify: func(p api.Pod) (bool, error) { Verify: func(p v1.Pod) (bool, error) {
s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second) s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
return s != "", err return s != "", err
}, },


@ -19,9 +19,9 @@ package e2e
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
nodeClient := f.ClientSet.Core().Nodes() nodeClient := f.ClientSet.Core().Nodes()
rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"}) rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
options := api.ListOptions{LabelSelector: rackA} options := v1.ListOptions{LabelSelector: rackA.String()}
nodes, err := nodeClient.List(options) nodes, err := nodeClient.List(options)
if err != nil { if err != nil {
framework.Failf("Failed to query for node: %v", err) framework.Failf("Failed to query for node: %v", err)
@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
var addr string var addr string
for _, a := range nodes.Items[0].Status.Addresses { for _, a := range nodes.Items[0].Status.Addresses {
if a.Type == api.NodeInternalIP { if a.Type == v1.NodeInternalIP {
addr = a.Address addr = a.Address
} }
} }
@ -79,18 +79,18 @@ var _ = framework.KubeDescribe("Mesos", func() {
// scheduled onto it. // scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.") By("Trying to launch a pod with a label to get a node which can launch it.")
podName := "with-label" podName := "with-label"
_, err := c.Core().Pods(ns).Create(&api.Pod{ _, err := c.Core().Pods(ns).Create(&v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Annotations: map[string]string{ Annotations: map[string]string{
"k8s.mesosphere.io/roles": "public", "k8s.mesosphere.io/roles": "public",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: podName, Name: podName,
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
rack2 := labels.SelectorFromSet(map[string]string{ rack2 := labels.SelectorFromSet(map[string]string{
"k8s.mesosphere.io/attribute-rack": "2", "k8s.mesosphere.io/attribute-rack": "2",
}) })
options := api.ListOptions{LabelSelector: rack2} options := v1.ListOptions{LabelSelector: rack2.String()}
nodes, err := nodeClient.List(options) nodes, err := nodeClient.List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)


@ -19,8 +19,8 @@ package e2e
import ( import (
"strings" "strings"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/metrics" "k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a Scheduler.", func() { It("should grab all metrics from a Scheduler.", func() {
By("Proxying to Pod through the API server") By("Proxying to Pod through the API server")
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Core().Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
var masterRegistered = false var masterRegistered = false
@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a ControllerManager.", func() { It("should grab all metrics from a ControllerManager.", func() {
By("Proxying to Pod through the API server") By("Proxying to Pod through the API server")
// Check if master Node is registered // Check if master Node is registered
nodes, err := c.Core().Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
var masterRegistered = false var masterRegistered = false


@ -24,7 +24,8 @@ import (
influxdb "github.com/influxdata/influxdb/client" influxdb "github.com/influxdata/influxdb/client"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -101,7 +102,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// is running (which would be an error except during a rolling update). // is running (which would be an error except during a rolling update).
for _, rcLabel := range rcLabels { for _, rcLabel := range rcLabels {
selector := labels.Set{"k8s-app": rcLabel}.AsSelector() selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options) deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
@ -121,7 +122,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// Check all the replication controllers. // Check all the replication controllers.
for _, rc := range rcList.Items { for _, rc := range rcList.Items {
selector := labels.Set(rc.Spec.Selector).AsSelector() selector := labels.Set(rc.Spec.Selector).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
@ -136,7 +137,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// Do the same for all deployments. // Do the same for all deployments.
for _, rc := range deploymentList.Items { for _, rc := range deploymentList.Items {
selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector() selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
@ -151,7 +152,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// And for pet sets. // And for pet sets.
for _, ps := range psList.Items { for _, ps := range psList.Items {
selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector() selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
@ -168,7 +169,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
} }
func expectedServicesExist(c clientset.Interface) error { func expectedServicesExist(c clientset.Interface) error {
serviceList, err := c.Core().Services(api.NamespaceSystem).List(api.ListOptions{}) serviceList, err := c.Core().Services(api.NamespaceSystem).List(v1.ListOptions{})
if err != nil { if err != nil {
return err return err
} }
@ -187,7 +188,7 @@ func expectedServicesExist(c clientset.Interface) error {
func getAllNodesInCluster(c clientset.Interface) ([]string, error) { func getAllNodesInCluster(c clientset.Interface) ([]string, error) {
// It should be OK to list unschedulable Nodes here. // It should be OK to list unschedulable Nodes here.
nodeList, err := c.Core().Nodes().List(api.ListOptions{}) nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -281,7 +282,7 @@ func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) {
func printDebugInfo(c clientset.Interface) { func printDebugInfo(c clientset.Interface) {
set := labels.Set{"k8s-app": "heapster"} set := labels.Set{"k8s-app": "heapster"}
options := api.ListOptions{LabelSelector: set.AsSelector()} options := v1.ListOptions{LabelSelector: set.AsSelector().String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options) podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil { if err != nil {
framework.Logf("Error while listing pods %v", err) framework.Logf("Error while listing pods %v", err)


@ -22,8 +22,8 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -60,7 +60,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second, framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) { func() (bool, error) {
var cnt = 0 var cnt = 0
nsList, err := f.ClientSet.Core().Namespaces().List(api.ListOptions{}) nsList, err := f.ClientSet.Core().Namespaces().List(v1.ListOptions{})
if err != nil { if err != nil {
return false, err return false, err
} }
@ -89,12 +89,12 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a pod in the namespace") By("Creating a pod in the namespace")
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "test-pod", Name: "test-pod",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
@ -145,13 +145,13 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
"foo": "bar", "foo": "bar",
"baz": "blah", "baz": "blah",
} }
service := &api.Service{ service := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: serviceName, Name: serviceName,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: labels, Selector: labels,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
TargetPort: intstr.FromInt(80), TargetPort: intstr.FromInt(80),
}}, }},


@ -22,8 +22,9 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -40,7 +41,7 @@ import (
// At the end (even in case of errors), the network traffic is brought back to normal. // At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some // This function executes commands on a node so it will work only for some
// environments. // environments.
func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *api.Node, testFunc func()) { func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host := framework.GetNodeExternalIP(node) host := framework.GetNodeExternalIP(node)
master := framework.GetMasterAddress(c) master := framework.GetMasterAddress(c)
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name)) By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
@ -54,13 +55,13 @@ func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *ap
}() }()
framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name) framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) { if !framework.WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout) framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
} }
framework.BlockNetwork(host, master) framework.BlockNetwork(host, master)
framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name) framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) { if !framework.WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout) framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
} }
@ -68,14 +69,14 @@ func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *ap
// network traffic is unblocked in a deferred function // network traffic is unblocked in a deferred function
} }
func expectNodeReadiness(isReady bool, newNode chan *api.Node) { func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
timeout := false timeout := false
expected := false expected := false
timer := time.After(nodeReadinessTimeout) timer := time.After(nodeReadinessTimeout)
for !expected && !timeout { for !expected && !timeout {
select { select {
case n := <-newNode: case n := <-newNode:
if framework.IsNodeConditionSetAsExpected(n, api.NodeReady, isReady) { if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
expected = true expected = true
} else { } else {
framework.Logf("Observed node ready status is NOT %v as expected", isReady) framework.Logf("Observed node ready status is NOT %v as expected", isReady)
@ -89,24 +90,24 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) {
} }
} }
func podOnNode(podName, nodeName string, image string) *api.Pod { func podOnNode(podName, nodeName string, image string) *v1.Pod {
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{ Labels: map[string]string{
"name": podName, "name": podName,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: podName, Name: podName,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
NodeName: nodeName, NodeName: nodeName,
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
} }
@ -158,16 +159,16 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+ It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
"AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() { "AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() {
By("choose a node - we will block all network traffic on this node") By("choose a node - we will block all network traffic on this node")
var podOpts api.ListOptions var podOpts v1.ListOptions
nodeOpts := api.ListOptions{} nodeOpts := v1.ListOptions{}
nodes, err := c.Core().Nodes().List(nodeOpts) nodes, err := c.Core().Nodes().List(nodeOpts)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
framework.FilterNodes(nodes, func(node api.Node) bool { framework.FilterNodes(nodes, func(node v1.Node) bool {
if !framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) { if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
return false return false
} }
podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
pods, err := c.Core().Pods(api.NamespaceAll).List(podOpts) pods, err := c.Core().Pods(v1.NamespaceAll).List(podOpts)
if err != nil || len(pods.Items) <= 0 { if err != nil || len(pods.Items) <= 0 {
return false return false
} }
@ -177,7 +178,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.Failf("No eligible node were found: %d", len(nodes.Items)) framework.Failf("No eligible node were found: %d", len(nodes.Items))
} }
node := nodes.Items[0] node := nodes.Items[0]
podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)} podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil { if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err) framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
} }
@ -185,25 +186,25 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
By("Set up watch on node status") By("Set up watch on node status")
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name) nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
stopCh := make(chan struct{}) stopCh := make(chan struct{})
newNode := make(chan *api.Node) newNode := make(chan *v1.Node)
var controller *cache.Controller var controller *cache.Controller
_, controller = cache.NewInformer( _, controller = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector.String()
obj, err := f.ClientSet.Core().Nodes().List(options) obj, err := f.ClientSet.Core().Nodes().List(options)
return runtime.Object(obj), err return runtime.Object(obj), err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector.String()
return f.ClientSet.Core().Nodes().Watch(options) return f.ClientSet.Core().Nodes().Watch(options)
}, },
}, },
&api.Node{}, &v1.Node{},
0, 0,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
n, ok := newObj.(*api.Node) n, ok := newObj.(*v1.Node)
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
newNode <- n newNode <- n
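The informer wiring above follows the same selector-as-string rule. Reconstructed from the right-hand column (a sketch; nodeSelector and newNode are the variables defined earlier in this test):

_, controller = cache.NewInformer(
	&cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			options.FieldSelector = nodeSelector.String() // string form of the field selector
			obj, err := f.ClientSet.Core().Nodes().List(options)
			return runtime.Object(obj), err
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			options.FieldSelector = nodeSelector.String()
			return f.ClientSet.Core().Nodes().Watch(options)
		},
	},
	&v1.Node{},
	0,
	cache.ResourceEventHandlerFuncs{
		UpdateFunc: func(oldObj, newObj interface{}) {
			n, ok := newObj.(*v1.Node) // events now deliver versioned objects
			Expect(ok).To(Equal(true))
			newNode <- n
		},
	},
)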
@ -262,7 +263,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
By("choose a node with at least one pod - we will block some network traffic on this node") By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := api.ListOptions{LabelSelector: label} options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName nodeName := pods.Items[0].Spec.NodeName
@ -327,7 +328,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
By("choose a node with at least one pod - we will block some network traffic on this node") By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name})) label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
options := api.ListOptions{LabelSelector: label} options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName nodeName := pods.Items[0].Spec.NodeName
@ -385,8 +386,8 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
}) })
It("should come back up if node goes down [Slow] [Disruptive]", func() { It("should come back up if node goes down [Slow] [Disruptive]", func() {
petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}} petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}} podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels) ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.Apps().StatefulSets(ns).Create(ps) _, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -399,16 +400,16 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
restartNodes(f, nodeNames) restartNodes(f, nodeNames)
By("waiting for pods to be running again") By("waiting for pods to be running again")
pst.waitForRunningAndReady(ps.Spec.Replicas, ps) pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
}) })
It("should not reschedule pets if there is a network partition [Slow] [Disruptive]", func() { It("should not reschedule pets if there is a network partition [Slow] [Disruptive]", func() {
ps := newStatefulSet(psName, ns, headlessSvcName, 3, []api.VolumeMount{}, []api.VolumeMount{}, labels) ps := newStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.Apps().StatefulSets(ns).Create(ps) _, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
pst := statefulSetTester{c: c} pst := statefulSetTester{c: c}
pst.waitForRunningAndReady(ps.Spec.Replicas, ps) pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
pod := pst.getPodList(ps).Items[0] pod := pst.getPodList(ps).Items[0]
node, err := c.Core().Nodes().Get(pod.Spec.NodeName) node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
@ -429,7 +430,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
} }
By("waiting for pods to be running again") By("waiting for pods to be running again")
pst.waitForRunningAndReady(ps.Spec.Replicas, ps) pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
}) })
}) })
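A subtler change in this file: the versioned StatefulSet spec stores its replica count as a pointer, which is why every waitForRunningAndReady call above now dereferences it. In outline (assuming the newStatefulSet helper and statefulSetTester from these tests):

ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
pst := statefulSetTester{c: c}
// v1 specs use *int32 for optional counts, hence the dereference:
pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)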
@ -438,7 +439,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
parallelism := int32(2) parallelism := int32(2)
completions := int32(4) completions := int32(4)
job := newTestJob("notTerminate", "network-partition", api.RestartPolicyNever, parallelism, completions) job := newTestJob("notTerminate", "network-partition", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job) job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: job.Name})) label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: job.Name}))
@ -448,7 +449,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("choose a node with at least one pod - we will block some network traffic on this node") By("choose a node with at least one pod - we will block some network traffic on this node")
options := api.ListOptions{LabelSelector: label} options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName nodeName := pods.Items[0].Spec.NodeName


@ -24,7 +24,7 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
) )
@ -59,9 +59,9 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
8001, 8001,
8002, 8002,
appName, appName,
func(n api.Node) api.PodSpec { func(n v1.Node) v1.PodSpec {
return api.PodSpec{ return v1.PodSpec{
Containers: []api.Container{{ Containers: []v1.Container{{
Name: "iperf-server", Name: "iperf-server",
Image: "gcr.io/google_containers/iperf:e2e", Image: "gcr.io/google_containers/iperf:e2e",
Args: []string{ Args: []string{
@ -69,10 +69,10 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
"-c", "-c",
"/usr/local/bin/iperf -s -p 8001 ", "/usr/local/bin/iperf -s -p 8001 ",
}, },
Ports: []api.ContainerPort{{ContainerPort: 8001}}, Ports: []v1.ContainerPort{{ContainerPort: 8001}},
}}, }},
NodeName: n.Name, NodeName: n.Name,
RestartPolicy: api.RestartPolicyOnFailure, RestartPolicy: v1.RestartPolicyOnFailure,
} }
}, },
// this will be used to generate the -service name which all iperf clients point at. // this will be used to generate the -service name which all iperf clients point at.
@ -86,9 +86,9 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp( iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
"iperf-e2e-cli", "iperf-e2e-cli",
func(n api.Node) api.PodSpec { func(n v1.Node) v1.PodSpec {
return api.PodSpec{ return v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "iperf-client", Name: "iperf-client",
Image: "gcr.io/google_containers/iperf:e2e", Image: "gcr.io/google_containers/iperf:e2e",
@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyOnFailure, // let them successfully die. RestartPolicy: v1.RestartPolicyOnFailure, // let them successfully die.
} }
}, },
numClient, numClient,
@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
iperfClusterVerification := f.NewClusterVerification( iperfClusterVerification := f.NewClusterVerification(
framework.PodStateVerification{ framework.PodStateVerification{
Selectors: iperfClientPodLabels, Selectors: iperfClientPodLabels,
ValidPhases: []api.PodPhase{api.PodSucceeded}, ValidPhases: []v1.PodPhase{v1.PodSucceeded},
}, },
) )
@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
} else { } else {
// For each builds up a collection of IPerfRecords // For each builds up a collection of IPerfRecords
iperfClusterVerification.ForEach( iperfClusterVerification.ForEach(
func(p api.Pod) { func(p v1.Pod) {
resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second) resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
if err == nil { if err == nil {
framework.Logf(resultS) framework.Logf(resultS)


@ -23,8 +23,9 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/system" "k8s.io/kubernetes/pkg/util/system"
@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
name = "node-problem-detector-" + uid name = "node-problem-detector-" + uid
configName = "node-problem-detector-config-" + uid configName = "node-problem-detector-config-" + uid
// There is no namespace for Node; the event recorder will set the default namespace for node events. // There is no namespace for Node; the event recorder will set the default namespace for node events.
eventNamespace = api.NamespaceDefault eventNamespace = v1.NamespaceDefault
}) })
// Test kernel monitor. We may add other tests if we have more problem daemons in the future. // Test kernel monitor. We may add other tests if we have more problem daemons in the future.
@ -63,7 +64,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
// Use test condition to avoid conflict with real node problem detector // Use test condition to avoid conflict with real node problem detector
// TODO(random-liu): Now the node condition can be an arbitrary string; consider whether we need to // TODO(random-liu): Now the node condition can be an arbitrary string; consider whether we need to
// add TestCondition when switching to predefined condition list. // add TestCondition when switching to predefined condition list.
condition = api.NodeConditionType("TestCondition") condition = v1.NodeConditionType("TestCondition")
lookback = time.Hour // Assume the test won't take more than 1 hour; in fact it usually only takes 90 seconds. lookback = time.Hour // Assume the test won't take more than 1 hour; in fact it usually only takes 90 seconds.
startPattern = "test reboot" startPattern = "test reboot"
@ -88,8 +89,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
permMessage = "permanent error" permMessage = "permanent error"
) )
var source, config, tmpDir string var source, config, tmpDir string
var node *api.Node var node *v1.Node
var eventListOptions api.ListOptions var eventListOptions v1.ListOptions
injectCommand := func(timestamp time.Time, log string, num int) string { injectCommand := func(timestamp time.Time, log string, num int) string {
var commands []string var commands []string
for i := 0; i < num; i++ { for i := 0; i < num; i++ {
@ -132,11 +133,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
] ]
}` }`
By("Get a non master node to run the pod") By("Get a non master node to run the pod")
nodes, err := c.Core().Nodes().List(api.ListOptions{}) nodes, err := c.Core().Nodes().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
node = nil node = nil
for _, n := range nodes.Items { for _, n := range nodes.Items {
if !system.IsMasterNode(&n) { if !system.IsMasterNode(n.Name) {
node = &n node = &n
break break
} }
@ -146,70 +147,71 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
selector := fields.Set{ selector := fields.Set{
"involvedObject.kind": "Node", "involvedObject.kind": "Node",
"involvedObject.name": node.Name, "involvedObject.name": node.Name,
"involvedObject.namespace": api.NamespaceAll, "involvedObject.namespace": v1.NamespaceAll,
"source": source, "source": source,
}.AsSelector() }.AsSelector().String()
eventListOptions = api.ListOptions{FieldSelector: selector} eventListOptions = v1.ListOptions{FieldSelector: selector}
By("Create the test log file") By("Create the test log file")
tmpDir = "/tmp/" + name tmpDir = "/tmp/" + name
cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile) cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed()) Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
By("Create config map for the node problem detector") By("Create config map for the node problem detector")
_, err = c.Core().ConfigMaps(ns).Create(&api.ConfigMap{ _, err = c.Core().ConfigMaps(ns).Create(&v1.ConfigMap{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: configName, Name: configName,
}, },
Data: map[string]string{configFile: config}, Data: map[string]string{configFile: config},
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Create the node problem detector") By("Create the node problem detector")
_, err = c.Core().Pods(ns).Create(&api.Pod{ _, err = c.Core().Pods(ns).Create(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeName: node.Name, NodeName: node.Name,
SecurityContext: &api.PodSecurityContext{HostNetwork: true}, HostNetwork: true,
Volumes: []api.Volume{ SecurityContext: &v1.PodSecurityContext{},
Volumes: []v1.Volume{
{ {
Name: configVolume, Name: configVolume,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
ConfigMap: &api.ConfigMapVolumeSource{ ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: api.LocalObjectReference{Name: configName}, LocalObjectReference: v1.LocalObjectReference{Name: configName},
}, },
}, },
}, },
{ {
Name: logVolume, Name: logVolume,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: &api.HostPathVolumeSource{Path: tmpDir}, HostPath: &v1.HostPathVolumeSource{Path: tmpDir},
}, },
}, },
{ {
Name: localtimeVolume, Name: localtimeVolume,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: &api.HostPathVolumeSource{Path: etcLocaltime}, HostPath: &v1.HostPathVolumeSource{Path: etcLocaltime},
}, },
}, },
}, },
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: name, Name: name,
Image: image, Image: image,
Command: []string{"/node-problem-detector", "--kernel-monitor=" + filepath.Join(configDir, configFile)}, Command: []string{"/node-problem-detector", "--kernel-monitor=" + filepath.Join(configDir, configFile)},
ImagePullPolicy: api.PullAlways, ImagePullPolicy: v1.PullAlways,
Env: []api.EnvVar{ Env: []v1.EnvVar{
{ {
Name: "NODE_NAME", Name: "NODE_NAME",
ValueFrom: &api.EnvVarSource{ ValueFrom: &v1.EnvVarSource{
FieldRef: &api.ObjectFieldSelector{ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1", APIVersion: "v1",
FieldPath: "spec.nodeName", FieldPath: "spec.nodeName",
}, },
}, },
}, },
}, },
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: logVolume, Name: logVolume,
MountPath: logDir, MountPath: logDir,
@ -248,13 +250,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events int events int
conditionReason string conditionReason string
conditionMessage string conditionMessage string
conditionType api.ConditionStatus conditionType v1.ConditionStatus
}{ }{
{ {
description: "should generate default node condition", description: "should generate default node condition",
conditionReason: defaultReason, conditionReason: defaultReason,
conditionMessage: defaultMessage, conditionMessage: defaultMessage,
conditionType: api.ConditionFalse, conditionType: v1.ConditionFalse,
}, },
{ {
description: "should not generate events for too old log", description: "should not generate events for too old log",
@ -263,7 +265,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
messageNum: 3, messageNum: 3,
conditionReason: defaultReason, conditionReason: defaultReason,
conditionMessage: defaultMessage, conditionMessage: defaultMessage,
conditionType: api.ConditionFalse, conditionType: v1.ConditionFalse,
}, },
{ {
description: "should not change node condition for too old log", description: "should not change node condition for too old log",
@ -272,7 +274,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
messageNum: 1, messageNum: 1,
conditionReason: defaultReason, conditionReason: defaultReason,
conditionMessage: defaultMessage, conditionMessage: defaultMessage,
conditionType: api.ConditionFalse, conditionType: v1.ConditionFalse,
}, },
{ {
description: "should generate event for old log within lookback duration", description: "should generate event for old log within lookback duration",
@ -282,7 +284,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 3, events: 3,
conditionReason: defaultReason, conditionReason: defaultReason,
conditionMessage: defaultMessage, conditionMessage: defaultMessage,
conditionType: api.ConditionFalse, conditionType: v1.ConditionFalse,
}, },
{ {
description: "should change node condition for old log within lookback duration", description: "should change node condition for old log within lookback duration",
@ -292,7 +294,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 3, // event number should not change events: 3, // event number should not change
conditionReason: permReason, conditionReason: permReason,
conditionMessage: permMessage, conditionMessage: permMessage,
conditionType: api.ConditionTrue, conditionType: v1.ConditionTrue,
}, },
{ {
description: "should reset node condition if the node is rebooted", description: "should reset node condition if the node is rebooted",
@ -302,7 +304,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 3, // event number should not change events: 3, // event number should not change
conditionReason: defaultReason, conditionReason: defaultReason,
conditionMessage: defaultMessage, conditionMessage: defaultMessage,
conditionType: api.ConditionFalse, conditionType: v1.ConditionFalse,
}, },
{ {
description: "should generate event for new log", description: "should generate event for new log",
@ -312,7 +314,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 6, events: 6,
conditionReason: defaultReason, conditionReason: defaultReason,
conditionMessage: defaultMessage, conditionMessage: defaultMessage,
conditionType: api.ConditionFalse, conditionType: v1.ConditionFalse,
}, },
{ {
description: "should change node condition for new log", description: "should change node condition for new log",
@ -322,7 +324,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 6, // event number should not change events: 6, // event number should not change
conditionReason: permReason, conditionReason: permReason,
conditionMessage: permMessage, conditionMessage: permMessage,
conditionType: api.ConditionTrue, conditionType: v1.ConditionTrue,
}, },
} { } {
By(test.description) By(test.description)
@ -360,13 +362,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
framework.Logf("Node Problem Detector logs:\n %s", log) framework.Logf("Node Problem Detector logs:\n %s", log)
} }
By("Delete the node problem detector") By("Delete the node problem detector")
c.Core().Pods(ns).Delete(name, api.NewDeleteOptions(0)) c.Core().Pods(ns).Delete(name, v1.NewDeleteOptions(0))
By("Wait for the node problem detector to disappear") By("Wait for the node problem detector to disappear")
Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed()) Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
By("Delete the config map") By("Delete the config map")
c.Core().ConfigMaps(ns).Delete(configName, nil) c.Core().ConfigMaps(ns).Delete(configName, nil)
By("Clean up the events") By("Clean up the events")
Expect(c.Core().Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed()) Expect(c.Core().Events(eventNamespace).DeleteCollection(v1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
By("Clean up the node condition") By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition)) patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.Core().RESTClient().Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do() c.Core().RESTClient().Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
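For reference, a sketch of what the strategic merge patch built above expands to, with condition = "TestCondition" as set earlier in this file:

    // {"status":{"conditions":[{"$patch":"delete","type":"TestCondition"}]}}
    // "$patch":"delete" removes only the conditions entry whose merge key
    // ("type") matches; the node's other conditions are left untouched.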
@ -377,7 +379,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
}) })
// verifyEvents verifies that num events with the given reason and message have been generated // verifyEvents verifies that num events with the given reason and message have been generated
func verifyEvents(e coreclientset.EventInterface, options api.ListOptions, num int, reason, message string) error { func verifyEvents(e coreclientset.EventInterface, options v1.ListOptions, num int, reason, message string) error {
events, err := e.List(options) events, err := e.List(options)
if err != nil { if err != nil {
return err return err
@ -396,7 +398,7 @@ func verifyEvents(e coreclientset.EventInterface, options api.ListOptions, num i
} }
// verifyNoEvents verifies that no events have been generated // verifyNoEvents verifies that no events have been generated
func verifyNoEvents(e coreclientset.EventInterface, options api.ListOptions) error { func verifyNoEvents(e coreclientset.EventInterface, options v1.ListOptions) error {
events, err := e.List(options) events, err := e.List(options)
if err != nil { if err != nil {
return err return err
@ -408,12 +410,12 @@ func verifyNoEvents(e coreclientset.EventInterface, options api.ListOptions) err
} }
// verifyCondition verifies that the specific node condition is generated; if reason and message are empty, they are not checked // verifyCondition verifies that the specific node condition is generated; if reason and message are empty, they are not checked
func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition api.NodeConditionType, status api.ConditionStatus, reason, message string) error { func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
node, err := n.Get(nodeName) node, err := n.Get(nodeName)
if err != nil { if err != nil {
return err return err
} }
_, c := api.GetNodeCondition(&node.Status, condition) _, c := v1.GetNodeCondition(&node.Status, condition)
if c == nil { if c == nil {
return fmt.Errorf("node condition %q not found", condition) return fmt.Errorf("node condition %q not found", condition)
} }

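A hedged sketch of how the verify helpers above are driven from the table-driven spec (the actual call sites are elided by the hunks; the names are taken from this file):

    // Assumed call shape, not the verbatim body:
    err := verifyCondition(c.Core().Nodes(), node.Name, condition, test.conditionType, test.conditionReason, test.conditionMessage)
    framework.ExpectNoError(err)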
View File

@ -22,9 +22,9 @@ import (
"time" "time"
cadvisorapi "github.com/google/cadvisor/info/v1" cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -136,10 +136,10 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
"involvedObject.kind": "Pod", "involvedObject.kind": "Pod",
"involvedObject.name": pendingPodName, "involvedObject.name": pendingPodName,
"involvedObject.namespace": ns, "involvedObject.namespace": ns,
"source": api.DefaultSchedulerName, "source": v1.DefaultSchedulerName,
"reason": "FailedScheduling", "reason": "FailedScheduling",
}.AsSelector() }.AsSelector().String()
options := api.ListOptions{FieldSelector: selector} options := v1.ListOptions{FieldSelector: selector}
schedEvents, err := c.Core().Events(ns).List(options) schedEvents, err := c.Core().Events(ns).List(options)
framework.ExpectNoError(err) framework.ExpectNoError(err)
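The assertion on schedEvents is elided by the next hunk; a minimal sketch, assuming the spec only needs at least one FailedScheduling event for the pending pod:

    if len(schedEvents.Items) == 0 {
        framework.Failf("expected FailedScheduling events for pod %q, found none", pendingPodName)
    }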
@ -171,19 +171,19 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) { func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
podClient := c.Core().Pods(ns) podClient := c.Core().Pods(ns)
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "pause", Name: "pause",
Image: framework.GetPauseImageName(c), Image: framework.GetPauseImageName(c),
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
// Request enough CPU to fit only two pods on a given node. // Request enough CPU to fit only two pods on a given node.
api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
}, },
}, },
}, },
@ -197,11 +197,11 @@ func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64)
// availCpu calculates the available CPU on a given node by subtracting the CPU requested by // availCpu calculates the available CPU on a given node by subtracting the CPU requested by
// all the pods from the total available CPU capacity on the node. // all the pods from the total available CPU capacity on the node.
func availCpu(c clientset.Interface, node *api.Node) (int64, error) { func availCpu(c clientset.Interface, node *v1.Node) (int64, error) {
podClient := c.Core().Pods(api.NamespaceAll) podClient := c.Core().Pods(v1.NamespaceAll)
selector := fields.Set{"spec.nodeName": node.Name}.AsSelector() selector := fields.Set{"spec.nodeName": node.Name}.AsSelector().String()
options := api.ListOptions{FieldSelector: selector} options := v1.ListOptions{FieldSelector: selector}
pods, err := podClient.List(options) pods, err := podClient.List(options)
if err != nil { if err != nil {
return 0, fmt.Errorf("failed to retrieve all the pods on node %s: %v", node.Name, err) return 0, fmt.Errorf("failed to retrieve all the pods on node %s: %v", node.Name, err)
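The rest of availCpu is elided by this hunk; a minimal sketch of the subtraction described in the comment above (whether capacity or allocatable is consulted here is an assumption):

    avail := node.Status.Capacity.Cpu().MilliValue()
    for _, pod := range pods.Items {
        for _, container := range pod.Spec.Containers {
            // Subtract every container's CPU request from the node total.
            avail -= container.Resources.Requests.Cpu().MilliValue()
        }
    }
    return avail, nil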
@ -217,7 +217,7 @@ func availCpu(c clientset.Interface, node *api.Node) (int64, error) {
// availSize returns the available disk space on a given node by querying node stats which // availSize returns the available disk space on a given node by querying node stats which
// is in turn obtained internally from cadvisor. // is in turn obtained internally from cadvisor.
func availSize(c clientset.Interface, node *api.Node) (uint64, error) { func availSize(c clientset.Interface, node *v1.Node) (uint64, error) {
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name) statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource) framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource)
res, err := c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw() res, err := c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
@ -235,7 +235,7 @@ func availSize(c clientset.Interface, node *api.Node) (uint64, error) {
// fillDiskSpace fills the available disk space on a given node by creating a large file. The disk // fillDiskSpace fills the available disk space on a given node by creating a large file. The disk
// space on the node is filled in such a way that the available space after filling the disk is just // space on the node is filled in such a way that the available space after filling the disk is just
// below the lowDiskSpaceThreshold mark. // below the lowDiskSpaceThreshold mark.
func fillDiskSpace(c clientset.Interface, node *api.Node) { func fillDiskSpace(c clientset.Interface, node *v1.Node) {
avail, err := availSize(c, node) avail, err := availSize(c, node)
framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err) framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)
@ -247,7 +247,7 @@ func fillDiskSpace(c clientset.Interface, node *api.Node) {
cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize) cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize)
framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)) framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut) ood := framework.WaitForNodeToBe(c, node.Name, v1.NodeOutOfDisk, true, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut) Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut)
avail, err = availSize(c, node) avail, err = availSize(c, node)
@ -256,11 +256,11 @@ func fillDiskSpace(c clientset.Interface, node *api.Node) {
} }
// recoverDiskSpace recovers disk space, filled by creating a large file, on a given node. // recoverDiskSpace recovers disk space, filled by creating a large file, on a given node.
func recoverDiskSpace(c clientset.Interface, node *api.Node) { func recoverDiskSpace(c clientset.Interface, node *v1.Node) {
By(fmt.Sprintf("Recovering disk space on node %s", node.Name)) By(fmt.Sprintf("Recovering disk space on node %s", node.Name))
cmd := "rm -f test.img" cmd := "rm -f test.img"
framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)) framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut) ood := framework.WaitForNodeToBe(c, node.Name, v1.NodeOutOfDisk, false, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut) Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut)
} }
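A sketch of the fill-size arithmetic these two helpers imply, assuming lowDiskSpaceThreshold is a byte count defined elsewhere in this file:

    // After fallocate: avail' = avail - fillSize, targeted just below the mark:
    //   fillSize := avail - lowDiskSpaceThreshold + margin // margin: assumed small constant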

View File

@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -38,16 +39,16 @@ import (
var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", func() { var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", func() {
f := framework.NewDefaultFramework("opaque-resource") f := framework.NewDefaultFramework("opaque-resource")
opaqueResName := api.OpaqueIntResourceName("foo") opaqueResName := v1.OpaqueIntResourceName("foo")
var node *api.Node var node *v1.Node
BeforeEach(func() { BeforeEach(func() {
if node == nil { if node == nil {
// Priming invocation; select the first non-master node. // Priming invocation; select the first non-master node.
nodes, err := f.ClientSet.Core().Nodes().List(api.ListOptions{}) nodes, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, n := range nodes.Items { for _, n := range nodes.Items {
if !system.IsMasterNode(&n) { if !system.IsMasterNode(n.Name) {
node = &n node = &n
break break
} }
@ -63,8 +64,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should not break pods that do not consume opaque integer resources.", func() { It("should not break pods that do not consume opaque integer resources.", func() {
By("Creating a vanilla pod") By("Creating a vanilla pod")
requests := api.ResourceList{api.ResourceCPU: resource.MustParse("0.1")} requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.1")}
limits := api.ResourceList{api.ResourceCPU: resource.MustParse("0.2")} limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.2")}
pod := newTestPod(f, "without-oir", requests, limits) pod := newTestPod(f, "without-oir", requests, limits)
By("Observing an event that indicates the pod was scheduled") By("Observing an event that indicates the pod was scheduled")
@ -72,8 +73,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err return err
} }
predicate := func(e *api.Event) bool { predicate := func(e *v1.Event) bool {
return e.Type == api.EventTypeNormal && return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" && e.Reason == "Scheduled" &&
// Here we don't check for the bound node name since it can land on // Here we don't check for the bound node name since it can land on
// any one (this pod doesn't require any of the opaque resource.) // any one (this pod doesn't require any of the opaque resource.)
@ -86,13 +87,13 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should schedule pods that do consume opaque integer resources.", func() { It("should schedule pods that do consume opaque integer resources.", func() {
By("Creating a pod that requires less of the opaque resource than is allocatable on a node.") By("Creating a pod that requires less of the opaque resource than is allocatable on a node.")
requests := api.ResourceList{ requests := v1.ResourceList{
api.ResourceCPU: resource.MustParse("0.1"), v1.ResourceCPU: resource.MustParse("0.1"),
opaqueResName: resource.MustParse("1"), opaqueResName: resource.MustParse("1"),
} }
limits := api.ResourceList{ limits := v1.ResourceList{
api.ResourceCPU: resource.MustParse("0.2"), v1.ResourceCPU: resource.MustParse("0.2"),
opaqueResName: resource.MustParse("2"), opaqueResName: resource.MustParse("2"),
} }
pod := newTestPod(f, "min-oir", requests, limits) pod := newTestPod(f, "min-oir", requests, limits)
@ -101,8 +102,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err return err
} }
predicate := func(e *api.Event) bool { predicate := func(e *v1.Event) bool {
return e.Type == api.EventTypeNormal && return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" && e.Reason == "Scheduled" &&
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", pod.Name, node.Name)) strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", pod.Name, node.Name))
} }
@ -113,15 +114,15 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should not schedule pods that exceed the available amount of opaque integer resource.", func() { It("should not schedule pods that exceed the available amount of opaque integer resource.", func() {
By("Creating a pod that requires more of the opaque resource than is allocatable on any node") By("Creating a pod that requires more of the opaque resource than is allocatable on any node")
requests := api.ResourceList{opaqueResName: resource.MustParse("6")} requests := v1.ResourceList{opaqueResName: resource.MustParse("6")}
limits := api.ResourceList{} limits := v1.ResourceList{}
By("Observing an event that indicates the pod was not scheduled") By("Observing an event that indicates the pod was not scheduled")
action := func() error { action := func() error {
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(newTestPod(f, "over-max-oir", requests, limits)) _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(newTestPod(f, "over-max-oir", requests, limits))
return err return err
} }
predicate := func(e *api.Event) bool { predicate := func(e *v1.Event) bool {
return e.Type == "Warning" && return e.Type == "Warning" &&
e.Reason == "FailedScheduling" && e.Reason == "FailedScheduling" &&
strings.Contains(e.Message, "failed to fit in any node") strings.Contains(e.Message, "failed to fit in any node")
@ -133,20 +134,20 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should account opaque integer resources in pods with multiple containers.", func() { It("should account opaque integer resources in pods with multiple containers.", func() {
By("Creating a pod with two containers that together require less of the opaque resource than is allocatable on a node") By("Creating a pod with two containers that together require less of the opaque resource than is allocatable on a node")
requests := api.ResourceList{opaqueResName: resource.MustParse("1")} requests := v1.ResourceList{opaqueResName: resource.MustParse("1")}
limits := api.ResourceList{} limits := v1.ResourceList{}
image := framework.GetPauseImageName(f.ClientSet) image := framework.GetPauseImageName(f.ClientSet)
// This pod consumes 2 "foo" resources. // This pod consumes 2 "foo" resources.
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "mult-container-oir", Name: "mult-container-oir",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "pause", Name: "pause",
Image: image, Image: image,
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
}, },
@ -154,7 +155,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
{ {
Name: "pause-sidecar", Name: "pause-sidecar",
Image: image, Image: image,
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
}, },
@ -168,8 +169,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err return err
} }
predicate := func(e *api.Event) bool { predicate := func(e *v1.Event) bool {
return e.Type == api.EventTypeNormal && return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" && e.Reason == "Scheduled" &&
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", pod.Name, node.Name)) strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", pod.Name, node.Name))
} }
@ -178,19 +179,19 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
Expect(success).To(Equal(true)) Expect(success).To(Equal(true))
By("Creating a pod with two containers that together require more of the opaque resource than is allocatable on any node") By("Creating a pod with two containers that together require more of the opaque resource than is allocatable on any node")
requests = api.ResourceList{opaqueResName: resource.MustParse("3")} requests = v1.ResourceList{opaqueResName: resource.MustParse("3")}
limits = api.ResourceList{} limits = v1.ResourceList{}
// This pod consumes 6 "foo" resources. // This pod consumes 6 "foo" resources.
pod = &api.Pod{ pod = &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "mult-container-over-max-oir", Name: "mult-container-over-max-oir",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "pause", Name: "pause",
Image: image, Image: image,
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
}, },
@ -198,7 +199,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
{ {
Name: "pause-sidecar", Name: "pause-sidecar",
Image: image, Image: image,
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
}, },
@ -212,7 +213,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err return err
} }
predicate = func(e *api.Event) bool { predicate = func(e *v1.Event) bool {
return e.Type == "Warning" && return e.Type == "Warning" &&
e.Reason == "FailedScheduling" && e.Reason == "FailedScheduling" &&
strings.Contains(e.Message, "failed to fit in any node") strings.Contains(e.Message, "failed to fit in any node")
@ -224,12 +225,12 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
}) })
// Adds the opaque resource to a node. // Adds the opaque resource to a node.
func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName api.ResourceName) { func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
action := func() error { action := func() error {
patch := []byte(fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/%s", "value": "5"}]`, escapeForJSONPatch(opaqueResName))) patch := []byte(fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/%s", "value": "5"}]`, escapeForJSONPatch(opaqueResName)))
return f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error() return f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
} }
predicate := func(n *api.Node) bool { predicate := func(n *v1.Node) bool {
capacity, foundCap := n.Status.Capacity[opaqueResName] capacity, foundCap := n.Status.Capacity[opaqueResName]
allocatable, foundAlloc := n.Status.Allocatable[opaqueResName] allocatable, foundAlloc := n.Status.Allocatable[opaqueResName]
return foundCap && capacity.MilliValue() == int64(5000) && return foundCap && capacity.MilliValue() == int64(5000) &&
@ -241,13 +242,13 @@ func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName ap
} }
// Removes the opaque resource from a node. // Removes the opaque resource from a node.
func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName api.ResourceName) { func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
action := func() error { action := func() error {
patch := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/status/capacity/%s"}]`, escapeForJSONPatch(opaqueResName))) patch := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/status/capacity/%s"}]`, escapeForJSONPatch(opaqueResName)))
f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do() f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
return nil // Ignore error -- the opaque resource may not exist. return nil // Ignore error -- the opaque resource may not exist.
} }
predicate := func(n *api.Node) bool { predicate := func(n *v1.Node) bool {
_, foundCap := n.Status.Capacity[opaqueResName] _, foundCap := n.Status.Capacity[opaqueResName]
_, foundAlloc := n.Status.Allocatable[opaqueResName] _, foundAlloc := n.Status.Allocatable[opaqueResName]
return !foundCap && !foundAlloc return !foundCap && !foundAlloc
@ -257,7 +258,7 @@ func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName
Expect(success).To(Equal(true)) Expect(success).To(Equal(true))
} }
func escapeForJSONPatch(resName api.ResourceName) string { func escapeForJSONPatch(resName v1.ResourceName) string {
// Escape forward slashes in the resource name per the JSON Pointer spec. // Escape forward slashes in the resource name per the JSON Pointer spec.
// See https://tools.ietf.org/html/rfc6901#section-3 // See https://tools.ietf.org/html/rfc6901#section-3
return strings.Replace(string(resName), "/", "~1", -1) return strings.Replace(string(resName), "/", "~1", -1)
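A worked example of the escaping, assuming the 1.5-era opaque-resource prefix pod.alpha.kubernetes.io/opaque-int-resource-:

    // escapeForJSONPatch("pod.alpha.kubernetes.io/opaque-int-resource-foo")
    //   => "pod.alpha.kubernetes.io~1opaque-int-resource-foo"
    // so the JSON Pointer "/status/capacity/pod.alpha.kubernetes.io~1opaque-int-resource-foo"
    // addresses a single map key rather than a nested path.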
@ -265,7 +266,7 @@ func escapeForJSONPatch(resName api.ResourceName) string {
// Returns true if a node update matching the predicate was emitted from the // Returns true if a node update matching the predicate was emitted from the
// system after performing the supplied action. // system after performing the supplied action.
func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodePredicate func(*api.Node) bool, action func() error) (bool, error) { func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodePredicate func(*v1.Node) bool, action func() error) (bool, error) {
observedMatchingNode := false observedMatchingNode := false
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName) nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
informerStartedChan := make(chan struct{}) informerStartedChan := make(chan struct{})
@ -273,24 +274,24 @@ func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
_, controller := cache.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector.String()
ls, err := f.ClientSet.Core().Nodes().List(options) ls, err := f.ClientSet.Core().Nodes().List(options)
return ls, err return ls, err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.FieldSelector = nodeSelector options.FieldSelector = nodeSelector.String()
w, err := f.ClientSet.Core().Nodes().Watch(options) w, err := f.ClientSet.Core().Nodes().Watch(options)
// Signal parent goroutine that watching has begun. // Signal parent goroutine that watching has begun.
informerStartedGuard.Do(func() { close(informerStartedChan) }) informerStartedGuard.Do(func() { close(informerStartedChan) })
return w, err return w, err
}, },
}, },
&api.Node{}, &v1.Node{},
0, 0,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
n, ok := newObj.(*api.Node) n, ok := newObj.(*v1.Node)
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
if nodePredicate(n) { if nodePredicate(n) {
observedMatchingNode = true observedMatchingNode = true
@ -323,26 +324,26 @@ func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
// Returns true if an event matching the predicate was emitted from the system // Returns true if an event matching the predicate was emitted from the system
// after performing the supplied action. // after performing the supplied action.
func observeEventAfterAction(f *framework.Framework, eventPredicate func(*api.Event) bool, action func() error) (bool, error) { func observeEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Event) bool, action func() error) (bool, error) {
observedMatchingEvent := false observedMatchingEvent := false
// Create an informer to list/watch events from the test framework namespace. // Create an informer to list/watch events from the test framework namespace.
_, controller := cache.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options) ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
return ls, err return ls, err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options) w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options)
return w, err return w, err
}, },
}, },
&api.Event{}, &v1.Event{},
0, 0,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
e, ok := obj.(*api.Event) e, ok := obj.(*v1.Event)
By(fmt.Sprintf("Considering event: \nType = [%s], Reason = [%s], Message = [%s]", e.Type, e.Reason, e.Message)) By(fmt.Sprintf("Considering event: \nType = [%s], Reason = [%s], Message = [%s]", e.Type, e.Reason, e.Message))
Expect(ok).To(Equal(true)) Expect(ok).To(Equal(true))
if ok && eventPredicate(e) { if ok && eventPredicate(e) {

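For reference, the usage shape of observeEventAfterAction as it appears in the specs earlier in this file:

    success, err := observeEventAfterAction(f, predicate, action)
    Expect(err).NotTo(HaveOccurred())
    Expect(success).To(Equal(true))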
View File

@ -30,11 +30,11 @@ import (
"github.com/aws/aws-sdk-go/service/ec2" "github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce" gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@ -54,8 +54,8 @@ const (
var _ = framework.KubeDescribe("Pod Disks", func() { var _ = framework.KubeDescribe("Pod Disks", func() {
var ( var (
podClient unversionedcore.PodInterface podClient v1core.PodInterface
nodeClient unversionedcore.NodeInterface nodeClient v1core.NodeInterface
host0Name types.NodeName host0Name types.NodeName
host1Name types.NodeName host1Name types.NodeName
) )
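How podClient and nodeClient are initialized is elided by the next hunk; presumably a BeforeEach along these lines, following the f.ClientSet.Core() pattern used throughout this commit (a sketch, not the verbatim body):

    BeforeEach(func() {
        podClient = f.ClientSet.Core().Pods(f.Namespace.Name)
        nodeClient = f.ClientSet.Core().Nodes()
    })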
@ -91,8 +91,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors. // Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
By("cleaning up PD-RW test environment") By("cleaning up PD-RW test environment")
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("deleting host0Pod") By("deleting host0Pod")
// Delete pod with 0 grace period // Delete pod with 0 grace period
framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes") By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod) _, err = podClient.Create(host1Pod)
@ -131,7 +131,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */)) framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
By("deleting host1Pod") By("deleting host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod") framework.ExpectNoError(podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1Pod")
By("Test completed successfully, waiting for PD to safely detach") By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
@ -155,8 +155,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors. // Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
By("cleaning up PD-RW test environment") By("cleaning up PD-RW test environment")
podClient.Delete(host0Pod.Name, &api.DeleteOptions{}) podClient.Delete(host0Pod.Name, &v1.DeleteOptions{})
podClient.Delete(host1Pod.Name, &api.DeleteOptions{}) podClient.Delete(host1Pod.Name, &v1.DeleteOptions{})
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
@ -177,7 +177,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("deleting host0Pod") By("deleting host0Pod")
// Delete pod with default grace period 30s // Delete pod with default grace period 30s
framework.ExpectNoError(podClient.Delete(host0Pod.Name, &api.DeleteOptions{}), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes") By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod) _, err = podClient.Create(host1Pod)
@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */)) framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
By("deleting host1Pod") By("deleting host1Pod")
framework.ExpectNoError(podClient.Delete(host1Pod.Name, &api.DeleteOptions{}), "Failed to delete host1Pod") framework.ExpectNoError(podClient.Delete(host1Pod.Name, &v1.DeleteOptions{}), "Failed to delete host1Pod")
By("Test completed successfully, waiting for PD to safely detach") By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
@ -219,9 +219,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RO test environment") By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors. // Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)) podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0))
podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)) podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
@ -230,7 +230,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Failed to create rwPod") framework.ExpectNoError(err, "Failed to create rwPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
// Delete pod with 0 grace period // Delete pod with 0 grace period
framework.ExpectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name)) framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes") By("submitting host0ROPod to kubernetes")
@ -246,10 +246,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod") By("deleting host0ROPod")
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod") framework.ExpectNoError(podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0ROPod")
By("deleting host1ROPod") By("deleting host1ROPod")
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod") framework.ExpectNoError(podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1ROPod")
By("Test completed successfully, waiting for PD to safely detach") By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
@ -271,9 +271,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RO test environment") By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors. // Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
podClient.Delete(rwPod.Name, &api.DeleteOptions{}) podClient.Delete(rwPod.Name, &v1.DeleteOptions{})
podClient.Delete(host0ROPod.Name, &api.DeleteOptions{}) podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{})
podClient.Delete(host1ROPod.Name, &api.DeleteOptions{}) podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{})
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}() }()
@ -282,7 +282,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Failed to create rwPod") framework.ExpectNoError(err, "Failed to create rwPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
// Delete pod with default grace period 30s // Delete pod with default grace period 30s
framework.ExpectNoError(podClient.Delete(rwPod.Name, &api.DeleteOptions{}), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(rwPod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name)) framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes") By("submitting host0ROPod to kubernetes")
@ -298,10 +298,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name)) framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod") By("deleting host0ROPod")
framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &api.DeleteOptions{}), "Failed to delete host0ROPod") framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host0ROPod")
By("deleting host1ROPod") By("deleting host1ROPod")
framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &api.DeleteOptions{}), "Failed to delete host1ROPod") framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host1ROPod")
By("Test completed successfully, waiting for PD to safely detach") By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name) waitForPDDetach(diskName, host0Name)
@ -315,14 +315,14 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
diskName, err := createPDWithRetry() diskName, err := createPDWithRetry()
framework.ExpectNoError(err, "Error creating PD") framework.ExpectNoError(err, "Error creating PD")
numContainers := 4 numContainers := 4
var host0Pod *api.Pod var host0Pod *v1.Pod
defer func() { defer func() {
By("cleaning up PD-RW test environment") By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors. // Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
if host0Pod != nil { if host0Pod != nil {
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
} }
detachAndDeletePDs(diskName, []types.NodeName{host0Name}) detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}() }()
@ -354,7 +354,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod") By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
} }
By("Test completed successfully, waiting for PD to safely detach") By("Test completed successfully, waiting for PD to safely detach")
@ -370,14 +370,14 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("creating PD2") By("creating PD2")
disk2Name, err := createPDWithRetry() disk2Name, err := createPDWithRetry()
framework.ExpectNoError(err, "Error creating PD2") framework.ExpectNoError(err, "Error creating PD2")
var host0Pod *api.Pod var host0Pod *v1.Pod
defer func() { defer func() {
By("cleaning up PD-RW test environment") By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors. // Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed. // Teardown should do nothing unless test failed.
if host0Pod != nil { if host0Pod != nil {
podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)) podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
} }
detachAndDeletePDs(disk1Name, []types.NodeName{host0Name}) detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
detachAndDeletePDs(disk2Name, []types.NodeName{host0Name}) detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
@ -413,7 +413,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify) verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod") By("deleting host0Pod")
framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod") framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
} }
By("Test completed successfully, waiting for PD to safely detach") By("Test completed successfully, waiting for PD to safely detach")
@ -590,8 +590,8 @@ func detachPD(nodeName types.NodeName, pdName string) error {
} }
} }
func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *api.Pod { func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *v1.Pod {
containers := make([]api.Container, numContainers) containers := make([]v1.Container, numContainers)
for i := range containers { for i := range containers {
containers[i].Name = "mycontainer" containers[i].Name = "mycontainer"
if numContainers > 1 { if numContainers > 1 {
@ -602,37 +602,37 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
containers[i].Command = []string{"sleep", "6000"} containers[i].Command = []string{"sleep", "6000"}
containers[i].VolumeMounts = make([]api.VolumeMount, len(diskNames)) containers[i].VolumeMounts = make([]v1.VolumeMount, len(diskNames))
for k := range diskNames { for k := range diskNames {
containers[i].VolumeMounts[k].Name = fmt.Sprintf("testpd%v", k+1) containers[i].VolumeMounts[k].Name = fmt.Sprintf("testpd%v", k+1)
containers[i].VolumeMounts[k].MountPath = fmt.Sprintf("/testpd%v", k+1) containers[i].VolumeMounts[k].MountPath = fmt.Sprintf("/testpd%v", k+1)
} }
containers[i].Resources.Limits = api.ResourceList{} containers[i].Resources.Limits = v1.ResourceList{}
containers[i].Resources.Limits[api.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI) containers[i].Resources.Limits[v1.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI)
} }
pod := &api.Pod{ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "pd-test-" + string(uuid.NewUUID()), Name: "pd-test-" + string(uuid.NewUUID()),
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: containers, Containers: containers,
NodeName: string(targetNode), NodeName: string(targetNode),
}, },
} }
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" { if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
pod.Spec.Volumes = make([]api.Volume, len(diskNames)) pod.Spec.Volumes = make([]v1.Volume, len(diskNames))
for k, diskName := range diskNames { for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1) pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
pod.Spec.Volumes[k].VolumeSource = api.VolumeSource{ pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName, PDName: diskName,
FSType: "ext4", FSType: "ext4",
ReadOnly: readOnly, ReadOnly: readOnly,
@ -640,11 +640,11 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
} }
} }
} else if framework.TestContext.Provider == "aws" { } else if framework.TestContext.Provider == "aws" {
pod.Spec.Volumes = make([]api.Volume, len(diskNames)) pod.Spec.Volumes = make([]v1.Volume, len(diskNames))
for k, diskName := range diskNames { for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1) pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
pod.Spec.Volumes[k].VolumeSource = api.VolumeSource{ pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName, VolumeID: diskName,
FSType: "ext4", FSType: "ext4",
ReadOnly: readOnly, ReadOnly: readOnly,
@ -711,7 +711,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
} }
func waitForPDInVolumesInUse( func waitForPDInVolumesInUse(
nodeClient unversionedcore.NodeInterface, nodeClient v1core.NodeInterface,
diskName string, diskName string,
nodeName types.NodeName, nodeName types.NodeName,
timeout time.Duration, timeout time.Duration,

View File

@ -22,12 +22,12 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper" "k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -97,7 +97,7 @@ func pvPvcCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap)
// Delete the PVC and wait for the PV to become Available again. Validate that the PV // Delete the PVC and wait for the PV to become Available again. Validate that the PV
// has been recycled (this makes an assumption about the reclaimPolicy). Caller tells this func which // has been recycled (this makes an assumption about the reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim. // phase value to expect for the pv bound to the to-be-deleted claim.
func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume, expctPVPhase api.PersistentVolumePhase) { func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expctPVPhase v1.PersistentVolumePhase) {
pvname := pvc.Spec.VolumeName pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname) framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname)
@ -118,11 +118,11 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.Persisten
pv, err = c.Core().PersistentVolumes().Get(pv.Name) pv, err = c.Core().PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
cr := pv.Spec.ClaimRef cr := pv.Spec.ClaimRef
if expctPVPhase == api.VolumeAvailable { if expctPVPhase == v1.VolumeAvailable {
if cr != nil { // may be ok if cr != nil if cr != nil { // may be ok if cr != nil
Expect(len(cr.UID)).To(BeZero()) Expect(len(cr.UID)).To(BeZero())
} }
} else if expctPVPhase == api.VolumeBound { } else if expctPVPhase == v1.VolumeBound {
Expect(cr).NotTo(BeNil()) Expect(cr).NotTo(BeNil())
Expect(len(cr.UID)).NotTo(BeZero()) Expect(len(cr.UID)).NotTo(BeZero())
} }
@ -137,7 +137,7 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.Persisten
func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) { func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {
var boundPVs, deletedPVCs int var boundPVs, deletedPVCs int
var expctPVPhase api.PersistentVolumePhase var expctPVPhase v1.PersistentVolumePhase
for pvName := range pvols { for pvName := range pvols {
pv, err := c.Core().PersistentVolumes().Get(pvName) pv, err := c.Core().PersistentVolumes().Get(pvName)
@ -156,11 +156,11 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
// what Phase do we expect the PV that was bound to the claim to // what Phase do we expect the PV that was bound to the claim to
// be in after that claim is deleted? // be in after that claim is deleted?
expctPVPhase = api.VolumeAvailable expctPVPhase = v1.VolumeAvailable
if len(claims) > len(pvols) { if len(claims) > len(pvols) {
// there are excess pvcs so expect the previously bound // there are excess pvcs so expect the previously bound
// PV to become bound again // PV to become bound again
expctPVPhase = api.VolumeBound expctPVPhase = v1.VolumeBound
} }
deletePVCandValidatePV(c, ns, pvc, pv, expctPVPhase) deletePVCandValidatePV(c, ns, pvc, pv, expctPVPhase)
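
The phase expectation above reduces to a small rule; a hedged sketch (the helper name is hypothetical, not part of this diff):

// expectedPhaseAfterDelete: with more claims than volumes, a recycled PV is
// expected to be re-bound to an excess claim; otherwise it should return to
// Available.
func expectedPhaseAfterDelete(numPVs, numPVCs int) v1.PersistentVolumePhase {
	if numPVCs > numPVs {
		return v1.VolumeBound
	}
	return v1.VolumeAvailable
}
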
@ -172,7 +172,7 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
} }
// create the PV resource. Fails test on error. // create the PV resource. Fails test on error.
func createPV(c clientset.Interface, pv *api.PersistentVolume) *api.PersistentVolume { func createPV(c clientset.Interface, pv *v1.PersistentVolume) *v1.PersistentVolume {
pv, err := c.Core().PersistentVolumes().Create(pv) pv, err := c.Core().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -180,7 +180,7 @@ func createPV(c clientset.Interface, pv *api.PersistentVolume) *api.PersistentVo
} }
// create the PVC resource. Fails test on error. // create the PVC resource. Fails test on error.
func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim) *api.PersistentVolumeClaim { func createPVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc) pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -193,9 +193,9 @@ func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim)
// Note: in the pre-bind case the real PVC name, which is generated, is not // Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created // known until after the PVC is instantiated. This is why the pvc is created
// before the pv. // before the pv.
func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim) { func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
var bindTo *api.PersistentVolumeClaim var bindTo *v1.PersistentVolumeClaim
var preBindMsg string var preBindMsg string
// make the pvc definition first // make the pvc definition first
@ -227,7 +227,7 @@ func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api
// Note: in the pre-bind case the real PV name, which is generated, is not // Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created // known until after the PV is instantiated. This is why the pv is created
// before the pvc. // before the pvc.
func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim) { func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
preBindMsg := "" preBindMsg := ""
if preBind { if preBind {
@ -256,8 +256,8 @@ func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api
func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns, serverIP string) (pvmap, pvcmap) { func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns, serverIP string) (pvmap, pvcmap) {
var i int var i int
var pv *api.PersistentVolume var pv *v1.PersistentVolume
var pvc *api.PersistentVolumeClaim var pvc *v1.PersistentVolumeClaim
pvMap := make(pvmap, numpvs) pvMap := make(pvmap, numpvs)
pvcMap := make(pvcmap, numpvcs) pvcMap := make(pvcmap, numpvcs)
@ -292,16 +292,16 @@ func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns, serverIP stri
} }
// Wait for the pv and pvc to bind to each other. // Wait for the pv and pvc to bind to each other.
func waitOnPVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) { func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// Wait for newly created PVC to bind to the PV // Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name) framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second) err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be // Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound. // since the PVC is already bound.
err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second) err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
// Re-get the pv and pvc objects // Re-get the pv and pvc objects
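
A minimal sketch of the kind of poll these framework waits presumably perform, using the versioned client (assumptions: c is a clientset.Interface and pvName names an existing PV):

import (
	"time"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/util/wait"
)

// waitForPVPhase polls the PV until it reports the target phase or times out.
func waitForPVPhase(c clientset.Interface, pvName string, phase v1.PersistentVolumePhase) error {
	return wait.Poll(3*time.Second, 300*time.Second, func() (bool, error) {
		pv, err := c.Core().PersistentVolumes().Get(pvName)
		if err != nil {
			return false, err
		}
		return pv.Status.Phase == phase, nil
	})
}
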
@ -335,7 +335,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
} }
for pvName := range pvols { for pvName := range pvols {
err := framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pvName, 3*time.Second, 180*time.Second) err := framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
if err != nil && len(pvols) > len(claims) { if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName) framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs") framework.Logf(" This may be ok since there are more pvs than pvcs")
@ -352,7 +352,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
_, found := claims[pvcKey] _, found := claims[pvcKey]
Expect(found).To(BeTrue()) Expect(found).To(BeTrue())
err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second) err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
actualBinds++ actualBinds++
} }
@ -364,7 +364,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
} }
// Test the pod's exit code to be zero. // Test the pod's exit code to be zero.
func testPodSuccessOrFail(c clientset.Interface, ns string, pod *api.Pod) { func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) {
By("Pod should terminate with exitcode 0 (success)") By("Pod should terminate with exitcode 0 (success)")
err := framework.WaitForPodSuccessInNamespace(c, pod.Name, ns) err := framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
@ -373,7 +373,7 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *api.Pod) {
} }
// Delete the passed in pod. // Delete the passed in pod.
func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) { func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *v1.Pod) {
framework.Logf("Deleting pod %v", pod.Name) framework.Logf("Deleting pod %v", pod.Name)
err := c.Core().Pods(ns).Delete(pod.Name, nil) err := c.Core().Pods(ns).Delete(pod.Name, nil)
@ -408,7 +408,7 @@ func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns st
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's // Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
// phase. Note: the PV is deleted in the AfterEach, not here. // phase. Note: the PV is deleted in the AfterEach, not here.
func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) { func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC are bound correctly // 1. verify that the PV and PVC are bound correctly
By("Validating the PV-PVC binding") By("Validating the PV-PVC binding")
@ -421,7 +421,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 3. delete the PVC, wait for PV to become "Available" // 3. delete the PVC, wait for PV to become "Available"
By("Deleting the PVC to invoke the recycler") By("Deleting the PVC to invoke the recycler")
deletePVCandValidatePV(c, ns, pvc, pv, api.VolumeAvailable) deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
} }
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate // Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@ -460,11 +460,11 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
var ns string var ns string
var NFSconfig VolumeTestConfig var NFSconfig VolumeTestConfig
var serverIP string var serverIP string
var nfsServerPod *api.Pod var nfsServerPod *v1.Pod
// config for the nfs-server pod in the default namespace // config for the nfs-server pod in the default namespace
NFSconfig = VolumeTestConfig{ NFSconfig = VolumeTestConfig{
namespace: api.NamespaceDefault, namespace: v1.NamespaceDefault,
prefix: "nfs", prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.7", serverImage: "gcr.io/google_containers/volume-nfs:0.7",
serverPorts: []int{2049}, serverPorts: []int{2049},
@ -496,8 +496,8 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
Context("with Single PV - PVC pairs", func() { Context("with Single PV - PVC pairs", func() {
var pv *api.PersistentVolume var pv *v1.PersistentVolume
var pvc *api.PersistentVolumeClaim var pvc *v1.PersistentVolumeClaim
// Note: this is the only code where the pv is deleted. // Note: this is the only code where the pv is deleted.
AfterEach(func() { AfterEach(func() {
@ -627,41 +627,41 @@ func makePvcKey(ns, name string) types.NamespacedName {
// (instantiated) and thus the PV's ClaimRef cannot be completely filled-in in // (instantiated) and thus the PV's ClaimRef cannot be completely filled-in in
// this func. Therefore, the ClaimRef's name is added later in // this func. Therefore, the ClaimRef's name is added later in
// createPVCPV. // createPVCPV.
func makePersistentVolume(serverIP string, pvc *api.PersistentVolumeClaim) *api.PersistentVolume { func makePersistentVolume(serverIP string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolume {
// Specs are expected to match this test's PersistentVolumeClaim // Specs are expected to match this test's PersistentVolumeClaim
var claimRef *api.ObjectReference var claimRef *v1.ObjectReference
if pvc != nil { if pvc != nil {
claimRef = &api.ObjectReference{ claimRef = &v1.ObjectReference{
Name: pvc.Name, Name: pvc.Name,
Namespace: pvc.Namespace, Namespace: pvc.Namespace,
} }
} }
return &api.PersistentVolume{ return &v1.PersistentVolume{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: "nfs-", GenerateName: "nfs-",
Annotations: map[string]string{ Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777", volumehelper.VolumeGidAnnotationKey: "777",
}, },
}, },
Spec: api.PersistentVolumeSpec{ Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle, PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRecycle,
Capacity: api.ResourceList{ Capacity: v1.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"), v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
}, },
PersistentVolumeSource: api.PersistentVolumeSource{ PersistentVolumeSource: v1.PersistentVolumeSource{
NFS: &api.NFSVolumeSource{ NFS: &v1.NFSVolumeSource{
Server: serverIP, Server: serverIP,
Path: "/exports", Path: "/exports",
ReadOnly: false, ReadOnly: false,
}, },
}, },
AccessModes: []api.PersistentVolumeAccessMode{ AccessModes: []v1.PersistentVolumeAccessMode{
api.ReadWriteOnce, v1.ReadWriteOnce,
api.ReadOnlyMany, v1.ReadOnlyMany,
api.ReadWriteMany, v1.ReadWriteMany,
}, },
ClaimRef: claimRef, ClaimRef: claimRef,
}, },
@ -672,23 +672,23 @@ func makePersistentVolume(serverIP string, pvc *api.PersistentVolumeClaim) *api.
// Note: if this PVC is intended to be pre-bound to a PV, whose name is not // Note: if this PVC is intended to be pre-bound to a PV, whose name is not
// known until the PV is instantiated, then the func createPVPVC will add // known until the PV is instantiated, then the func createPVPVC will add
// pvc.Spec.VolumeName to this claim. // pvc.Spec.VolumeName to this claim.
func makePersistentVolumeClaim(ns string) *api.PersistentVolumeClaim { func makePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
// Specs are expected to match this test's PersistentVolume // Specs are expected to match this test's PersistentVolume
return &api.PersistentVolumeClaim{ return &v1.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: "pvc-", GenerateName: "pvc-",
Namespace: ns, Namespace: ns,
}, },
Spec: api.PersistentVolumeClaimSpec{ Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{ AccessModes: []v1.PersistentVolumeAccessMode{
api.ReadWriteOnce, v1.ReadWriteOnce,
api.ReadOnlyMany, v1.ReadOnlyMany,
api.ReadWriteMany, v1.ReadWriteMany,
}, },
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"), v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
}, },
}, },
}, },
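
Wiring the two helpers together, a sketch of the non-pre-bind path of createPVCPV: make and create the claim first, then a PV whose ClaimRef points back at it, and wait for the bind.

pvc := createPVC(c, ns, makePersistentVolumeClaim(ns))
pv := createPV(c, makePersistentVolume(serverIP, pvc))
waitOnPVandPVC(c, ns, pv, pvc)
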
@ -697,44 +697,44 @@ func makePersistentVolumeClaim(ns string) *api.PersistentVolumeClaim {
// Returns a pod definition based on the namespace. The pod references the PVC's // Returns a pod definition based on the namespace. The pod references the PVC's
// name. // name.
func makeWritePod(ns string, pvcName string) *api.Pod { func makeWritePod(ns string, pvcName string) *v1.Pod {
// Prepare pod that mounts the NFS volume again and // Prepare pod that mounts the NFS volume again and
// checks that /mnt/index.html was scrubbed there // checks that /mnt/index.html was scrubbed there
var isPrivileged bool = true var isPrivileged bool = true
return &api.Pod{ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "Pod", Kind: "Pod",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: "write-pod-", GenerateName: "write-pod-",
Namespace: ns, Namespace: ns,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "write-pod", Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"}, Command: []string{"/bin/sh"},
Args: []string{"-c", "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')"}, Args: []string{"-c", "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')"},
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "nfs-pvc", Name: "nfs-pvc",
MountPath: "/mnt", MountPath: "/mnt",
}, },
}, },
SecurityContext: &api.SecurityContext{ SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged, Privileged: &isPrivileged,
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyOnFailure, RestartPolicy: v1.RestartPolicyOnFailure,
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "nfs-pvc", Name: "nfs-pvc",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName, ClaimName: pvcName,
}, },
}, },


@ -32,8 +32,9 @@ import (
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/petset" "k8s.io/kubernetes/pkg/controller/petset"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
klabels "k8s.io/kubernetes/pkg/labels" klabels "k8s.io/kubernetes/pkg/labels"
@ -89,12 +90,12 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
"baz": "blah", "baz": "blah",
} }
headlessSvcName := "test" headlessSvcName := "test"
var petMounts, podMounts []api.VolumeMount var petMounts, podMounts []v1.VolumeMount
var ps *apps.StatefulSet var ps *apps.StatefulSet
BeforeEach(func() { BeforeEach(func() {
petMounts = []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}} petMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
podMounts = []api.VolumeMount{{Name: "home", MountPath: "/home"}} podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps = newStatefulSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels) ps = newStatefulSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels)
By("Creating service " + headlessSvcName + " in namespace " + ns) By("Creating service " + headlessSvcName + " in namespace " + ns)
@ -113,7 +114,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("should provide basic identity", func() { It("should provide basic identity", func() {
By("Creating statefulset " + psName + " in namespace " + ns) By("Creating statefulset " + psName + " in namespace " + ns)
ps.Spec.Replicas = 3 *(ps.Spec.Replicas) = 3
setInitializedAnnotation(ps, "false") setInitializedAnnotation(ps, "false")
_, err := c.Apps().StatefulSets(ns).Create(ps) _, err := c.Apps().StatefulSets(ns).Create(ps)
@ -148,7 +149,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("should handle healthy pet restarts during scale", func() { It("should handle healthy pet restarts during scale", func() {
By("Creating statefulset " + psName + " in namespace " + ns) By("Creating statefulset " + psName + " in namespace " + ns)
ps.Spec.Replicas = 2 *(ps.Spec.Replicas) = 2
setInitializedAnnotation(ps, "false") setInitializedAnnotation(ps, "false")
_, err := c.Apps().StatefulSets(ns).Create(ps) _, err := c.Apps().StatefulSets(ns).Create(ps)
@ -183,14 +184,14 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("should allow template updates", func() { It("should allow template updates", func() {
By("Creating stateful set " + psName + " in namespace " + ns) By("Creating stateful set " + psName + " in namespace " + ns)
ps.Spec.Replicas = 2 *(ps.Spec.Replicas) = 2
ps, err := c.Apps().StatefulSets(ns).Create(ps) ps, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
pst := statefulSetTester{c: c} pst := statefulSetTester{c: c}
pst.waitForRunningAndReady(ps.Spec.Replicas, ps) pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
newImage := newNginxImage newImage := newNginxImage
oldImage := ps.Spec.Template.Spec.Containers[0].Image oldImage := ps.Spec.Template.Spec.Containers[0].Image
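
Because apps/v1beta1 moves Spec.Replicas to *int32, the tests now write through the pointer (*(ps.Spec.Replicas) = 2) or build one inline. A small helper (hypothetical, not part of this diff) keeps that readable:

// int32Ptr returns a pointer to its argument, for optional replica counts.
func int32Ptr(i int32) *int32 { return &i }

// e.g. ps.Spec.Replicas = int32Ptr(2)
// Note: *(ps.Spec.Replicas) = 2 requires the field to be non-nil already.
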
@ -206,10 +207,10 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
pst.deletePetAtIndex(updateIndex, ps) pst.deletePetAtIndex(updateIndex, ps)
By("Waiting for all stateful pods to be running again") By("Waiting for all stateful pods to be running again")
pst.waitForRunningAndReady(ps.Spec.Replicas, ps) pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
By(fmt.Sprintf("Verifying stateful pod at index %d is updated", updateIndex)) By(fmt.Sprintf("Verifying stateful pod at index %d is updated", updateIndex))
verify := func(pod *api.Pod) { verify := func(pod *v1.Pod) {
podImage := pod.Spec.Containers[0].Image podImage := pod.Spec.Containers[0].Image
Expect(podImage).To(Equal(newImage), fmt.Sprintf("Expected stateful pod image %s updated to %s", podImage, newImage)) Expect(podImage).To(Equal(newImage), fmt.Sprintf("Expected stateful pod image %s updated to %s", podImage, newImage))
} }
@ -218,7 +219,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("Scaling down before scale up is finished should wait until current pod will be running and ready before it will be removed", func() { It("Scaling down before scale up is finished should wait until current pod will be running and ready before it will be removed", func() {
By("Creating stateful set " + psName + " in namespace " + ns + ", and pausing scale operations after each pod") By("Creating stateful set " + psName + " in namespace " + ns + ", and pausing scale operations after each pod")
testProbe := &api.Probe{Handler: api.Handler{HTTPGet: &api.HTTPGetAction{ testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
Path: "/index.html", Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80}}}} Port: intstr.IntOrString{IntVal: 80}}}}
ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, labels) ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, labels)
@ -247,8 +248,8 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
expectedPodName := ps.Name + "-1" expectedPodName := ps.Name + "-1"
expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName) expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
watcher, err := f.ClientSet.Core().Pods(ns).Watch(api.SingleObject( watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.SingleObject(
api.ObjectMeta{ v1.ObjectMeta{
Name: expectedPod.Name, Name: expectedPod.Name,
ResourceVersion: expectedPod.ResourceVersion, ResourceVersion: expectedPod.ResourceVersion,
}, },
@ -258,16 +259,16 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
By("Verifying the 2nd pod is removed only when it becomes running and ready") By("Verifying the 2nd pod is removed only when it becomes running and ready")
pst.restoreProbe(ps, testProbe) pst.restoreProbe(ps, testProbe)
_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) { _, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
pod := event.Object.(*api.Pod) pod := event.Object.(*v1.Pod)
if event.Type == watch.Deleted && pod.Name == expectedPodName { if event.Type == watch.Deleted && pod.Name == expectedPodName {
return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name) return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name)
} }
framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v", framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
event.Type, pod.Name, pod.Status.Phase, api.IsPodReady(pod)) event.Type, pod.Name, pod.Status.Phase, v1.IsPodReady(pod))
if pod.Name != expectedPodName { if pod.Name != expectedPodName {
return false, nil return false, nil
} }
if pod.Status.Phase == api.PodRunning && api.IsPodReady(pod) { if pod.Status.Phase == v1.PodRunning && v1.IsPodReady(pod) {
return true, nil return true, nil
} }
return false, nil return false, nil
@ -278,13 +279,13 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("Scaling should happen in predictable order and halt if any pet is unhealthy", func() { It("Scaling should happen in predictable order and halt if any pet is unhealthy", func() {
psLabels := klabels.Set(labels) psLabels := klabels.Set(labels)
By("Initializing watcher for selector " + psLabels.String()) By("Initializing watcher for selector " + psLabels.String())
watcher, err := f.ClientSet.Core().Pods(ns).Watch(api.ListOptions{ watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
LabelSelector: psLabels.AsSelector(), LabelSelector: psLabels.AsSelector().String(),
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating stateful set " + psName + " in namespace " + ns) By("Creating stateful set " + psName + " in namespace " + ns)
testProbe := &api.Probe{Handler: api.Handler{HTTPGet: &api.HTTPGetAction{ testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
Path: "/index.html", Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80}}}} Port: intstr.IntOrString{IntVal: 80}}}}
ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, psLabels) ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, psLabels)
@ -294,11 +295,11 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
By("Waiting until all stateful set " + psName + " replicas will be running in namespace " + ns) By("Waiting until all stateful set " + psName + " replicas will be running in namespace " + ns)
pst := &statefulSetTester{c: c} pst := &statefulSetTester{c: c}
pst.waitForRunningAndReady(ps.Spec.Replicas, ps) pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
By("Confirming that stateful set scale up will halt with unhealthy pet") By("Confirming that stateful set scale up will halt with unhealthy pet")
pst.breakProbe(ps, testProbe) pst.breakProbe(ps, testProbe)
pst.waitForRunningAndNotReady(ps.Spec.Replicas, ps) pst.waitForRunningAndNotReady(*ps.Spec.Replicas, ps)
pst.updateReplicas(ps, 3) pst.updateReplicas(ps, 3)
pst.confirmPetCount(1, ps, 10*time.Second) pst.confirmPetCount(1, ps, 10*time.Second)
@ -312,7 +313,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
if event.Type != watch.Added { if event.Type != watch.Added {
return false, nil return false, nil
} }
pod := event.Object.(*api.Pod) pod := event.Object.(*v1.Pod)
if pod.Name == expectedOrder[0] { if pod.Name == expectedOrder[0] {
expectedOrder = expectedOrder[1:] expectedOrder = expectedOrder[1:]
} }
@ -322,8 +323,8 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Scale down will halt with unhealthy pet") By("Scale down will halt with unhealthy pet")
watcher, err = f.ClientSet.Core().Pods(ns).Watch(api.ListOptions{ watcher, err = f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
LabelSelector: psLabels.AsSelector(), LabelSelector: psLabels.AsSelector().String(),
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
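
The versioned ListOptions carries the selector as a plain string, so a labels.Selector must be rendered with String() before listing or watching; a minimal sketch using the names from the test above:

watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
	LabelSelector: psLabels.AsSelector().String(), // a string in v1, not a labels.Selector
})
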
@ -342,7 +343,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
if event.Type != watch.Deleted { if event.Type != watch.Deleted {
return false, nil return false, nil
} }
pod := event.Object.(*api.Pod) pod := event.Object.(*v1.Pod)
if pod.Name == expectedOrder[0] { if pod.Name == expectedOrder[0] {
expectedOrder = expectedOrder[1:] expectedOrder = expectedOrder[1:]
} }
@ -430,17 +431,17 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
node := nodes.Items[0] node := nodes.Items[0]
By("creating pod with conflicting port in namespace " + f.Namespace.Name) By("creating pod with conflicting port in namespace " + f.Namespace.Name)
conflictingPort := api.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"} conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7", Image: "gcr.io/google_containers/nginx-slim:0.7",
Ports: []api.ContainerPort{conflictingPort}, Ports: []v1.ContainerPort{conflictingPort},
}, },
}, },
NodeName: node.Name, NodeName: node.Name,
@ -464,11 +465,11 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
var initialPetPodUID types.UID var initialPetPodUID types.UID
By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name) By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName})) w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(v1.SingleObject(v1.ObjectMeta{Name: petPodName}))
framework.ExpectNoError(err) framework.ExpectNoError(err)
// we need to get the UID from the pod in any state and wait until the stateful set controller removes the pod at least once // we need to get the UID from the pod in any state and wait until the stateful set controller removes the pod at least once
_, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) { _, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) {
pod := event.Object.(*api.Pod) pod := event.Object.(*v1.Pod)
switch event.Type { switch event.Type {
case watch.Deleted: case watch.Deleted:
framework.Logf("Observed delete event for pet pod %v in namespace %v", pod.Name, pod.Namespace) framework.Logf("Observed delete event for pet pod %v in namespace %v", pod.Name, pod.Namespace)
@ -487,7 +488,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
} }
By("removing pod with conflicting port in namespace " + f.Namespace.Name) By("removing pod with conflicting port in namespace " + f.Namespace.Name)
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("waiting when pet pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state") By("waiting when pet pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
@ -497,7 +498,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
if err != nil { if err != nil {
return err return err
} }
if petPod.Status.Phase != api.PodRunning { if petPod.Status.Phase != v1.PodRunning {
return fmt.Errorf("Pod %v is not in running phase: %v", petPod.Name, petPod.Status.Phase) return fmt.Errorf("Pod %v is not in running phase: %v", petPod.Name, petPod.Status.Phase)
} else if petPod.UID == initialPetPodUID { } else if petPod.UID == initialPetPodUID {
return fmt.Errorf("Pod %v wasn't recreated: %v == %v", petPod.Name, petPod.UID, initialPetPodUID) return fmt.Errorf("Pod %v wasn't recreated: %v == %v", petPod.Name, petPod.UID, initialPetPodUID)
@ -508,7 +509,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
}) })
func dumpDebugInfo(c clientset.Interface, ns string) { func dumpDebugInfo(c clientset.Interface, ns string) {
pl, _ := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) pl, _ := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
for _, p := range pl.Items { for _, p := range pl.Items {
desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns)) desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns))
framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc) framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc)
@ -557,7 +558,7 @@ func (c *clusterAppTester) run() {
if restartCluster { if restartCluster {
By("Restarting stateful set " + ps.Name) By("Restarting stateful set " + ps.Name)
c.tester.restart(ps) c.tester.restart(ps)
c.tester.waitForRunningAndReady(ps.Spec.Replicas, ps) c.tester.waitForRunningAndReady(*ps.Spec.Replicas, ps)
} }
} }
@ -746,9 +747,9 @@ func (p *statefulSetTester) createStatefulSet(manifestPath, ns string) *apps.Sta
framework.Logf(fmt.Sprintf("creating " + ps.Name + " service")) framework.Logf(fmt.Sprintf("creating " + ps.Name + " service"))
framework.RunKubectlOrDie("create", "-f", mkpath("service.yaml"), fmt.Sprintf("--namespace=%v", ns)) framework.RunKubectlOrDie("create", "-f", mkpath("service.yaml"), fmt.Sprintf("--namespace=%v", ns))
framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, ps.Spec.Replicas, ps.Spec.Selector)) framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, *(ps.Spec.Replicas), ps.Spec.Selector))
framework.RunKubectlOrDie("create", "-f", mkpath("petset.yaml"), fmt.Sprintf("--namespace=%v", ns)) framework.RunKubectlOrDie("create", "-f", mkpath("petset.yaml"), fmt.Sprintf("--namespace=%v", ns))
p.waitForRunningAndReady(ps.Spec.Replicas, ps) p.waitForRunningAndReady(*ps.Spec.Replicas, ps)
return ps return ps
} }
@ -797,7 +798,7 @@ func (p *statefulSetTester) checkHostname(ps *apps.StatefulSet) error {
func (p *statefulSetTester) saturate(ps *apps.StatefulSet) { func (p *statefulSetTester) saturate(ps *apps.StatefulSet) {
// TODO: Watch events and check that creation timestamps don't overlap // TODO: Watch events and check that creation timestamps don't overlap
var i int32 var i int32
for i = 0; i < ps.Spec.Replicas; i++ { for i = 0; i < *(ps.Spec.Replicas); i++ {
framework.Logf("Waiting for pet at index " + fmt.Sprintf("%v", i+1) + " to enter Running") framework.Logf("Waiting for pet at index " + fmt.Sprintf("%v", i+1) + " to enter Running")
p.waitForRunningAndReady(i+1, ps) p.waitForRunningAndReady(i+1, ps)
framework.Logf("Marking pet at index " + fmt.Sprintf("%v", i) + " healthy") framework.Logf("Marking pet at index " + fmt.Sprintf("%v", i) + " healthy")
@ -808,12 +809,12 @@ func (p *statefulSetTester) saturate(ps *apps.StatefulSet) {
func (p *statefulSetTester) deletePetAtIndex(index int, ps *apps.StatefulSet) { func (p *statefulSetTester) deletePetAtIndex(index int, ps *apps.StatefulSet) {
name := getPodNameAtIndex(index, ps) name := getPodNameAtIndex(index, ps)
noGrace := int64(0) noGrace := int64(0)
if err := p.c.Core().Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil { if err := p.c.Core().Pods(ps.Namespace).Delete(name, &v1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
framework.Failf("Failed to delete pet %v for StatefulSet %v/%v: %v", name, ps.Namespace, ps.Name, err) framework.Failf("Failed to delete pet %v for StatefulSet %v/%v: %v", name, ps.Namespace, ps.Name, err)
} }
} }
type verifyPodFunc func(*api.Pod) type verifyPodFunc func(*v1.Pod)
func (p *statefulSetTester) verifyPodAtIndex(index int, ps *apps.StatefulSet, verify verifyPodFunc) { func (p *statefulSetTester) verifyPodAtIndex(index int, ps *apps.StatefulSet, verify verifyPodFunc) {
name := getPodNameAtIndex(index, ps) name := getPodNameAtIndex(index, ps)
@ -831,9 +832,9 @@ func getPodNameAtIndex(index int, ps *apps.StatefulSet) string {
func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error { func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
name := ps.Name name := ps.Name
ns := ps.Namespace ns := ps.Namespace
p.update(ns, name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = count }) p.update(ns, name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = count })
var petList *api.PodList var petList *v1.PodList
pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
petList = p.getPodList(ps) petList = p.getPodList(ps)
if int32(len(petList.Items)) == count { if int32(len(petList.Items)) == count {
@ -844,8 +845,8 @@ func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
if pollErr != nil { if pollErr != nil {
unhealthy := []string{} unhealthy := []string{}
for _, pet := range petList.Items { for _, pet := range petList.Items {
delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, api.IsPodReady(&pet) delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, v1.IsPodReady(&pet)
if delTs != nil || phase != api.PodRunning || !readiness { if delTs != nil || phase != v1.PodRunning || !readiness {
unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness)) unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
} }
} }
@ -855,13 +856,13 @@ func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
} }
func (p *statefulSetTester) updateReplicas(ps *apps.StatefulSet, count int32) { func (p *statefulSetTester) updateReplicas(ps *apps.StatefulSet, count int32) {
p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = count }) p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = &count })
} }
func (p *statefulSetTester) restart(ps *apps.StatefulSet) { func (p *statefulSetTester) restart(ps *apps.StatefulSet) {
oldReplicas := ps.Spec.Replicas oldReplicas := *(ps.Spec.Replicas)
ExpectNoError(p.scale(ps, 0)) ExpectNoError(p.scale(ps, 0))
p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = oldReplicas }) p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = oldReplicas })
} }
func (p *statefulSetTester) update(ns, name string, update func(ps *apps.StatefulSet)) { func (p *statefulSetTester) update(ns, name string, update func(ps *apps.StatefulSet)) {
@ -882,10 +883,10 @@ func (p *statefulSetTester) update(ns, name string, update func(ps *apps.Statefu
framework.Failf("too many retries draining statefulset %q", name) framework.Failf("too many retries draining statefulset %q", name)
} }
func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *api.PodList { func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *v1.PodList {
selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
ExpectNoError(err) ExpectNoError(err)
podList, err := p.c.Core().Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector}) podList, err := p.c.Core().Pods(ps.Namespace).List(v1.ListOptions{LabelSelector: selector.String()})
ExpectNoError(err) ExpectNoError(err)
return podList return podList
} }
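
A hedged sketch (the helper name is hypothetical) of the versioned readiness check the waits below rely on:

// allRunningAndReady reports whether every pod in the list is Running and
// passes the v1 readiness check.
func allRunningAndReady(pods *v1.PodList) bool {
	for i := range pods.Items {
		p := &pods.Items[i]
		if p.Status.Phase != v1.PodRunning || !v1.IsPodReady(p) {
			return false
		}
	}
	return true
}
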
@ -916,10 +917,10 @@ func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet,
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items)) return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
} }
for _, p := range podList.Items { for _, p := range podList.Items {
isReady := api.IsPodReady(&p) isReady := v1.IsPodReady(&p)
desiredReadiness := shouldBeReady == isReady desiredReadiness := shouldBeReady == isReady
framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, api.PodRunning, shouldBeReady, p.Status.Phase, isReady) framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
if p.Status.Phase != api.PodRunning || !desiredReadiness { if p.Status.Phase != v1.PodRunning || !desiredReadiness {
return false, nil return false, nil
} }
} }
@ -938,7 +939,7 @@ func (p *statefulSetTester) waitForRunningAndNotReady(numPets int32, ps *apps.St
p.waitForRunning(numPets, ps, false) p.waitForRunning(numPets, ps, false)
} }
func (p *statefulSetTester) breakProbe(ps *apps.StatefulSet, probe *api.Probe) error { func (p *statefulSetTester) breakProbe(ps *apps.StatefulSet, probe *v1.Probe) error {
path := probe.HTTPGet.Path path := probe.HTTPGet.Path
if path == "" { if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path) return fmt.Errorf("Path expected to be not empty: %v", path)
@ -947,7 +948,7 @@ func (p *statefulSetTester) breakProbe(ps *apps.StatefulSet, probe *api.Probe) e
return p.execInPets(ps, cmd) return p.execInPets(ps, cmd)
} }
func (p *statefulSetTester) restoreProbe(ps *apps.StatefulSet, probe *api.Probe) error { func (p *statefulSetTester) restoreProbe(ps *apps.StatefulSet, probe *v1.Probe) error {
path := probe.HTTPGet.Path path := probe.HTTPGet.Path
if path == "" { if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path) return fmt.Errorf("Path expected to be not empty: %v", path)
@ -960,7 +961,7 @@ func (p *statefulSetTester) setHealthy(ps *apps.StatefulSet) {
podList := p.getPodList(ps) podList := p.getPodList(ps)
markedHealthyPod := "" markedHealthyPod := ""
for _, pod := range podList.Items { for _, pod := range podList.Items {
if pod.Status.Phase != api.PodRunning { if pod.Status.Phase != v1.PodRunning {
framework.Failf("Found pod in %v cannot set health", pod.Status.Phase) framework.Failf("Found pod in %v cannot set health", pod.Status.Phase)
} }
if isInitialized(pod) { if isInitialized(pod) {
@ -969,7 +970,7 @@ func (p *statefulSetTester) setHealthy(ps *apps.StatefulSet) {
if markedHealthyPod != "" { if markedHealthyPod != "" {
framework.Failf("Found multiple non-healthy pets: %v and %v", pod.Name, markedHealthyPod) framework.Failf("Found multiple non-healthy pets: %v and %v", pod.Name, markedHealthyPod)
} }
p, err := framework.UpdatePodWithRetries(p.c, pod.Namespace, pod.Name, func(up *api.Pod) { p, err := framework.UpdatePodWithRetries(p.c, pod.Namespace, pod.Name, func(up *v1.Pod) {
up.Annotations[petset.StatefulSetInitAnnotation] = "true" up.Annotations[petset.StatefulSetInitAnnotation] = "true"
}) })
ExpectNoError(err) ExpectNoError(err)
@ -1001,7 +1002,7 @@ func (p *statefulSetTester) waitForStatus(ps *apps.StatefulSet, expectedReplicas
func deleteAllStatefulSets(c clientset.Interface, ns string) { func deleteAllStatefulSets(c clientset.Interface, ns string) {
pst := &statefulSetTester{c: c} pst := &statefulSetTester{c: c}
psList, err := c.Apps().StatefulSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) psList, err := c.Apps().StatefulSets(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
ExpectNoError(err) ExpectNoError(err)
// Scale down each statefulset, then delete it completely. // Scale down each statefulset, then delete it completely.
@ -1023,7 +1024,7 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) {
pvNames := sets.NewString() pvNames := sets.NewString()
// TODO: Don't assume all pvcs in the ns belong to a statefulset // TODO: Don't assume all pvcs in the ns belong to a statefulset
pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()}) pvcList, err := c.Core().PersistentVolumeClaims(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
framework.Logf("WARNING: Failed to list pvcs, retrying %v", err) framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil return false, nil
@ -1043,7 +1044,7 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) {
} }
pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) { pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()}) pvList, err := c.Core().PersistentVolumes().List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil { if err != nil {
framework.Logf("WARNING: Failed to list pvs, retrying %v", err) framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil return false, nil
@ -1089,7 +1090,7 @@ func pollReadWithTimeout(pet petTester, petNumber int, key, expectedVal string)
return err return err
} }
func isInitialized(pod api.Pod) bool { func isInitialized(pod v1.Pod) bool {
initialized, ok := pod.Annotations[petset.StatefulSetInitAnnotation] initialized, ok := pod.Annotations[petset.StatefulSetInitAnnotation]
if !ok { if !ok {
return false return false
@ -1105,40 +1106,40 @@ func dec(i int64, exponent int) *inf.Dec {
return inf.NewDec(i, inf.Scale(-exponent)) return inf.NewDec(i, inf.Scale(-exponent))
} }
func newPVC(name string) api.PersistentVolumeClaim { func newPVC(name string) v1.PersistentVolumeClaim {
return api.PersistentVolumeClaim{ return v1.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Annotations: map[string]string{ Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "anything", "volume.alpha.kubernetes.io/storage-class": "anything",
}, },
}, },
Spec: api.PersistentVolumeClaimSpec{ Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{ AccessModes: []v1.PersistentVolumeAccessMode{
api.ReadWriteOnce, v1.ReadWriteOnce,
}, },
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
}, },
}, },
}, },
} }
} }
func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts []api.VolumeMount, podMounts []api.VolumeMount, labels map[string]string) *apps.StatefulSet { func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *apps.StatefulSet {
mounts := append(petMounts, podMounts...) mounts := append(petMounts, podMounts...)
claims := []api.PersistentVolumeClaim{} claims := []v1.PersistentVolumeClaim{}
for _, m := range petMounts { for _, m := range petMounts {
claims = append(claims, newPVC(m.Name)) claims = append(claims, newPVC(m.Name))
} }
vols := []api.Volume{} vols := []v1.Volume{}
for _, m := range podMounts { for _, m := range podMounts {
vols = append(vols, api.Volume{ vols = append(vols, v1.Volume{
Name: m.Name, Name: m.Name,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: &api.HostPathVolumeSource{ HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", m.Name), Path: fmt.Sprintf("/tmp/%v", m.Name),
}, },
}, },
@ -1150,7 +1151,7 @@ func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts
Kind: "StatefulSet", Kind: "StatefulSet",
APIVersion: "apps/v1beta1", APIVersion: "apps/v1beta1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Namespace: ns, Namespace: ns,
}, },
@ -1158,14 +1159,14 @@ func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchLabels: labels, MatchLabels: labels,
}, },
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: labels, Labels: labels,
Annotations: map[string]string{}, Annotations: map[string]string{},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: nginxImage, Image: nginxImage,


@ -22,7 +22,7 @@ import (
. "github.com/onsi/ginkgo" . "github.com/onsi/ginkgo"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
for count < 1000 { for count < 1000 {
pod, err := createTerminatingPod(f) pod, err := createTerminatingPod(f)
pod.ResourceVersion = "" pod.ResourceVersion = ""
pod.Status.Phase = api.PodFailed pod.Status.Phase = v1.PodFailed
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).UpdateStatus(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil { if err != nil {
framework.Failf("err failing pod: %v", err) framework.Failf("err failing pod: %v", err)
@ -55,13 +55,13 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
// The gc controller polls every 30s and fires off a goroutine per // The gc controller polls every 30s and fires off a goroutine per
// pod to terminate. // pod to terminate.
var err error var err error
var pods *api.PodList var pods *v1.PodList
timeout := 2 * time.Minute timeout := 2 * time.Minute
gcThreshold := 100 gcThreshold := 100
By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold)) By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) { pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{}) pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
if err != nil { if err != nil {
framework.Logf("Failed to list pod %v", err) framework.Logf("Failed to list pod %v", err)
return false, nil return false, nil
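
The poll above, sketched in isolation (f and gcThreshold as in the test): list with the versioned options and succeed once the garbage collector has pruned the pod count to the threshold.

pollErr := wait.Poll(1*time.Minute, 2*time.Minute, func() (bool, error) {
	pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
	if err != nil {
		framework.Logf("Failed to list pod %v", err)
		return false, nil // transient list errors are retried
	}
	return len(pods.Items) <= gcThreshold, nil
})
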
@ -78,17 +78,17 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
}) })
}) })
func createTerminatingPod(f *framework.Framework) (*api.Pod, error) { func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) {
uuid := uuid.NewUUID() uuid := uuid.NewUUID()
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: string(uuid), Name: string(uuid),
Annotations: map[string]string{ Annotations: map[string]string{
"scheduler.alpha.kubernetes.io/name": "please don't schedule my pods", "scheduler.alpha.kubernetes.io/name": "please don't schedule my pods",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: string(uuid), Name: string(uuid),
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",


@ -24,7 +24,7 @@ import (
"strconv" "strconv"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -45,16 +45,16 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("creating the pod") By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID()) name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond()) value := strconv.Itoa(time.Now().Nanosecond())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"time": value, "time": value,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7", Image: "gcr.io/google_containers/nginx-slim:0.7",
@ -65,12 +65,12 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("setting up watch") By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options) pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod") Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(0)) Expect(len(pods.Items)).To(Equal(0))
options = api.ListOptions{ options = v1.ListOptions{
LabelSelector: selector, LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion, ResourceVersion: pods.ListMeta.ResourceVersion,
} }
w, err := podClient.Watch(options) w, err := podClient.Watch(options)
@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("verifying the pod is in kubernetes") By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = api.ListOptions{LabelSelector: selector} options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options) pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod") Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(1)) Expect(len(pods.Items)).To(Equal(1))
@ -159,13 +159,13 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("verifying pod deletion was observed") By("verifying pod deletion was observed")
deleted := false deleted := false
timeout := false timeout := false
var lastPod *api.Pod var lastPod *v1.Pod
timer := time.After(30 * time.Second) timer := time.After(30 * time.Second)
for !deleted && !timeout { for !deleted && !timeout {
select { select {
case event, _ := <-w.ResultChan(): case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted { if event.Type == watch.Deleted {
lastPod = event.Object.(*api.Pod) lastPod = event.Object.(*v1.Pod)
deleted = true deleted = true
} }
case <-timer: case <-timer:
@ -180,7 +180,7 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero()) Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value})) selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
options = api.ListOptions{LabelSelector: selector} options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options) pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods") Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0)) Expect(len(pods.Items)).To(Equal(0))
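
A minimal sketch of the deletion watch used above (w is the watch opened earlier; the test additionally races this loop against a 30-second timer): drain events until a Deleted event for the pod arrives, then inspect the final object.

var lastPod *v1.Pod
for event := range w.ResultChan() {
	if event.Type == watch.Deleted {
		lastPod = event.Object.(*v1.Pod)
		break
	}
}
// lastPod.Spec.TerminationGracePeriodSeconds is expected to be non-zero here.
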


@ -28,7 +28,7 @@ import (
"syscall" "syscall"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -46,18 +46,18 @@ var (
portForwardPortToStdOutV = version.MustParse("v1.3.0-alpha.4") portForwardPortToStdOutV = version.MustParse("v1.3.0-alpha.4")
) )
func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string) *api.Pod { func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string) *v1.Pod {
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "portforwardtester", Name: "portforwardtester",
Image: "gcr.io/google_containers/portforwardtester:1.0", Image: "gcr.io/google_containers/portforwardtester:1.0",
Env: []api.EnvVar{ Env: []v1.EnvVar{
{ {
Name: "BIND_PORT", Name: "BIND_PORT",
Value: "80", Value: "80",
@ -81,7 +81,7 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string) *a
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
} }
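The pattern above repeats throughout this commit: a composite literal whose nested types all move from pkg/api to pkg/api/v1. A stripped-down sketch (the pod name and image here are placeholders, not the test's values):

// Every nested type — ObjectMeta, PodSpec, Container, EnvVar — now comes
// from the v1 package; the literal's shape is otherwise unchanged.
pod := &v1.Pod{
	ObjectMeta: v1.ObjectMeta{
		Name:   "example",
		Labels: map[string]string{"name": "example"},
	},
	Spec: v1.PodSpec{
		Containers: []v1.Container{{
			Name:  "main",
			Image: "gcr.io/google_containers/pause:2.0",
			Env:   []v1.EnvVar{{Name: "BIND_PORT", Value: "80"}},
		}},
		RestartPolicy: v1.RestartPolicyNever,
	},
}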

View File

@ -21,8 +21,8 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -36,16 +36,16 @@ type State struct {
func testPreStop(c clientset.Interface, ns string) { func testPreStop(c clientset.Interface, ns string) {
// This is the server that will receive the preStop notification // This is the server that will receive the preStop notification
podDescr := &api.Pod{ podDescr := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "server", Name: "server",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "server", Name: "server",
Image: "gcr.io/google_containers/nettest:1.7", Image: "gcr.io/google_containers/nettest:1.7",
Ports: []api.ContainerPort{{ContainerPort: 8080}}, Ports: []v1.ContainerPort{{ContainerPort: 8080}},
}, },
}, },
}, },
@ -69,19 +69,19 @@ func testPreStop(c clientset.Interface, ns string) {
podOut, err := c.Core().Pods(ns).Get(podDescr.Name) podOut, err := c.Core().Pods(ns).Get(podDescr.Name)
framework.ExpectNoError(err, "getting pod info") framework.ExpectNoError(err, "getting pod info")
preStopDescr := &api.Pod{ preStopDescr := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "tester", Name: "tester",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "tester", Name: "tester",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sleep", "600"}, Command: []string{"sleep", "600"},
Lifecycle: &api.Lifecycle{ Lifecycle: &v1.Lifecycle{
PreStop: &api.Handler{ PreStop: &v1.Handler{
Exec: &api.ExecAction{ Exec: &v1.ExecAction{
Command: []string{ Command: []string{
"wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP), "wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP),
}, },
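The lifecycle hook converts the same way. A hedged sketch of the v1 preStop handler in isolation (the POST target below is a stand-in for the server pod's real IP):

// Exec runs inside the container immediately before termination; the test
// uses it to notify the server pod that preStop fired.
lifecycle := &v1.Lifecycle{
	PreStop: &v1.Handler{
		Exec: &v1.ExecAction{
			Command: []string{"wget", "-O-", "--post-data=" + val,
				"http://server-ip:8080/write"}, // placeholder address
		},
	},
}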

View File

@ -24,10 +24,10 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -49,7 +49,7 @@ const (
) )
var _ = framework.KubeDescribe("Proxy", func() { var _ = framework.KubeDescribe("Proxy", func() {
version := registered.GroupOrDie(api.GroupName).GroupVersion.Version version := registered.GroupOrDie(v1.GroupName).GroupVersion.Version
Context("version "+version, func() { Context("version "+version, func() {
options := framework.FrameworkOptions{ options := framework.FrameworkOptions{
ClientQPS: -1.0, ClientQPS: -1.0,
@ -71,13 +71,13 @@ var _ = framework.KubeDescribe("Proxy", func() {
It("should proxy through a service and a pod [Conformance]", func() { It("should proxy through a service and a pod [Conformance]", func() {
start := time.Now() start := time.Now()
labels := map[string]string{"proxy-service-target": "true"} labels := map[string]string{"proxy-service-target": "true"}
service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{ service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: "proxy-service-", GenerateName: "proxy-service-",
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: labels, Selector: labels,
Ports: []api.ServicePort{ Ports: []v1.ServicePort{
{ {
Name: "portname1", Name: "portname1",
Port: 80, Port: 80,
@ -107,14 +107,15 @@ var _ = framework.KubeDescribe("Proxy", func() {
// a simple server which serves the values of the // a simple server which serves the values of the
// environmental variables below. // environmental variables below.
By("starting an echo server on multiple ports") By("starting an echo server on multiple ports")
pods := []*api.Pod{} pods := []*v1.Pod{}
cfg := testutils.RCConfig{ cfg := testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab", InternalClient: f.InternalClientset,
Name: service.Name, Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
Namespace: f.Namespace.Name, Name: service.Name,
Replicas: 1, Namespace: f.Namespace.Name,
PollInterval: time.Second, Replicas: 1,
PollInterval: time.Second,
Env: map[string]string{ Env: map[string]string{
"SERVE_PORT_80": `<a href="/rewriteme">test</a>`, "SERVE_PORT_80": `<a href="/rewriteme">test</a>`,
"SERVE_PORT_1080": `<a href="/rewriteme">test</a>`, "SERVE_PORT_1080": `<a href="/rewriteme">test</a>`,
@ -132,9 +133,9 @@ var _ = framework.KubeDescribe("Proxy", func() {
"tlsdest1": 460, "tlsdest1": 460,
"tlsdest2": 462, "tlsdest2": 462,
}, },
ReadinessProbe: &api.Probe{ ReadinessProbe: &v1.Probe{
Handler: api.Handler{ Handler: v1.Handler{
HTTPGet: &api.HTTPGetAction{ HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80), Port: intstr.FromInt(80),
}, },
}, },
@ -146,7 +147,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
CreatedPods: &pods, CreatedPods: &pods,
} }
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred()) Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, cfg.Name) defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)
Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred()) Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
@ -260,7 +261,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
} }
if len(errs) != 0 { if len(errs) != 0 {
body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw() body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil { if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err) framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
} else { } else {
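Beyond the type renames, helpers that delete pods now need both clientsets: the versioned one for reads and the internal one for the pod reaper. A sketch of the new shape, assuming the framework fields introduced in this commit (the RC name and image are placeholders):

// RCConfig gains InternalClient alongside Client; cleanup passes both.
cfg := testutils.RCConfig{
	Client:         f.ClientSet,         // generated release_1_5 (v1) clientset
	InternalClient: f.InternalClientset, // internal clientset, newly required
	Name:           "example-rc",
	Namespace:      f.Namespace.Name,
	Image:          "gcr.io/google_containers/pause:2.0",
	Replicas:       1,
}
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)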

View File

@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/controller/replication" "k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -51,21 +51,21 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
}) })
}) })
func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *api.ReplicationController { func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *v1.ReplicationController {
zero := int64(0) zero := int64(0)
return &api.ReplicationController{ return &v1.ReplicationController{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: rsName, Name: rsName,
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: rcPodLabels, Labels: rcPodLabels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero, TerminationGracePeriodSeconds: &zero,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: imageName, Name: imageName,
Image: image, Image: image,
@ -89,25 +89,25 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// The source for the Docker container kubernetes/serve_hostname is // The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname // in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating replication controller %s", name)) By(fmt.Sprintf("Creating replication controller %s", name))
controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{ controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{ Selector: map[string]string{
"name": name, "name": name,
}, },
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name}, Labels: map[string]string{"name": name},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: name, Name: name,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
}, },
@ -118,7 +118,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// Cleanup the replication controller when we are done. // Cleanup the replication controller when we are done.
defer func() { defer func() {
// Resize the replication controller to zero to get rid of pods. // Resize the replication controller to zero to get rid of pods.
if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil { if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err) framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
} }
}() }()
@ -169,7 +169,7 @@ func rcConditionCheck(f *framework.Framework) {
if err != nil { if err != nil {
return false, err return false, err
} }
podQuota := quota.Status.Hard[api.ResourcePods] podQuota := quota.Status.Hard[v1.ResourcePods]
quantity := resource.MustParse("2") quantity := resource.MustParse("2")
return (&podQuota).Cmp(quantity) == 0, nil return (&podQuota).Cmp(quantity) == 0, nil
}) })
@ -197,7 +197,7 @@ func rcConditionCheck(f *framework.Framework) {
} }
conditions = rc.Status.Conditions conditions = rc.Status.Conditions
cond := replication.GetCondition(rc.Status, api.ReplicationControllerReplicaFailure) cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
return cond != nil, nil return cond != nil, nil
}) })
if err == wait.ErrWaitTimeout { if err == wait.ErrWaitTimeout {
@ -206,8 +206,9 @@ func rcConditionCheck(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name)) By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *api.ReplicationController) { rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
update.Spec.Replicas = 2 x := int32(2)
update.Spec.Replicas = &x
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -225,7 +226,7 @@ func rcConditionCheck(f *framework.Framework) {
} }
conditions = rc.Status.Conditions conditions = rc.Status.Conditions
cond := replication.GetCondition(rc.Status, api.ReplicationControllerReplicaFailure) cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
return cond == nil, nil return cond == nil, nil
}) })
if err == wait.ErrWaitTimeout { if err == wait.ErrWaitTimeout {
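The recurring func(i int32) *int32 { return &i }(replicas) closure exists because v1 makes Spec.Replicas a *int32, and the closure stores the address of a fresh copy rather than aliasing the caller's variable. A named helper (hypothetical, not part of this commit) reads more clearly:

// int32Ptr returns a pointer to a copy of i.
func int32Ptr(i int32) *int32 { return &i }

// usage in a spec literal:
spec := v1.ReplicationControllerSpec{
	Replicas: int32Ptr(replicas),
	Selector: map[string]string{"name": name},
}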

View File

@ -22,7 +22,8 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@ -64,7 +65,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
// events for the kube-system namespace on failures // events for the kube-system namespace on failures
namespaceName := api.NamespaceSystem namespaceName := api.NamespaceSystem
By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName)) By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
events, err := f.ClientSet.Core().Events(namespaceName).List(api.ListOptions{}) events, err := f.ClientSet.Core().Events(namespaceName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
for _, e := range events.Items { for _, e := range events.Items {
@ -160,7 +161,7 @@ func testReboot(c clientset.Interface, rebootCmd string) {
} }
} }
func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*api.Pod) { func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) {
printFn := func(id, log string, err error, previous bool) { printFn := func(id, log string, err error, previous bool) {
prefix := "Retrieving log for container" prefix := "Retrieving log for container"
if previous { if previous {
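Note the split visible above: namespace constants such as api.NamespaceSystem still come from the internal package, while request options are already versioned. A sketch of the resulting mixed call, as in the diff:

// List options are v1 even though the namespace constant is not yet.
events, err := f.ClientSet.Core().Events(api.NamespaceSystem).List(v1.ListOptions{})
if err != nil {
	framework.Failf("failed to list events: %v", err)
}
for _, e := range events.Items {
	// Log enough of each event to diagnose kube-system failures.
	framework.Logf("%s %s: %s", e.InvolvedObject.Kind, e.InvolvedObject.Name, e.Message)
}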

View File

@ -20,10 +20,10 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/replicaset" "k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
@ -37,18 +37,18 @@ import (
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet { func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
zero := int64(0) zero := int64(0)
return &extensions.ReplicaSet{ return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: rsName, Name: rsName,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: rsPodLabels, Labels: rsPodLabels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero, TerminationGracePeriodSeconds: &zero,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: imageName, Name: imageName,
Image: image, Image: image,
@ -60,14 +60,14 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
} }
} }
func newPodQuota(name, number string) *api.ResourceQuota { func newPodQuota(name, number string) *v1.ResourceQuota {
return &api.ResourceQuota{ return &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourcePods: resource.MustParse(number), v1.ResourcePods: resource.MustParse(number),
}, },
}, },
} }
@ -103,24 +103,24 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
// in contrib/for-demos/serve_hostname // in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating ReplicaSet %s", name)) By(fmt.Sprintf("Creating ReplicaSet %s", name))
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{ rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{ Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{
"name": name, "name": name,
}}, }},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name}, Labels: map[string]string{"name": name},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: name, Name: name,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: 9376}}, Ports: []v1.ContainerPort{{ContainerPort: 9376}},
}, },
}, },
}, },
@ -131,7 +131,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
// Cleanup the ReplicaSet when we are done. // Cleanup the ReplicaSet when we are done.
defer func() { defer func() {
// Resize the ReplicaSet to zero to get rid of pods. // Resize the ReplicaSet to zero to get rid of pods.
if err := framework.DeleteReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name); err != nil { if err := framework.DeleteReplicaSet(f.ClientSet, f.InternalClientset, f.Namespace.Name, rs.Name); err != nil {
framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err) framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
} }
}() }()
@ -184,7 +184,7 @@ func rsConditionCheck(f *framework.Framework) {
return false, err return false, err
} }
quantity := resource.MustParse("2") quantity := resource.MustParse("2")
podQuota := quota.Status.Hard[api.ResourcePods] podQuota := quota.Status.Hard[v1.ResourcePods]
return (&podQuota).Cmp(quantity) == 0, nil return (&podQuota).Cmp(quantity) == 0, nil
}) })
if err == wait.ErrWaitTimeout { if err == wait.ErrWaitTimeout {
@ -222,7 +222,8 @@ func rsConditionCheck(f *framework.Framework) {
By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name)) By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) { rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
update.Spec.Replicas = 2 x := int32(2)
update.Spec.Replicas = &x
}) })
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
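The scale-down above shows another consequence of the pointer field: update.Spec.Replicas = 2 no longer compiles, and Go cannot take the address of a literal, so a temporary is introduced. Sketch of the pattern:

// Each retry gets a fresh local whose address is safe to store in the spec.
rs, err := framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
	x := int32(2)
	update.Spec.Replicas = &x
})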

View File

@ -21,6 +21,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
@ -42,28 +43,28 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
nodeCount := len(nodes.Items) nodeCount := len(nodes.Items)
Expect(nodeCount).NotTo(BeZero()) Expect(nodeCount).NotTo(BeZero())
cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU] cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
totalMillicores = int((&cpu).MilliValue()) * nodeCount totalMillicores = int((&cpu).MilliValue()) * nodeCount
}) })
It("should ensure that critical pod is scheduled in case there is no resources available", func() { It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu") By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores) err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
defer framework.DeleteRCAndPods(f.ClientSet, ns, "reserve-all-cpu") defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "reserve-all-cpu")
framework.ExpectNoError(err) framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled") By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"})) label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
listOpts := api.ListOptions{LabelSelector: label} listOpts := v1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts) deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err) framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1)) Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0] deployment := deployments.Items[0]
replicas := uint(deployment.Spec.Replicas) replicas := uint(*(deployment.Spec.Replicas))
err = framework.ScaleDeployment(f.ClientSet, api.NamespaceSystem, deployment.Name, replicas+1, true) err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas+1, true)
defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, api.NamespaceSystem, deployment.Name, replicas, true)) defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err) framework.ExpectNoError(err)
}) })
@ -74,7 +75,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
replicas := millicores / 100 replicas := millicores / 100
ReserveCpu(f, id, 1, 100) ReserveCpu(f, id, 1, 100)
framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.Namespace.Name, id, uint(replicas), false)) framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) { for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels) pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
@ -100,9 +101,9 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas) return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
} }
func podRunningOrUnschedulable(pod *api.Pod) bool { func podRunningOrUnschedulable(pod *v1.Pod) bool {
_, cond := api.GetPodCondition(&pod.Status, api.PodScheduled) _, cond := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
if cond != nil && cond.Status == api.ConditionFalse && cond.Reason == "Unschedulable" { if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true return true
} }
running, _ := testutils.PodRunningReady(pod) running, _ := testutils.PodRunningReady(pod)
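Condition helpers travel with the types: once the pod is a *v1.Pod, GetPodCondition is called off the v1 package, as the diff shows. A compact restatement of the check above:

// A pod counts as unschedulable when its PodScheduled condition is False
// with reason "Unschedulable".
func isUnschedulable(pod *v1.Pod) bool {
	_, cond := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
	return cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable"
}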

View File

@ -25,8 +25,9 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -136,17 +137,17 @@ func WaitForGroupSize(group string, size int32) error {
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size) return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
} }
func svcByName(name string, port int) *api.Service { func svcByName(name string, port int) *v1.Service {
return &api.Service{ return &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeNodePort, Type: v1.ServiceTypeNodePort,
Selector: map[string]string{ Selector: map[string]string{
"name": name, "name": name,
}, },
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: int32(port), Port: int32(port),
TargetPort: intstr.FromInt(port), TargetPort: intstr.FromInt(port),
}}, }},
@ -159,18 +160,18 @@ func newSVCByName(c clientset.Interface, ns, name string) error {
return err return err
} }
func rcByNamePort(name string, replicas int32, image string, port int, protocol api.Protocol, func rcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol,
labels map[string]string, gracePeriod *int64) *api.ReplicationController { labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
return rcByNameContainer(name, replicas, image, labels, api.Container{ return rcByNameContainer(name, replicas, image, labels, v1.Container{
Name: name, Name: name,
Image: image, Image: image,
Ports: []api.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}}, Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
}, gracePeriod) }, gracePeriod)
} }
func rcByNameContainer(name string, replicas int32, image string, labels map[string]string, c api.Container, func rcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
gracePeriod *int64) *api.ReplicationController { gracePeriod *int64) *v1.ReplicationController {
zeroGracePeriod := int64(0) zeroGracePeriod := int64(0)
@ -179,25 +180,25 @@ func rcByNameContainer(name string, replicas int32, image string, labels map[str
if gracePeriod == nil { if gracePeriod == nil {
gracePeriod = &zeroGracePeriod gracePeriod = &zeroGracePeriod
} }
return &api.ReplicationController{ return &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{ TypeMeta: unversioned.TypeMeta{
Kind: "ReplicationController", Kind: "ReplicationController",
APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(), APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{ Selector: map[string]string{
"name": name, "name": name,
}, },
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{c}, Containers: []v1.Container{c},
TerminationGracePeriodSeconds: gracePeriod, TerminationGracePeriodSeconds: gracePeriod,
}, },
}, },
@ -206,10 +207,10 @@ func rcByNameContainer(name string, replicas int32, image string, labels map[str
} }
// newRCByName creates a replication controller with a selector by name of name. // newRCByName creates a replication controller with a selector by name of name.
func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*api.ReplicationController, error) { func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name)) By(fmt.Sprintf("creating replication controller %s", name))
return c.Core().ReplicationControllers(ns).Create(rcByNamePort( return c.Core().ReplicationControllers(ns).Create(rcByNamePort(
name, replicas, serveHostnameImage, 9376, api.ProtocolTCP, map[string]string{}, gracePeriod)) name, replicas, serveHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
} }
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error { func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
@ -217,7 +218,7 @@ func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
if err != nil { if err != nil {
return err return err
} }
rc.Spec.Replicas = replicas *(rc.Spec.Replicas) = replicas
_, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc) _, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc)
return err return err
} }
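resizeRC now writes through the pointer. The test assumes the server always populates Replicas; a defensive variant (the nil guard is an illustrative addition, not in the commit) looks like:

func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
	rc, err := c.Core().ReplicationControllers(ns).Get(name)
	if err != nil {
		return err
	}
	if rc.Spec.Replicas == nil { // defensive: unnecessary when the server defaults it
		rc.Spec.Replicas = new(int32)
	}
	*rc.Spec.Replicas = replicas
	_, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc)
	return err
}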

View File

@ -20,9 +20,9 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -47,8 +47,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -61,20 +61,20 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a Service") By("Creating a Service")
service := newTestServiceForQuota("test-service", api.ServiceTypeClusterIP) service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP)
service, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(service) service, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures service creation") By("Ensuring resource quota status captures service creation")
usedResources = api.ResourceList{} usedResources = v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourceServices] = resource.MustParse("1") usedResources[v1.ResourceServices] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -83,14 +83,14 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage") By("Ensuring resource quota status released usage")
usedResources[api.ResourceServices] = resource.MustParse("0") usedResources[v1.ResourceServices] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
It("should create a ResourceQuota and capture the life of a secret.", func() { It("should create a ResourceQuota and capture the life of a secret.", func() {
By("Discovering how many secrets are in namespace by default") By("Discovering how many secrets are in namespace by default")
secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(api.ListOptions{}) secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
defaultSecrets := fmt.Sprintf("%d", len(secrets.Items)) defaultSecrets := fmt.Sprintf("%d", len(secrets.Items))
hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1) hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1)
@ -98,14 +98,14 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
By("Creating a ResourceQuota") By("Creating a ResourceQuota")
quotaName := "test-quota" quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName) resourceQuota := newTestResourceQuota(quotaName)
resourceQuota.Spec.Hard[api.ResourceSecrets] = resource.MustParse(hardSecrets) resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
resourceQuota, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota) resourceQuota, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets) usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -115,8 +115,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures secret creation") By("Ensuring resource quota status captures secret creation")
usedResources = api.ResourceList{} usedResources = v1.ResourceList{}
usedResources[api.ResourceSecrets] = resource.MustParse(hardSecrets) usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
// we expect there to be two secrets because each namespace will receive // we expect there to be two secrets because each namespace will receive
// a service account token secret by default // a service account token secret by default
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage") By("Ensuring resource quota status released usage")
usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets) usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -140,42 +140,42 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a Pod that fits quota") By("Creating a Pod that fits quota")
podName := "test-pod" podName := "test-pod"
requests := api.ResourceList{} requests := v1.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("500m") requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[api.ResourceMemory] = resource.MustParse("252Mi") requests[v1.ResourceMemory] = resource.MustParse("252Mi")
pod := newTestPodForQuota(f, podName, requests, api.ResourceList{}) pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
podToUpdate := pod podToUpdate := pod
By("Ensuring ResourceQuota status captures the pod usage") By("Ensuring ResourceQuota status captures the pod usage")
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[api.ResourceCPU] = requests[api.ResourceCPU] usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
usedResources[api.ResourceMemory] = requests[api.ResourceMemory] usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Not allowing a pod to be created that exceeds remaining quota") By("Not allowing a pod to be created that exceeds remaining quota")
requests = api.ResourceList{} requests = v1.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("600m") requests[v1.ResourceCPU] = resource.MustParse("600m")
requests[api.ResourceMemory] = resource.MustParse("100Mi") requests[v1.ResourceMemory] = resource.MustParse("100Mi")
pod = newTestPodForQuota(f, "fail-pod", requests, api.ResourceList{}) pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
By("Ensuring a pod cannot update its resource requirements") By("Ensuring a pod cannot update its resource requirements")
// a pod cannot dynamically update its resource requirements. // a pod cannot dynamically update its resource requirements.
requests = api.ResourceList{} requests = v1.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("100m") requests[v1.ResourceCPU] = resource.MustParse("100m")
requests[api.ResourceMemory] = resource.MustParse("100Mi") requests[v1.ResourceMemory] = resource.MustParse("100Mi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(podToUpdate) _, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(podToUpdate)
Expect(err).To(HaveOccurred()) Expect(err).To(HaveOccurred())
@ -185,14 +185,14 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[api.ResourceCPU] = resource.MustParse("0") usedResources[v1.ResourceCPU] = resource.MustParse("0")
usedResources[api.ResourceMemory] = resource.MustParse("0") usedResources[v1.ResourceMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
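All of the quota assertions above share one shape: an expected v1.ResourceList keyed by v1.Resource* constants, with quantities built by resource.MustParse. A sketch of the idiom, written as a single literal rather than the incremental map writes the tests use:

usedResources := v1.ResourceList{
	v1.ResourceQuotas: resource.MustParse("1"),
	v1.ResourcePods:   resource.MustParse("0"),
	v1.ResourceCPU:    resource.MustParse("0"),
	v1.ResourceMemory: resource.MustParse("0"),
}
// waitForResourceQuota is the test file's own poll helper.
err := waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)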
@ -205,8 +205,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -216,9 +216,9 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures configMap creation") By("Ensuring resource quota status captures configMap creation")
usedResources = api.ResourceList{} usedResources = v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourceConfigMaps] = resource.MustParse("1") usedResources[v1.ResourceConfigMaps] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -227,7 +227,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage") By("Ensuring resource quota status released usage")
usedResources[api.ResourceConfigMaps] = resource.MustParse("0") usedResources[v1.ResourceConfigMaps] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -240,9 +240,9 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourceReplicationControllers] = resource.MustParse("0") usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -252,8 +252,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures replication controller creation") By("Ensuring resource quota status captures replication controller creation")
usedResources = api.ResourceList{} usedResources = v1.ResourceList{}
usedResources[api.ResourceReplicationControllers] = resource.MustParse("1") usedResources[v1.ResourceReplicationControllers] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -262,7 +262,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage") By("Ensuring resource quota status released usage")
usedResources[api.ResourceReplicationControllers] = resource.MustParse("0") usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -275,10 +275,10 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated") By("Ensuring resource quota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourceQuotas] = resource.MustParse("1") usedResources[v1.ResourceQuotas] = resource.MustParse("1")
usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0") usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[api.ResourceRequestsStorage] = resource.MustParse("0") usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -288,9 +288,9 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures persistent volume claimcreation") By("Ensuring resource quota status captures persistent volume claimcreation")
usedResources = api.ResourceList{} usedResources = v1.ResourceList{}
usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("1") usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
usedResources[api.ResourceRequestsStorage] = resource.MustParse("1Gi") usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -299,8 +299,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage") By("Ensuring resource quota status released usage")
usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0") usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
usedResources[api.ResourceRequestsStorage] = resource.MustParse("0") usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
@ -308,18 +308,18 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
It("should verify ResourceQuota with terminating scopes.", func() { It("should verify ResourceQuota with terminating scopes.", func() {
By("Creating a ResourceQuota with terminating scope") By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating" quotaTerminatingName := "quota-terminating"
resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating)) resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not terminating scope") By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating" quotaNotTerminatingName := "quota-not-terminating"
resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating)) resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
@ -328,44 +328,44 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
By("Creating a long running pod") By("Creating a long running pod")
podName := "test-pod" podName := "test-pod"
requests := api.ResourceList{} requests := v1.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("500m") requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[api.ResourceMemory] = resource.MustParse("200Mi") requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := api.ResourceList{} limits := v1.ResourceList{}
limits[api.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceCPU] = resource.MustParse("1")
limits[api.ResourceMemory] = resource.MustParse("400Mi") limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits) pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope captures the pod usage") By("Ensuring resource quota with not terminating scope captures the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[api.ResourceRequestsCPU] = requests[api.ResourceCPU] usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory] usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU] usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory] usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope ignored the pod usage") By("Ensuring resource quota with terminating scope ignored the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0") usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0") usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
@ -378,50 +378,50 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope captures the pod usage") By("Ensuring resource quota with terminating scope captures the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[v1.ResourcePods] = resource.MustParse("1")
usedResources[api.ResourceRequestsCPU] = requests[api.ResourceCPU] usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory] usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU] usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory] usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope ignored the pod usage") By("Ensuring resource quota with not terminating scope ignored the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0") usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
usedResources[api.ResourceRequestsCPU] = resource.MustParse("0") usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
usedResources[api.ResourceRequestsMemory] = resource.MustParse("0") usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
usedResources[api.ResourceLimitsCPU] = resource.MustParse("0") usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
usedResources[api.ResourceLimitsMemory] = resource.MustParse("0") usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
It("should verify ResourceQuota with best effort scope.", func() { It("should verify ResourceQuota with best effort scope.", func() {
By("Creating a ResourceQuota with best effort scope") By("Creating a ResourceQuota with best effort scope")
resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort)) resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
usedResources := api.ResourceList{} usedResources := v1.ResourceList{}
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not best effort scope") By("Creating a ResourceQuota with not best effort scope")
resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort)) resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated") By("Ensuring ResourceQuota status is calculated")
@ -429,111 +429,111 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod") By("Creating a best-effort pod")
pod := newTestPodForQuota(f, podName, api.ResourceList{}, api.ResourceList{}) pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope captures the pod usage") By("Ensuring resource quota with best effort scope captures the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort ignored the pod usage") By("Ensuring resource quota with not best effort ignored the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Creating a not best-effort pod") By("Creating a not best-effort pod")
requests := api.ResourceList{} requests := v1.ResourceList{}
requests[api.ResourceCPU] = resource.MustParse("500m") requests[v1.ResourceCPU] = resource.MustParse("500m")
requests[api.ResourceMemory] = resource.MustParse("200Mi") requests[v1.ResourceMemory] = resource.MustParse("200Mi")
limits := api.ResourceList{} limits := v1.ResourceList{}
limits[api.ResourceCPU] = resource.MustParse("1") limits[v1.ResourceCPU] = resource.MustParse("1")
limits[api.ResourceMemory] = resource.MustParse("400Mi") limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits) pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod) pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort scope captures the pod usage") By("Ensuring resource quota with not best effort scope captures the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("1") usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope ignored the pod usage") By("Ensuring resource quota with best effort scope ignored the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Deleting the pod") By("Deleting the pod")
err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage") By("Ensuring resource quota status released the pod usage")
usedResources[api.ResourcePods] = resource.MustParse("0") usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources) err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred()) Expect(err).NotTo(HaveOccurred())
}) })
}) })
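Editor's note: the two scope tests above hinge on how quota scopes classify pods. A rough sketch of the rule they exercise, under the assumption that a pod is best-effort exactly when no container declares requests or limits (helper name is ours, not from this file):

package e2esketch

import "k8s.io/kubernetes/pkg/api/v1"

// isBestEffort reports whether a pod would be matched by the BestEffort
// quota scope: no container declares any requests or limits.
func isBestEffort(pod *v1.Pod) bool {
	for _, c := range pod.Spec.Containers {
		if len(c.Resources.Requests) > 0 || len(c.Resources.Limits) > 0 {
			return false
		}
	}
	return true
}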
// newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes // newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
func newTestResourceQuotaWithScope(name string, scope api.ResourceQuotaScope) *api.ResourceQuota { func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
hard := api.ResourceList{} hard := v1.ResourceList{}
hard[api.ResourcePods] = resource.MustParse("5") hard[v1.ResourcePods] = resource.MustParse("5")
switch scope { switch scope {
case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating: case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
hard[api.ResourceRequestsCPU] = resource.MustParse("1") hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
hard[api.ResourceRequestsMemory] = resource.MustParse("500Mi") hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
hard[api.ResourceLimitsCPU] = resource.MustParse("2") hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
hard[api.ResourceLimitsMemory] = resource.MustParse("1Gi") hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
} }
return &api.ResourceQuota{ return &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: name}, ObjectMeta: v1.ObjectMeta{Name: name},
Spec: api.ResourceQuotaSpec{Hard: hard, Scopes: []api.ResourceQuotaScope{scope}}, Spec: v1.ResourceQuotaSpec{Hard: hard, Scopes: []v1.ResourceQuotaScope{scope}},
} }
} }
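A hedged usage sketch of the helper above, as it appears inside the test bodies (f is the suite's framework.Framework; all names are from this file):

quota, err := createResourceQuota(f.ClientSet, f.Namespace.Name,
	newTestResourceQuotaWithScope("quota-terminating", v1.ResourceQuotaScopeTerminating))
framework.ExpectNoError(err)
// A freshly created scoped quota should report zero pods in use.
usedResources := v1.ResourceList{v1.ResourcePods: resource.MustParse("0")}
framework.ExpectNoError(waitForResourceQuota(f.ClientSet, f.Namespace.Name, quota.Name, usedResources))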
// newTestResourceQuota returns a quota that enforces default constraints for testing // newTestResourceQuota returns a quota that enforces default constraints for testing
func newTestResourceQuota(name string) *api.ResourceQuota { func newTestResourceQuota(name string) *v1.ResourceQuota {
hard := api.ResourceList{} hard := v1.ResourceList{}
hard[api.ResourcePods] = resource.MustParse("5") hard[v1.ResourcePods] = resource.MustParse("5")
hard[api.ResourceServices] = resource.MustParse("10") hard[v1.ResourceServices] = resource.MustParse("10")
hard[api.ResourceServicesNodePorts] = resource.MustParse("1") hard[v1.ResourceServicesNodePorts] = resource.MustParse("1")
hard[api.ResourceServicesLoadBalancers] = resource.MustParse("1") hard[v1.ResourceServicesLoadBalancers] = resource.MustParse("1")
hard[api.ResourceReplicationControllers] = resource.MustParse("10") hard[v1.ResourceReplicationControllers] = resource.MustParse("10")
hard[api.ResourceQuotas] = resource.MustParse("1") hard[v1.ResourceQuotas] = resource.MustParse("1")
hard[api.ResourceCPU] = resource.MustParse("1") hard[v1.ResourceCPU] = resource.MustParse("1")
hard[api.ResourceMemory] = resource.MustParse("500Mi") hard[v1.ResourceMemory] = resource.MustParse("500Mi")
hard[api.ResourceConfigMaps] = resource.MustParse("2") hard[v1.ResourceConfigMaps] = resource.MustParse("2")
hard[api.ResourceSecrets] = resource.MustParse("10") hard[v1.ResourceSecrets] = resource.MustParse("10")
hard[api.ResourcePersistentVolumeClaims] = resource.MustParse("10") hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
hard[api.ResourceRequestsStorage] = resource.MustParse("10Gi") hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
return &api.ResourceQuota{ return &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: name}, ObjectMeta: v1.ObjectMeta{Name: name},
Spec: api.ResourceQuotaSpec{Hard: hard}, Spec: v1.ResourceQuotaSpec{Hard: hard},
} }
} }
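resource.MustParse panics on malformed input, which keeps these fixtures terse; code outside tests would typically go through the error-returning parser instead. A sketch, assuming the ParseQuantity signature in this tree:

package e2esketch

import "k8s.io/kubernetes/pkg/api/resource"

// mustQuantity mirrors what the fixtures above rely on: fail loudly at
// construction time rather than propagate a malformed quantity.
func mustQuantity(s string) resource.Quantity {
	q, err := resource.ParseQuantity(s)
	if err != nil {
		panic(err)
	}
	return q
}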
// newTestPodForQuota returns a pod that has the specified requests and limits // newTestPodForQuota returns a pod that has the specified requests and limits
func newTestPodForQuota(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod { func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "pause", Name: "pause",
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: requests, Requests: requests,
Limits: limits, Limits: limits,
}, },
@ -544,20 +544,20 @@ func newTestPodForQuota(f *framework.Framework, name string, requests api.Resour
} }
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim // newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
func newTestPersistentVolumeClaimForQuota(name string) *api.PersistentVolumeClaim { func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
return &api.PersistentVolumeClaim{ return &v1.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PersistentVolumeClaimSpec{ Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []api.PersistentVolumeAccessMode{ AccessModes: []v1.PersistentVolumeAccessMode{
api.ReadWriteOnce, v1.ReadWriteOnce,
api.ReadOnlyMany, v1.ReadOnlyMany,
api.ReadWriteMany, v1.ReadWriteMany,
}, },
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"), v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
}, },
}, },
}, },
@ -565,22 +565,22 @@ func newTestPersistentVolumeClaimForQuota(name string) *api.PersistentVolumeClai
} }
// newTestReplicationControllerForQuota returns a simple replication controller // newTestReplicationControllerForQuota returns a simple replication controller
func newTestReplicationControllerForQuota(name, image string, replicas int32) *api.ReplicationController { func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
return &api.ReplicationController{ return &v1.ReplicationController{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: replicas, Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{ Selector: map[string]string{
"name": name, "name": name,
}, },
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name}, Labels: map[string]string{"name": name},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: name, Name: name,
Image: image, Image: image,
@ -593,14 +593,14 @@ func newTestReplicationControllerForQuota(name, image string, replicas int32) *a
} }
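The Replicas change in the hunk above is the one behavioral wrinkle of the v1 move: the field becomes *int32 so an unset value is distinguishable from an explicit 0. The inline closure can be lifted into a helper (a sketch; the helper name is ours):

package e2esketch

// int32Ptr returns a pointer to i; equivalent to the inline
// func(i int32) *int32 { return &i }(replicas) used in the diff above.
func int32Ptr(i int32) *int32 { return &i }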
// newTestServiceForQuota returns a simple service // newTestServiceForQuota returns a simple service
func newTestServiceForQuota(name string, serviceType api.ServiceType) *api.Service { func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service {
return &api.Service{ return &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: serviceType, Type: serviceType,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
TargetPort: intstr.FromInt(80), TargetPort: intstr.FromInt(80),
}}, }},
@ -608,9 +608,9 @@ func newTestServiceForQuota(name string, serviceType api.ServiceType) *api.Servi
} }
} }
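TargetPort in the service spec above is an int-or-string union; a short sketch of the two constructors:

package e2esketch

import "k8s.io/kubernetes/pkg/util/intstr"

// A numeric target port, as used above, versus a named container port.
var byNumber = intstr.FromInt(80)
var byName = intstr.FromString("http")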
func newTestConfigMapForQuota(name string) *api.ConfigMap { func newTestConfigMapForQuota(name string) *v1.ConfigMap {
return &api.ConfigMap{ return &v1.ConfigMap{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Data: map[string]string{ Data: map[string]string{
@ -619,9 +619,9 @@ func newTestConfigMapForQuota(name string) *api.ConfigMap {
} }
} }
func newTestSecretForQuota(name string) *api.Secret { func newTestSecretForQuota(name string) *v1.Secret {
return &api.Secret{ return &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Data: map[string][]byte{ Data: map[string][]byte{
@ -633,7 +633,7 @@ func newTestSecretForQuota(name string) *api.Secret {
} }
// createResourceQuota creates a ResourceQuota in the specified namespace // createResourceQuota creates a ResourceQuota in the specified namespace
func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) { func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
return c.Core().ResourceQuotas(namespace).Create(resourceQuota) return c.Core().ResourceQuotas(namespace).Create(resourceQuota)
} }
@ -643,7 +643,7 @@ func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
} }
// waitForResourceQuota waits for the quota's status to show the expected used resource values // waitForResourceQuota waits for the quota's status to show the expected used resource values
func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used api.ResourceList) error { func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) { return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName) resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
if err != nil { if err != nil {


@ -21,6 +21,7 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod" kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@ -32,15 +33,15 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
func isNotRestartAlwaysMirrorPod(p *api.Pod) bool { func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
if !kubepod.IsMirrorPod(p) { if !kubepod.IsMirrorPod(p) {
return false return false
} }
return p.Spec.RestartPolicy != api.RestartPolicyAlways return p.Spec.RestartPolicy != v1.RestartPolicyAlways
} }
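Why the predicate matters: restart tests only count pods the kubelet will recreate, and mirror pods that are not restart-always will not come back after a node restart. A minimal sketch of applying the predicate (wrapper name is ours):

package e2esketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
)

// keep reports whether a restart test should track this pod: everything
// except mirror pods whose restart policy is not Always.
func keep(p *v1.Pod) bool {
	return !(kubepod.IsMirrorPod(p) && p.Spec.RestartPolicy != v1.RestartPolicyAlways)
}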
func filterIrrelevantPods(pods []*api.Pod) []*api.Pod { func filterIrrelevantPods(pods []*v1.Pod) []*v1.Pod {
var results []*api.Pod var results []*v1.Pod
for _, p := range pods { for _, p := range pods {
if isNotRestartAlwaysMirrorPod(p) { if isNotRestartAlwaysMirrorPod(p) {
// Mirror pods with restart policy == Never will not get // Mirror pods with restart policy == Never will not get
@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
// returning their names if it can do so before timeout. // returning their names if it can do so before timeout.
func waitForNPods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) { func waitForNPods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
// Loop until we find expect pods or timeout is passed. // Loop until we find expect pods or timeout is passed.
var pods []*api.Pod var pods []*v1.Pod
var errLast error var errLast error
found := wait.Poll(framework.Poll, timeout, func() (bool, error) { found := wait.Poll(framework.Poll, timeout, func() (bool, error) {
allPods := ps.List() allPods := ps.List()


@ -23,7 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -44,12 +45,12 @@ type pausePodConfig struct {
Name string Name string
Affinity string Affinity string
Annotations, Labels, NodeSelector map[string]string Annotations, Labels, NodeSelector map[string]string
Resources *api.ResourceRequirements Resources *v1.ResourceRequirements
} }
var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() { var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
var cs clientset.Interface var cs clientset.Interface
var nodeList *api.NodeList var nodeList *v1.NodeList
var systemPodsNo int var systemPodsNo int
var totalPodCapacity int64 var totalPodCapacity int64
var RCName string var RCName string
@ -59,9 +60,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
AfterEach(func() { AfterEach(func() {
rc, err := cs.Core().ReplicationControllers(ns).Get(RCName) rc, err := cs.Core().ReplicationControllers(ns).Get(RCName)
if err == nil && rc.Spec.Replicas != 0 { if err == nil && *(rc.Spec.Replicas) != 0 {
By("Cleaning up the replication controller") By("Cleaning up the replication controller")
err := framework.DeleteRCAndPods(f.ClientSet, ns, RCName) err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName)
framework.ExpectNoError(err) framework.ExpectNoError(err)
} }
}) })
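Note the new dereference *(rc.Spec.Replicas): the versioned field is a pointer, and the cleanup above relies on the API server having defaulted it. A nil-safe read is a small sketch away (helper name is ours):

package e2esketch

import "k8s.io/kubernetes/pkg/api/v1"

// replicasOf reads Spec.Replicas without panicking when it was never set.
func replicasOf(rc *v1.ReplicationController) int32 {
	if rc.Spec.Replicas == nil {
		return 0
	}
	return *rc.Spec.Replicas
}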
@ -69,7 +70,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
BeforeEach(func() { BeforeEach(func() {
cs = f.ClientSet cs = f.ClientSet
ns = f.Namespace.Name ns = f.Namespace.Name
nodeList = &api.NodeList{} nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute) framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs) masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
@ -156,11 +157,11 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
} }
framework.WaitForStableCluster(cs, masterNodes) framework.WaitForStableCluster(cs, masterNodes)
pods, err := cs.Core().Pods(api.NamespaceAll).List(api.ListOptions{}) pods, err := cs.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
for _, pod := range pods.Items { for _, pod := range pods.Items {
_, found := nodeToCapacityMap[pod.Spec.NodeName] _, found := nodeToCapacityMap[pod.Spec.NodeName]
if found && pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed { if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName) framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod) nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
} }
@ -189,11 +190,11 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
*initPausePod(f, pausePodConfig{ *initPausePod(f, pausePodConfig{
Name: "", Name: "",
Labels: map[string]string{"name": ""}, Labels: map[string]string{"name": ""},
Resources: &api.ResourceRequirements{ Resources: &v1.ResourceRequirements{
Limits: api.ResourceList{ Limits: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
}, },
Requests: api.ResourceList{ Requests: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
}, },
}, },
@ -203,8 +204,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
createPausePod(f, pausePodConfig{ createPausePod(f, pausePodConfig{
Name: podName, Name: podName,
Labels: map[string]string{"name": "additional"}, Labels: map[string]string{"name": "additional"},
Resources: &api.ResourceRequirements{ Resources: &v1.ResourceRequirements{
Limits: api.ResourceList{ Limits: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"), "cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
}, },
}, },
@ -511,8 +512,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// cannot be scheduled onto it. // cannot be scheduled onto it.
By("Launching two pods on two distinct nodes to get two node names") By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true) CreateHostPortPods(f, "host-port", 2, true)
defer framework.DeleteRCAndPods(f.ClientSet, ns, "host-port") defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port")
podList, err := cs.Core().Pods(ns).List(api.ListOptions{}) podList, err := cs.Core().Pods(ns).List(v1.ListOptions{})
ExpectNoError(err) ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2)) Expect(len(podList.Items)).To(Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName} nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
@ -671,10 +672,10 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f) nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.") By("Trying to apply a random taint on the found node.")
testTaint := api.Taint{ testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())), Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value", Value: "testing-taint-value",
Effect: api.TaintEffectNoSchedule, Effect: v1.TaintEffectNoSchedule,
} }
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, testTaint)
@ -723,10 +724,10 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f) nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.") By("Trying to apply a random taint on the found node.")
testTaint := api.Taint{ testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())), Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value", Value: "testing-taint-value",
Effect: api.TaintEffectNoSchedule, Effect: v1.TaintEffectNoSchedule,
} }
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint) framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, testTaint) framework.ExpectNodeHasTaint(cs, nodeName, testTaint)
@ -757,25 +758,25 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
}) })
}) })
func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
if conf.Affinity != "" { if conf.Affinity != "" {
if conf.Annotations == nil { if conf.Annotations == nil {
conf.Annotations = map[string]string{ conf.Annotations = map[string]string{
api.AffinityAnnotationKey: conf.Affinity, v1.AffinityAnnotationKey: conf.Affinity,
} }
} else { } else {
conf.Annotations[api.AffinityAnnotationKey] = conf.Affinity conf.Annotations[v1.AffinityAnnotationKey] = conf.Affinity
} }
} }
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: conf.Name, Name: conf.Name,
Labels: conf.Labels, Labels: conf.Labels,
Annotations: conf.Annotations, Annotations: conf.Annotations,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeSelector: conf.NodeSelector, NodeSelector: conf.NodeSelector,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: podName, Name: podName,
Image: framework.GetPauseImageName(f.ClientSet), Image: framework.GetPauseImageName(f.ClientSet),
@ -789,13 +790,13 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
return pod return pod
} }
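initPausePod above still carries affinity as a JSON annotation (v1.AffinityAnnotationKey); the typed Affinity field arrives in a later release. A minimal node-affinity payload of the shape these tests pass (a sketch; the key and values are illustrative, not from this file):

const sampleAffinity = `{
	"nodeAffinity": {
		"requiredDuringSchedulingIgnoredDuringExecution": {
			"nodeSelectorTerms": [{
				"matchExpressions": [{
					"key": "kubernetes.io/hostname",
					"operator": "In",
					"values": ["some-node"]
				}]
			}]
		}
	}
}`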
func createPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(initPausePod(f, conf)) pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
framework.ExpectNoError(err) framework.ExpectNoError(err)
return pod return pod
} }
func runPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod { func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf) pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod)) framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name) pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name)
@ -811,13 +812,13 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
pod := runPausePod(f, conf) pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.") By("Explicitly delete pod here to free the resource it takes.")
err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)) err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
framework.ExpectNoError(err) framework.ExpectNoError(err)
return pod.Spec.NodeName return pod.Spec.NodeName
} }
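v1.NewDeleteOptions(0) above requests a zero grace period, i.e. immediate deletion, so the pod's resources free up for the next scheduling step. Expanded, it is roughly equivalent to this literal:

gracePeriod := int64(0)
opts := &v1.DeleteOptions{GracePeriodSeconds: &gracePeriod}
err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, opts)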
func createPodWithNodeAffinity(f *framework.Framework) *api.Pod { func createPodWithNodeAffinity(f *framework.Framework) *v1.Pod {
return createPausePod(f, pausePodConfig{ return createPausePod(f, pausePodConfig{
Name: "with-nodeaffinity-" + string(uuid.NewUUID()), Name: "with-nodeaffinity-" + string(uuid.NewUUID()),
Affinity: `{ Affinity: `{
@ -836,7 +837,7 @@ func createPodWithNodeAffinity(f *framework.Framework) *api.Pod {
}) })
} }
func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *api.Pod { func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *v1.Pod {
return createPausePod(f, pausePodConfig{ return createPausePod(f, pausePodConfig{
Name: "with-podantiaffinity-" + string(uuid.NewUUID()), Name: "with-podantiaffinity-" + string(uuid.NewUUID()),
Affinity: `{ Affinity: `{
@ -869,23 +870,23 @@ func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *api.P
} }
// getPodsScheduled returns the currently scheduled and not scheduled Pods. // getPodsScheduled returns the currently scheduled and not scheduled Pods.
func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) { func getPodsScheduled(pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items { for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) { if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" { if pod.Spec.NodeName != "" {
_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled) _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
// We can't assume that the scheduledCondition is always set if Pod is assigned to Node, // We can't assume that the scheduledCondition is always set if Pod is assigned to Node,
// as e.g. DaemonController doesn't set it when assigning Pod to a Node. Currently // as e.g. DaemonController doesn't set it when assigning Pod to a Node. Currently
// Kubelet sets this condition when it gets a Pod without it, but if we were expecting // Kubelet sets this condition when it gets a Pod without it, but if we were expecting
// that it would always be not nil, this would cause a rare race condition. // that it would always be not nil, this would cause a rare race condition.
if scheduledCondition != nil { if scheduledCondition != nil {
Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue)) Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
} }
scheduledPods = append(scheduledPods, pod) scheduledPods = append(scheduledPods, pod)
} else { } else {
_, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled) _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
if scheduledCondition != nil { if scheduledCondition != nil {
Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse)) Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
} }
if scheduledCondition != nil && scheduledCondition.Reason == "Unschedulable" { if scheduledCondition != nil && scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod) notScheduledPods = append(notScheduledPods, pod)
@ -896,7 +897,7 @@ func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.
return return
} }
func getRequestedCPU(pod api.Pod) int64 { func getRequestedCPU(pod v1.Pod) int64 {
var result int64 var result int64
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
result += container.Resources.Requests.Cpu().MilliValue() result += container.Resources.Requests.Cpu().MilliValue()
@ -913,7 +914,7 @@ func waitForScheduler() {
// TODO: upgrade calls in PodAffinity tests when we're able to run them // TODO: upgrade calls in PodAffinity tests when we're able to run them
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) { func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
allPods, err := c.Core().Pods(ns).List(api.ListOptions{}) allPods, err := c.Core().Pods(ns).List(v1.ListOptions{})
framework.ExpectNoError(err) framework.ExpectNoError(err)
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods) scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)


@ -25,7 +25,7 @@ package e2e
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
@ -33,26 +33,25 @@ import (
. "github.com/onsi/gomega" . "github.com/onsi/gomega"
) )
func scTestPod(hostIPC bool, hostPID bool) *api.Pod { func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
podName := "security-context-" + string(uuid.NewUUID()) podName := "security-context-" + string(uuid.NewUUID())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Labels: map[string]string{"name": podName}, Labels: map[string]string{"name": podName},
Annotations: map[string]string{}, Annotations: map[string]string{},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
SecurityContext: &api.PodSecurityContext{ HostIPC: hostIPC,
HostIPC: hostIPC, HostPID: hostPID,
HostPID: hostPID, SecurityContext: &v1.PodSecurityContext{},
}, Containers: []v1.Container{
Containers: []api.Container{
{ {
Name: "test-container", Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24", Image: "gcr.io/google_containers/busybox:1.24",
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
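The reshuffle in scTestPod above tracks a v1 API difference: HostIPC and HostPID sit on the PodSpec itself, while PodSecurityContext remains for user, SELinux, and related fields. In isolation:

spec := v1.PodSpec{
	HostIPC:         true,  // moved up from PodSecurityContext
	HostPID:         false, // moved up from PodSecurityContext
	SecurityContext: &v1.PodSecurityContext{}, // RunAsUser, SELinuxOptions, ... still live here
}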
@ -86,7 +85,7 @@ var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", fun
var uid int64 = 1001 var uid int64 = 1001
var overrideUid int64 = 1002 var overrideUid int64 = 1002
pod.Spec.SecurityContext.RunAsUser = &uid pod.Spec.SecurityContext.RunAsUser = &uid
pod.Spec.Containers[0].SecurityContext = new(api.SecurityContext) pod.Spec.Containers[0].SecurityContext = new(v1.SecurityContext)
pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid
pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"} pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
@ -110,33 +109,33 @@ var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", fun
It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp]", func() { It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha // TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false) pod := scTestPod(false, false)
pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined" pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
pod.Annotations[api.SeccompPodAnnotationKey] = "docker/default" pod.Annotations[v1.SeccompPodAnnotationKey] = "docker/default"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
}) })
It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp]", func() { It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha // TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false) pod := scTestPod(false, false)
pod.Annotations[api.SeccompPodAnnotationKey] = "unconfined" pod.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
}) })
It("should support seccomp alpha docker/default annotation [Feature:Seccomp]", func() { It("should support seccomp alpha docker/default annotation [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha // TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false) pod := scTestPod(false, false)
pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default" pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
}) })
It("should support seccomp default which is unconfined [Feature:Seccomp]", func() { It("should support seccomp default which is unconfined [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha // TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false) pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"} pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
}) })
}) })
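The assertions above key off /proc/self/status: its Seccomp: field is 0 when seccomp is disabled and 2 under filtering mode, which is what the grep for "ecc" surfaces. A sketch of the same probe in plain Go, field semantics per proc(5):

package e2esketch

import (
	"io/ioutil"
	"strings"
)

// seccompMode returns the value of the "Seccomp:" line in
// /proc/self/status: "0" disabled, "1" strict, "2" filter mode.
func seccompMode() (string, error) {
	data, err := ioutil.ReadFile("/proc/self/status")
	if err != nil {
		return "", err
	}
	for _, line := range strings.Split(string(data), "\n") {
		if strings.HasPrefix(line, "Seccomp:") {
			return strings.TrimSpace(strings.TrimPrefix(line, "Seccomp:")), nil
		}
	}
	return "", nil
}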
@ -146,23 +145,23 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod := scTestPod(hostIPC, hostPID) pod := scTestPod(hostIPC, hostPID)
volumeName := "test-volume" volumeName := "test-volume"
mountPath := "/mounted_volume" mountPath := "/mounted_volume"
pod.Spec.Containers[0].VolumeMounts = []api.VolumeMount{ pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: mountPath, MountPath: mountPath,
}, },
} }
pod.Spec.Volumes = []api.Volume{ pod.Spec.Volumes = []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{ EmptyDir: &v1.EmptyDirVolumeSource{
Medium: api.StorageMediumDefault, Medium: v1.StorageMediumDefault,
}, },
}, },
}, },
} }
pod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{ pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c0,c1", Level: "s0:c0,c1",
} }
pod.Spec.Containers[0].Command = []string{"sleep", "6000"} pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
@ -190,17 +189,17 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir)) By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
pod = scTestPod(hostIPC, hostPID) pod = scTestPod(hostIPC, hostPID)
pod.Spec.NodeName = foundPod.Spec.NodeName pod.Spec.NodeName = foundPod.Spec.NodeName
volumeMounts := []api.VolumeMount{ volumeMounts := []v1.VolumeMount{
{ {
Name: volumeName, Name: volumeName,
MountPath: mountPath, MountPath: mountPath,
}, },
} }
volumes := []api.Volume{ volumes := []v1.Volume{
{ {
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: &api.HostPathVolumeSource{ HostPath: &v1.HostPathVolumeSource{
Path: volumeHostPath, Path: volumeHostPath,
}, },
}, },
@ -209,7 +208,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod.Spec.Containers[0].VolumeMounts = volumeMounts pod.Spec.Containers[0].VolumeMounts = volumeMounts
pod.Spec.Volumes = volumes pod.Spec.Volumes = volumes
pod.Spec.Containers[0].Command = []string{"cat", testFilePath} pod.Spec.Containers[0].Command = []string{"cat", testFilePath}
pod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{ pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c0,c1", Level: "s0:c0,c1",
} }
@ -220,7 +219,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod.Spec.Volumes = volumes pod.Spec.Volumes = volumes
pod.Spec.Containers[0].VolumeMounts = volumeMounts pod.Spec.Containers[0].VolumeMounts = volumeMounts
pod.Spec.Containers[0].Command = []string{"sleep", "6000"} pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
pod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{ pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c2,c3", Level: "s0:c2,c3",
} }
_, err = client.Create(pod) _, err = client.Create(pod)

File diff suppressed because it is too large


@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"time" "time"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors" apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid" "k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version" "k8s.io/kubernetes/pkg/version"
@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
It("should ensure a single API token exists", func() { It("should ensure a single API token exists", func() {
// wait for the service account to reference a single secret // wait for the service account to reference a single secret
var secrets []api.ObjectReference var secrets []v1.ObjectReference
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) { framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
By("waiting for a single token reference") By("waiting for a single token reference")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default") sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
@ -178,9 +178,9 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err) framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue continue
} }
if secret.Type == api.SecretTypeServiceAccountToken { if secret.Type == v1.SecretTypeServiceAccountToken {
tokenContent = string(secret.Data[api.ServiceAccountTokenKey]) tokenContent = string(secret.Data[v1.ServiceAccountTokenKey])
rootCAContent = string(secret.Data[api.ServiceAccountRootCAKey]) rootCAContent = string(secret.Data[v1.ServiceAccountRootCAKey])
return true, nil return true, nil
} }
} }
@ -189,52 +189,52 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
return false, nil return false, nil
})) }))
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: "pod-service-account-" + string(uuid.NewUUID()) + "-", GenerateName: "pod-service-account-" + string(uuid.NewUUID()) + "-",
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "token-test", Name: "token-test",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{ Args: []string{
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey), fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey),
}, },
}, },
{ {
Name: "root-ca-test", Name: "root-ca-test",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{ Args: []string{
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountRootCAKey), fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey),
}, },
}, },
}, },
RestartPolicy: api.RestartPolicyNever, RestartPolicy: v1.RestartPolicyNever,
}, },
} }
supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery()) supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery())
if supportsTokenNamespace { if supportsTokenNamespace {
pod.Spec.Containers = append(pod.Spec.Containers, api.Container{ pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "namespace-test", Name: "namespace-test",
Image: "gcr.io/google_containers/mounttest:0.7", Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{ Args: []string{
fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountNamespaceKey), fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
}, },
}) })
} }
f.TestContainerOutput("consume service account token", pod, 0, []string{ f.TestContainerOutput("consume service account token", pod, 0, []string{
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey, tokenContent), fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey, tokenContent),
}) })
f.TestContainerOutput("consume service account root CA", pod, 1, []string{ f.TestContainerOutput("consume service account root CA", pod, 1, []string{
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountRootCAKey, rootCAContent), fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey, rootCAContent),
}) })
if supportsTokenNamespace { if supportsTokenNamespace {
f.TestContainerOutput("consume service account namespace", pod, 2, []string{ f.TestContainerOutput("consume service account namespace", pod, 2, []string{
fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountNamespaceKey, f.Namespace.Name), fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
}) })
} }
}) })
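What the three containers above read back: the service-account admission controller mounts a secret at serviceaccount.DefaultAPITokenMountPath with one file per key (token, root CA, namespace). A hedged sketch of reading it from inside a pod, with fmt, ioutil, serviceaccount, and v1 imported as in this file; the mount path value is an assumption from the serviceaccount package:

// Assumed layout: <DefaultAPITokenMountPath>/token, .../ca.crt, .../namespace
tokenPath := fmt.Sprintf("%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey)
token, err := ioutil.ReadFile(tokenPath) // the bearer token the kubelet mounted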


@ -22,7 +22,7 @@ import (
"strings" "strings"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@ -118,12 +118,13 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) { func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
cfg := testutils.RCConfig{ cfg := testutils.RCConfig{
Client: f.ClientSet, Client: f.ClientSet,
Image: framework.GetPauseImageName(f.ClientSet), InternalClient: f.InternalClientset,
Name: "svc-latency-rc", Image: framework.GetPauseImageName(f.ClientSet),
Namespace: f.Namespace.Name, Name: "svc-latency-rc",
Replicas: 1, Namespace: f.Namespace.Name,
PollInterval: time.Second, Replicas: 1,
PollInterval: time.Second,
} }
if err := framework.RunRC(cfg); err != nil { if err := framework.RunRC(cfg); err != nil {
return nil, err return nil, err
@ -179,7 +180,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int) (output
type endpointQuery struct { type endpointQuery struct {
endpointsName string endpointsName string
endpoints *api.Endpoints endpoints *v1.Endpoints
result chan<- struct{} result chan<- struct{}
} }
@ -188,7 +189,7 @@ type endpointQueries struct {
stop chan struct{} stop chan struct{}
requestChan chan *endpointQuery requestChan chan *endpointQuery
seenChan chan *api.Endpoints seenChan chan *v1.Endpoints
} }
func newQuerier() *endpointQueries { func newQuerier() *endpointQueries {
@ -197,7 +198,7 @@ func newQuerier() *endpointQueries {
stop: make(chan struct{}, 100), stop: make(chan struct{}, 100),
requestChan: make(chan *endpointQuery), requestChan: make(chan *endpointQuery),
seenChan: make(chan *api.Endpoints, 100), seenChan: make(chan *v1.Endpoints, 100),
} }
go eq.join() go eq.join()
return eq return eq
@ -257,7 +258,7 @@ func (eq *endpointQueries) join() {
} }
// request blocks until the requested endpoint is seen. // request blocks until the requested endpoint is seen.
func (eq *endpointQueries) request(endpointsName string) *api.Endpoints { func (eq *endpointQueries) request(endpointsName string) *v1.Endpoints {
result := make(chan struct{}) result := make(chan struct{})
req := &endpointQuery{ req := &endpointQuery{
endpointsName: endpointsName, endpointsName: endpointsName,
@ -269,7 +270,7 @@ func (eq *endpointQueries) request(endpointsName string) *api.Endpoints {
} }
// added marks e as added; does not block. // added marks e as added; does not block.
func (eq *endpointQueries) added(e *api.Endpoints) { func (eq *endpointQueries) added(e *v1.Endpoints) {
eq.seenChan <- e eq.seenChan <- e
} }
@ -277,26 +278,26 @@ func (eq *endpointQueries) added(e *api.Endpoints) {
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) { func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := cache.NewInformer( _, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options) obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options)
return runtime.Object(obj), err return runtime.Object(obj), err
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options) return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
}, },
}, },
&api.Endpoints{}, &v1.Endpoints{},
0, 0,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
if e, ok := obj.(*api.Endpoints); ok { if e, ok := obj.(*v1.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 { if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
q.added(e) q.added(e)
} }
} }
}, },
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
if e, ok := cur.(*api.Endpoints); ok { if e, ok := cur.(*v1.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 { if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
q.added(e) q.added(e)
} }
@ -315,15 +316,15 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) { func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
// Make a service that points to that pod. // Make a service that points to that pod.
svc := &api.Service{ svc := &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: "latency-svc-", GenerateName: "latency-svc-",
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Ports: []api.ServicePort{{Protocol: api.ProtocolTCP, Port: 80}}, Ports: []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}},
Selector: map[string]string{"name": name}, Selector: map[string]string{"name": name},
Type: api.ServiceTypeClusterIP, Type: v1.ServiceTypeClusterIP,
SessionAffinity: api.ServiceAffinityNone, SessionAffinity: v1.ServiceAffinityNone,
}, },
} }
startTime := time.Now() startTime := time.Now()


@ -22,7 +22,8 @@ import (
"net/http" "net/http"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@ -111,7 +112,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) {
// Find the pods of the rc we just created. // Find the pods of the rc we just created.
labelSelector := labels.SelectorFromSet( labelSelector := labels.SelectorFromSet(
labels.Set(map[string]string{"name": h.rcName})) labels.Set(map[string]string{"name": h.rcName}))
options := api.ListOptions{LabelSelector: labelSelector} options := v1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := h.client.Core().Pods(h.rcNamespace).List(options) pods, err := h.client.Core().Pods(h.rcNamespace).List(options)
if err != nil { if err != nil {
return err return err
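Another recurring migration pattern, visible just above: v1.ListOptions takes the label selector as a plain string, so labels.Selector values must be serialized with .String(). Generically (c and ns stand in for a clientset.Interface and a namespace):

sel := labels.SelectorFromSet(labels.Set{"app": "example"}) // illustrative label set
pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: sel.String()})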
@@ -262,8 +263,8 @@ func simpleGET(c *http.Client, url, host string) (string, error) {
 }

 // rcFromManifest reads a .json/yaml file and returns the rc in it.
-func rcFromManifest(fileName string) *api.ReplicationController {
-	var controller api.ReplicationController
+func rcFromManifest(fileName string) *v1.ReplicationController {
+	var controller v1.ReplicationController
 	framework.Logf("Parsing rc from %v", fileName)
 	data := framework.ReadOrDie(fileName)
@@ -275,8 +276,8 @@ func rcFromManifest(fileName string) *api.ReplicationController {
 }

 // svcFromManifest reads a .json/yaml file and returns the service in it.
-func svcFromManifest(fileName string) *api.Service {
-	var svc api.Service
+func svcFromManifest(fileName string) *v1.Service {
+	var svc v1.Service
 	framework.Logf("Parsing service from %v", fileName)
 	data := framework.ReadOrDie(fileName)
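
Both manifest helpers are cut off by the hunk boundary right after ReadOrDie, so the decode step is not visible here. A plausible completion under the new return type, assuming pkg/util/yaml's ToJSON plus encoding/json (the file's real decoder may differ):

	// Hypothetical body for svcFromManifest after the v1 migration:
	// convert YAML to JSON, then unmarshal into the versioned type.
	func svcFromManifest(fileName string) *v1.Service {
		var svc v1.Service
		framework.Logf("Parsing service from %v", fileName)
		data := framework.ReadOrDie(fileName)
		jsonData, err := yaml.ToJSON(data) // k8s.io/kubernetes/pkg/util/yaml
		if err != nil {
			framework.Failf("cannot convert %v to JSON: %v", fileName, err)
		}
		if err := json.Unmarshal(jsonData, &svc); err != nil {
			framework.Failf("cannot parse %v: %v", fileName, err)
		}
		return &svc
	}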

View File

@@ -24,8 +24,9 @@ import (
 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -45,7 +46,7 @@ var data = `{
 type Foo struct {
 	unversioned.TypeMeta `json:",inline"`
-	api.ObjectMeta       `json:"metadata,omitempty" description:"standard object metadata"`
+	v1.ObjectMeta        `json:"metadata,omitempty" description:"standard object metadata"`

 	SomeField  string `json:"someField"`
 	OtherField int    `json:"otherField"`
@@ -64,7 +65,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() {
 	f := framework.NewDefaultFramework("thirdparty")

 	rsrc := &extensions.ThirdPartyResource{
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "foo.company.com",
 		},
 		Versions: []extensions.APIVersion{
@@ -120,7 +121,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() {
 		TypeMeta: unversioned.TypeMeta{
 			Kind: "Foo",
 		},
-		ObjectMeta: api.ObjectMeta{
+		ObjectMeta: v1.ObjectMeta{
 			Name: "foo",
 		},
 		SomeField: "bar",
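
After the import swap, the TPR literal mixes packages deliberately: the registration object itself comes from extensions/v1beta1 while its metadata is the versioned v1.ObjectMeta. A sketch of how such a resource is registered, assuming the release_1_5 clientset exposes ThirdPartyResources() under Extensions(); the version name is an assumption, since the hunk truncates before the version list:

	rsrc := &extensions.ThirdPartyResource{
		ObjectMeta: v1.ObjectMeta{
			Name: "foo.company.com", // must be <kind>.<domain>
		},
		Versions: []extensions.APIVersion{
			{Name: "v1"}, // assumed; the diff cuts off before this field's value
		},
	}
	// Registration makes /apis/company.com/v1/foos servable by the apiserver.
	_, err := f.ClientSet.Extensions().ThirdPartyResources().Create(rsrc)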

View File

@@ -22,9 +22,9 @@ import (
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"

-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	"k8s.io/kubernetes/pkg/api/v1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/labels"
 	"k8s.io/kubernetes/pkg/util/intstr"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -61,16 +61,16 @@ var _ = framework.KubeDescribe("Multi-AZ Clusters", func() {
 func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
 	// First create the service
 	serviceName := "test-service"
-	serviceSpec := &api.Service{
-		ObjectMeta: api.ObjectMeta{
+	serviceSpec := &v1.Service{
+		ObjectMeta: v1.ObjectMeta{
 			Name:      serviceName,
 			Namespace: f.Namespace.Name,
 		},
-		Spec: api.ServiceSpec{
+		Spec: v1.ServiceSpec{
 			Selector: map[string]string{
 				"service": serviceName,
 			},
-			Ports: []api.ServicePort{{
+			Ports: []v1.ServicePort{{
 				Port:       80,
 				TargetPort: intstr.FromInt(80),
 			}},
@@ -80,13 +80,13 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 	Expect(err).NotTo(HaveOccurred())

 	// Now create some pods behind the service
-	podSpec := &api.Pod{
-		ObjectMeta: api.ObjectMeta{
+	podSpec := &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{
 			Name:   serviceName,
 			Labels: map[string]string{"service": serviceName},
 		},
-		Spec: api.PodSpec{
-			Containers: []api.Container{
+		Spec: v1.PodSpec{
+			Containers: []v1.Container{
 				{
 					Name:  "test",
 					Image: framework.GetPauseImageName(f.ClientSet),
@@ -113,7 +113,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
 }

 // Find the name of the zone in which a Node is running
-func getZoneNameForNode(node api.Node) (string, error) {
+func getZoneNameForNode(node v1.Node) (string, error) {
 	for key, value := range node.Labels {
 		if key == unversioned.LabelZoneFailureDomain {
 			return value, nil
@@ -126,7 +126,7 @@ func getZoneNameForNode(node api.Node) (string, error) {
 // Find the names of all zones in which we have nodes in this cluster.
 func getZoneNames(c clientset.Interface) ([]string, error) {
 	zoneNames := sets.NewString()
-	nodes, err := c.Core().Nodes().List(api.ListOptions{})
+	nodes, err := c.Core().Nodes().List(v1.ListOptions{})
 	if err != nil {
 		return nil, err
 	}
@@ -148,7 +148,7 @@ func getZoneCount(c clientset.Interface) (int, error) {
 }

 // Find the name of the zone in which the pod is scheduled
-func getZoneNameForPod(c clientset.Interface, pod api.Pod) (string, error) {
+func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
 	By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
 	node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
 	Expect(err).NotTo(HaveOccurred())
@@ -157,7 +157,7 @@ func getZoneNameForPod(c clientset.Interface, pod api.Pod) (string, error) {
 // Determine whether a set of pods are approximately evenly spread
 // across a given set of zones
-func checkZoneSpreading(c clientset.Interface, pods *api.PodList, zoneNames []string) (bool, error) {
+func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []string) (bool, error) {
 	podsPerZone := make(map[string]int)
 	for _, zoneName := range zoneNames {
 		podsPerZone[zoneName] = 0
@@ -190,26 +190,26 @@ func checkZoneSpreading(c clientset.Interface, pods *api.PodList, zoneNames []string) (bool, error) {
 func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	name := "ubelite-spread-rc-" + string(uuid.NewUUID())
 	By(fmt.Sprintf("Creating replication controller %s", name))
-	controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
-		ObjectMeta: api.ObjectMeta{
+	controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
+		ObjectMeta: v1.ObjectMeta{
 			Namespace: f.Namespace.Name,
 			Name:      name,
 		},
-		Spec: api.ReplicationControllerSpec{
-			Replicas: replicaCount,
+		Spec: v1.ReplicationControllerSpec{
+			Replicas: &replicaCount,
 			Selector: map[string]string{
 				"name": name,
 			},
-			Template: &api.PodTemplateSpec{
-				ObjectMeta: api.ObjectMeta{
+			Template: &v1.PodTemplateSpec{
+				ObjectMeta: v1.ObjectMeta{
 					Labels: map[string]string{"name": name},
 				},
-				Spec: api.PodSpec{
-					Containers: []api.Container{
+				Spec: v1.PodSpec{
+					Containers: []v1.Container{
 						{
 							Name:  name,
 							Image: image,
-							Ports: []api.ContainerPort{{ContainerPort: 9376}},
+							Ports: []v1.ContainerPort{{ContainerPort: 9376}},
 						},
 					},
 				},
@@ -220,7 +220,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
 	// Cleanup the replication controller when we are done.
 	defer func() {
 		// Resize the replication controller to zero to get rid of pods.
-		if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
+		if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
 			framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
 		}
 	}()
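
Two substantive changes ride along with the rename in SpreadRCOrFail: v1.ReplicationControllerSpec.Replicas is a *int32 (nil lets the server default apply), hence the new &replicaCount, and DeleteRCAndPods now takes the internal clientset alongside the versioned one. A stripped-down sketch of the pointer change, with placeholder names:

	replicaCount := int32(3) // placeholder count
	rc := &v1.ReplicationController{
		ObjectMeta: v1.ObjectMeta{Name: "spread-rc"},
		Spec: v1.ReplicationControllerSpec{
			// v1 distinguishes "unset" (nil) from an explicit 0, so the
			// field is a pointer rather than the internal type's bare int32.
			Replicas: &replicaCount,
			Selector: map[string]string{"name": "spread-rc"},
		},
	}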

Some files were not shown because too many files have changed in this diff.