diff --git a/test/e2e/addon_update.go b/test/e2e/addon_update.go
index 50aef73444a..0ebe73af989 100644
--- a/test/e2e/addon_update.go
+++ b/test/e2e/addon_update.go
@@ -25,8 +25,8 @@ import (
"time"
"golang.org/x/crypto/ssh"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -205,7 +205,7 @@ spec:
const (
addonTestPollInterval = 3 * time.Second
addonTestPollTimeout = 5 * time.Minute
- defaultNsName = api.NamespaceDefault
+ defaultNsName = v1.NamespaceDefault
addonNsName = "kube-system"
)
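
The addon_update.go hunk shows the pattern this patch repeats in every file: drop the internal "k8s.io/kubernetes/pkg/api" package and internal clientset in favour of the versioned "pkg/api/v1" types and the generated release_1_5 clientset. A minimal sketch of the resulting shape — illustrative only, not part of the patch, with a hypothetical helper name:

// Illustrative sketch (not part of the patch): post-migration import shape.
package e2e

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// listDefaultPods is a hypothetical helper; the calls mirror those in the hunks:
// versioned clientset, v1 constants, v1.ListOptions.
func listDefaultPods(c clientset.Interface) (*v1.PodList, error) {
	return c.Core().Pods(v1.NamespaceDefault).List(v1.ListOptions{})
}
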
diff --git a/test/e2e/autoscaling_utils.go b/test/e2e/autoscaling_utils.go
index ce060acaf48..7b7ff7eafa6 100644
--- a/test/e2e/autoscaling_utils.go
+++ b/test/e2e/autoscaling_utils.go
@@ -21,8 +21,9 @@ import (
"strconv"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -97,7 +98,7 @@ cpuLimit argument is in millicores, cpuLimit is a maximum amount of cpu that can
func newResourceConsumer(name, kind string, replicas, initCPUTotal, initMemoryTotal, initCustomMetric, consumptionTimeInSeconds, requestSizeInMillicores,
requestSizeInMegabytes int, requestSizeCustomMetric int, cpuLimit, memLimit int64, f *framework.Framework) *ResourceConsumer {
- runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
+ runServiceAndWorkloadForResourceConsumer(f.ClientSet, f.InternalClientset, f.Namespace.Name, name, kind, replicas, cpuLimit, memLimit)
rc := &ResourceConsumer{
name: name,
controllerName: name + "-ctrl",
@@ -303,20 +304,20 @@ func (rc *ResourceConsumer) CleanUp() {
rc.stopCustomMetric <- 0
// Wait some time to ensure all child goroutines are finished.
time.Sleep(10 * time.Second)
- framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.name))
+ framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.name))
framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.name, nil))
- framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.Namespace.Name, rc.controllerName))
+ framework.ExpectNoError(framework.DeleteRCAndPods(rc.framework.ClientSet, rc.framework.InternalClientset, rc.framework.Namespace.Name, rc.controllerName))
framework.ExpectNoError(rc.framework.ClientSet.Core().Services(rc.framework.Namespace.Name).Delete(rc.controllerName, nil))
}
-func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
+func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, internalClient internalclientset.Interface, ns, name, kind string, replicas int, cpuLimitMillis, memLimitMb int64) {
By(fmt.Sprintf("Running consuming RC %s via %s with %v replicas", name, kind, replicas))
- _, err := c.Core().Services(ns).Create(&api.Service{
- ObjectMeta: api.ObjectMeta{
+ _, err := c.Core().Services(ns).Create(&v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ServiceSpec{
- Ports: []api.ServicePort{{
+ Spec: v1.ServiceSpec{
+ Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
@@ -329,16 +330,17 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, k
framework.ExpectNoError(err)
rcConfig := testutils.RCConfig{
- Client: c,
- Image: resourceConsumerImage,
- Name: name,
- Namespace: ns,
- Timeout: timeoutRC,
- Replicas: replicas,
- CpuRequest: cpuLimitMillis,
- CpuLimit: cpuLimitMillis,
- MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
- MemLimit: memLimitMb * 1024 * 1024,
+ Client: c,
+ InternalClient: internalClient,
+ Image: resourceConsumerImage,
+ Name: name,
+ Namespace: ns,
+ Timeout: timeoutRC,
+ Replicas: replicas,
+ CpuRequest: cpuLimitMillis,
+ CpuLimit: cpuLimitMillis,
+ MemRequest: memLimitMb * 1024 * 1024, // MemLimit is in bytes
+ MemLimit: memLimitMb * 1024 * 1024,
}
switch kind {
@@ -364,12 +366,12 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, k
By(fmt.Sprintf("Running controller"))
controllerName := name + "-ctrl"
- _, err = c.Core().Services(ns).Create(&api.Service{
- ObjectMeta: api.ObjectMeta{
+ _, err = c.Core().Services(ns).Create(&v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: controllerName,
},
- Spec: api.ServiceSpec{
- Ports: []api.ServicePort{{
+ Spec: v1.ServiceSpec{
+ Ports: []v1.ServicePort{{
Port: port,
TargetPort: intstr.FromInt(targetPort),
}},
@@ -381,7 +383,7 @@ func runServiceAndWorkloadForResourceConsumer(c clientset.Interface, ns, name, k
})
framework.ExpectNoError(err)
- dnsClusterFirst := api.DNSClusterFirst
+ dnsClusterFirst := v1.DNSClusterFirst
controllerRcConfig := testutils.RCConfig{
Client: c,
Image: resourceConsumerControllerImage,
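
The autoscaling_utils.go hunks also show why the internal clientset does not disappear entirely: framework helpers that still drive kubectl-era code (RunRC, DeleteRCAndPods) need it, so testutils.RCConfig grows an InternalClient field and callers pass both clients. A sketch of that split, as a hypothetical fragment reusing only the identifiers visible in the hunks above:

// Hypothetical fragment: run and clean up a consumer RC with both clientsets.
rcConfig := testutils.RCConfig{
	Client:         f.ClientSet,         // versioned release_1_5 clientset
	InternalClient: f.InternalClientset, // internal clientset for kubectl-style helpers
	Image:          resourceConsumerImage,
	Name:           name,
	Namespace:      ns,
	Timeout:        timeoutRC,
	Replicas:       replicas,
}
framework.ExpectNoError(framework.RunRC(rcConfig))
// Cleanup mirrors the same pairing of clientsets.
framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, name))
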
diff --git a/test/e2e/batch_v1_jobs.go b/test/e2e/batch_v1_jobs.go
index 71d582f1191..82b9cde6cb9 100644
--- a/test/e2e/batch_v1_jobs.go
+++ b/test/e2e/batch_v1_jobs.go
@@ -25,8 +25,10 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
- "k8s.io/kubernetes/pkg/apis/batch"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ batchinternal "k8s.io/kubernetes/pkg/apis/batch"
+ batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
@@ -53,7 +55,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
// Simplest case: all pods succeed promptly
It("should run a job to completion when tasks succeed", func() {
By("Creating a job")
- job := newTestV1Job("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
+ job := newTestV1Job("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -72,7 +74,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
// up to 5 minutes between restarts, making test timeouts
// due to successive failures too likely with a reasonable
// test timeout.
- job := newTestV1Job("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
+ job := newTestV1Job("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -90,7 +92,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
- job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
+ job := newTestV1Job("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -101,7 +103,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
It("should keep restarting failed pods", func() {
By("Creating a job")
- job := newTestV1Job("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
+ job := newTestV1Job("fail", "all-fail", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -119,7 +121,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
startParallelism := int32(1)
endParallelism := int32(2)
By("Creating a job")
- job := newTestV1Job("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
+ job := newTestV1Job("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -128,7 +130,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job up")
- scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+ scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -144,7 +146,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
startParallelism := int32(2)
endParallelism := int32(1)
By("Creating a job")
- job := newTestV1Job("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
+ job := newTestV1Job("notTerminate", "scale-down", v1.RestartPolicyNever, startParallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -153,7 +155,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job down")
- scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+ scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -167,7 +169,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
It("should delete a job", func() {
By("Creating a job")
- job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+ job := newTestV1Job("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -176,7 +178,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
Expect(err).NotTo(HaveOccurred())
By("delete a job")
- reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
+ reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
@@ -190,7 +192,7 @@ var _ = framework.KubeDescribe("V1Job", func() {
It("should fail a job", func() {
By("Creating a job")
- job := newTestV1Job("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+ job := newTestV1Job("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
job, err := createV1Job(f.ClientSet, f.Namespace.Name, job)
@@ -215,34 +217,34 @@ var _ = framework.KubeDescribe("V1Job", func() {
})
// newTestV1Job returns a job which does one of several testing behaviors.
-func newTestV1Job(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
+func newTestV1Job(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
job := &batch.Job{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: batch.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"somekey": "somevalue"},
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
RestartPolicy: rPol,
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
{
Name: "data",
- VolumeSource: api.VolumeSource{
- EmptyDir: &api.EmptyDirVolumeSource{},
+ VolumeSource: v1.VolumeSource{
+ EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
@@ -289,21 +291,21 @@ func updateV1Job(c clientset.Interface, ns string, job *batch.Job) (*batch.Job,
}
func deleteV1Job(c clientset.Interface, ns, name string) error {
- return c.Batch().Jobs(ns).Delete(name, api.NewDeleteOptions(0))
+ return c.Batch().Jobs(ns).Delete(name, v1.NewDeleteOptions(0))
}
// Wait for all pods to become Running. Only use when pods will run for a long time, or it will be racy.
func waitForAllPodsRunningV1(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{v1JobSelectorKey: jobName}))
return wait.Poll(framework.Poll, v1JobTimeout, func() (bool, error) {
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
count := int32(0)
for _, p := range pods.Items {
- if p.Status.Phase == api.PodRunning {
+ if p.Status.Phase == v1.PodRunning {
count++
}
}
@@ -330,7 +332,7 @@ func waitForV1JobFail(c clientset.Interface, ns, jobName string, timeout time.Du
return false, err
}
for _, c := range curr.Status.Conditions {
- if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
+ if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
return true, nil
}
}
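
The batch_v1_jobs.go hunks show the other half of the split: kubectl's Scaler and Reaper still operate on internal API group kinds, so they are built from batchinternal.Kind("Job") and f.InternalClientset, while the job objects themselves are the versioned batch/v1 types. A short sketch of the reaper path, using only calls that appear in the hunks above:

// Hypothetical fragment: delete a job through the internal-clientset reaper.
reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
// The reaper still consumes the internal api.NewDeleteOptions helper,
// exactly as in the "should delete a job" test above.
err = reaper.Stop(f.Namespace.Name, job.Name, 1*time.Minute, api.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
// kubectl.ScalerFor takes the same (internal kind, internal clientset) pair.
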
diff --git a/test/e2e/cadvisor.go b/test/e2e/cadvisor.go
index 27876a6314e..4e099f9e3fa 100644
--- a/test/e2e/cadvisor.go
+++ b/test/e2e/cadvisor.go
@@ -20,8 +20,8 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("Cadvisor", func() {
func CheckCadvisorHealthOnAllNodes(c clientset.Interface, timeout time.Duration) {
// It should be OK to list unschedulable Nodes here.
By("getting list of nodes")
- nodeList, err := c.Core().Nodes().List(api.ListOptions{})
+ nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err)
var errors []error
diff --git a/test/e2e/cluster_logging_es.go b/test/e2e/cluster_logging_es.go
index b1b8c5f0185..e37656e64fd 100644
--- a/test/e2e/cluster_logging_es.go
+++ b/test/e2e/cluster_logging_es.go
@@ -24,6 +24,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
@@ -52,7 +53,7 @@ var _ = framework.KubeDescribe("Cluster level logging using Elasticsearch [Featu
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
- defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
+ defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))
@@ -101,7 +102,7 @@ func checkElasticsearchReadiness(f *framework.Framework) error {
// Wait for the Elasticsearch pods to enter the running state.
By("Checking to make sure the Elasticsearch pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
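
The logging tests highlight a subtler part of the conversion: v1.ListOptions carries selectors as plain strings, so a labels.Selector must be rendered with String() before being passed to the versioned client. Note also that "pkg/api" stays imported alongside "pkg/api/v1" wherever constants such as api.NamespaceSystem are still needed. A sketch drawn directly from the hunk above, as a hypothetical fragment:

// Hypothetical fragment: list system pods by label with the versioned client.
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "elasticsearch-logging"}))
options := v1.ListOptions{LabelSelector: label.String()} // string, not labels.Selector
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
	framework.Logf("found logging pod %s", pod.Name)
}
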
diff --git a/test/e2e/cluster_logging_gcl.go b/test/e2e/cluster_logging_gcl.go
index db84062b7e9..9d5e65687ee 100644
--- a/test/e2e/cluster_logging_gcl.go
+++ b/test/e2e/cluster_logging_gcl.go
@@ -23,7 +23,7 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/json"
"k8s.io/kubernetes/test/e2e/framework"
@@ -42,7 +42,7 @@ var _ = framework.KubeDescribe("Cluster level logging using GCL", func() {
It("should check that logs from containers are ingested in GCL", func() {
By("Running synthetic logger")
createSynthLogger(f, expectedLinesCount)
- defer f.PodClient().Delete(synthLoggerPodName, &api.DeleteOptions{})
+ defer f.PodClient().Delete(synthLoggerPodName, &v1.DeleteOptions{})
err := framework.WaitForPodSuccessInNamespace(f.ClientSet, synthLoggerPodName, f.Namespace.Name)
framework.ExpectNoError(err, fmt.Sprintf("Should've successfully waited for pod %s to succeed", synthLoggerPodName))
diff --git a/test/e2e/cluster_logging_utils.go b/test/e2e/cluster_logging_utils.go
index 2dfa0205255..950b789bf45 100644
--- a/test/e2e/cluster_logging_utils.go
+++ b/test/e2e/cluster_logging_utils.go
@@ -22,6 +22,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -41,14 +42,14 @@ const (
)
func createSynthLogger(f *framework.Framework, linesCount int) {
- f.PodClient().Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ f.PodClient().Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: synthLoggerPodName,
Namespace: f.Namespace.Name,
},
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyOnFailure,
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyOnFailure,
+ Containers: []v1.Container{
{
Name: synthLoggerPodName,
Image: "gcr.io/google_containers/busybox:1.24",
@@ -72,7 +73,7 @@ func reportLogsFromFluentdPod(f *framework.Framework) error {
}
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "fluentd-logging"}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
fluentdPods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
for _, fluentdPod := range fluentdPods.Items {
diff --git a/test/e2e/cluster_size_autoscaling.go b/test/e2e/cluster_size_autoscaling.go
index 6f9684ef68a..e26391d54dc 100644
--- a/test/e2e/cluster_size_autoscaling.go
+++ b/test/e2e/cluster_size_autoscaling.go
@@ -26,8 +26,8 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/test/e2e/framework"
@@ -63,8 +63,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
nodeCount = len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
- cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
- mem := nodes.Items[0].Status.Capacity[api.ResourceMemory]
+ cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
+ mem := nodes.Items[0].Status.Capacity[v1.ResourceMemory]
coresPerNode = int((&cpu).MilliValue() / 1000)
memCapacityMb = int((&mem).Value() / 1024 / 1024)
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
It("shouldn't increase cluster size if pending pod is too large [Feature:ClusterSizeAutoscalingScaleUp]", func() {
By("Creating unschedulable pod")
ReserveMemory(f, "memory-reservation", 1, memCapacityMb, false)
- defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
By("Waiting for scale up hoping it won't happen")
// Verify that the appropriate event was generated.
@@ -106,7 +106,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
EventsLoop:
for start := time.Now(); time.Since(start) < scaleUpTimeout; time.Sleep(20 * time.Second) {
By("Waiting for NotTriggerScaleUp event")
- events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(api.ListOptions{})
+ events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(v1.ListOptions{})
framework.ExpectNoError(err)
for _, e := range events.Items {
@@ -125,7 +125,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pending pods are small [Feature:ClusterSizeAutoscalingScaleUp]", func() {
ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
- defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Verify, that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
glog.Infof("Not enabling cluster autoscaler for the node pool (on purpose).")
ReserveMemory(f, "memory-reservation", 100, nodeCount*memCapacityMb, false)
- defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Verify, that cluster size is increased
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
@@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
It("should increase cluster size if pods are pending due to host port conflict [Feature:ClusterSizeAutoscalingScaleUp]", func() {
CreateHostPortPods(f, "host-port", nodeCount+2, false)
- defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "host-port")
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "host-port")
framework.ExpectNoError(WaitForClusterSizeFunc(f.ClientSet,
func(size int) bool { return size >= nodeCount+2 }, scaleUpTimeout))
@@ -218,7 +218,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
func(size int) bool { return size >= nodeCount+1 }, scaleUpTimeout))
framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, c))
- framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "node-selector"))
+ framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "node-selector"))
})
It("should scale up correct target pool [Feature:ClusterSizeAutoscalingScaleUp]", func() {
@@ -233,7 +233,7 @@ var _ = framework.KubeDescribe("Cluster size autoscaling [Slow]", func() {
By("Creating rc with 2 pods too big to fit default-pool but fitting extra-pool")
ReserveMemory(f, "memory-reservation", 2, 2*memCapacityMb, false)
- defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, "memory-reservation")
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, "memory-reservation")
// Apparently the GKE master is restarted a couple of minutes after the node pool is added,
// resetting all the timers in scale down code. Adding 5 extra minutes to work around
@@ -458,14 +458,15 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
By(fmt.Sprintf("Running RC which reserves host port and defines node selector"))
config := &testutils.RCConfig{
- Client: f.ClientSet,
- Name: "node-selector",
- Namespace: f.Namespace.Name,
- Timeout: defaultTimeout,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: replicas,
- HostPorts: map[string]int{"port1": 4321},
- NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"},
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: "node-selector",
+ Namespace: f.Namespace.Name,
+ Timeout: defaultTimeout,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: replicas,
+ HostPorts: map[string]int{"port1": 4321},
+ NodeSelector: map[string]string{"cluster-autoscaling-test.special-node": "true"},
}
err := framework.RunRC(*config)
if expectRunning {
@@ -476,13 +477,14 @@ func CreateNodeSelectorPods(f *framework.Framework, id string, replicas int, nod
func CreateHostPortPods(f *framework.Framework, id string, replicas int, expectRunning bool) {
By(fmt.Sprintf("Running RC which reserves host port"))
config := &testutils.RCConfig{
- Client: f.ClientSet,
- Name: id,
- Namespace: f.Namespace.Name,
- Timeout: defaultTimeout,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: replicas,
- HostPorts: map[string]int{"port1": 4321},
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: id,
+ Namespace: f.Namespace.Name,
+ Timeout: defaultTimeout,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: replicas,
+ HostPorts: map[string]int{"port1": 4321},
}
err := framework.RunRC(*config)
if expectRunning {
@@ -494,13 +496,14 @@ func ReserveCpu(f *framework.Framework, id string, replicas, millicores int) {
By(fmt.Sprintf("Running RC which reserves %v millicores", millicores))
request := int64(millicores / replicas)
config := &testutils.RCConfig{
- Client: f.ClientSet,
- Name: id,
- Namespace: f.Namespace.Name,
- Timeout: defaultTimeout,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: replicas,
- CpuRequest: request,
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: id,
+ Namespace: f.Namespace.Name,
+ Timeout: defaultTimeout,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: replicas,
+ CpuRequest: request,
}
framework.ExpectNoError(framework.RunRC(*config))
}
@@ -509,13 +512,14 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
By(fmt.Sprintf("Running RC which reserves %v MB of memory", megabytes))
request := int64(1024 * 1024 * megabytes / replicas)
config := &testutils.RCConfig{
- Client: f.ClientSet,
- Name: id,
- Namespace: f.Namespace.Name,
- Timeout: defaultTimeout,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: replicas,
- MemRequest: request,
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: id,
+ Namespace: f.Namespace.Name,
+ Timeout: defaultTimeout,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: replicas,
+ MemRequest: request,
}
err := framework.RunRC(*config)
if expectRunning {
@@ -526,9 +530,9 @@ func ReserveMemory(f *framework.Framework, id string, replicas, megabytes int, e
// WaitForClusterSize waits until the cluster size matches the given function.
func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
- nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
- }.AsSelector()})
+ }.AsSelector().String()})
if err != nil {
glog.Warningf("Failed to list nodes: %v", err)
continue
@@ -536,8 +540,8 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
- framework.FilterNodes(nodes, func(node api.Node) bool {
- return framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+ framework.FilterNodes(nodes, func(node v1.Node) bool {
+ return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
@@ -553,7 +557,7 @@ func WaitForClusterSizeFunc(c clientset.Interface, sizeFunc func(int) bool, time
func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interface) error {
var notready []string
for start := time.Now(); time.Now().Before(start.Add(scaleUpTimeout)); time.Sleep(20 * time.Second) {
- pods, err := c.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
+ pods, err := c.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
return fmt.Errorf("failed to get pods: %v", err)
}
@@ -561,16 +565,16 @@ func waitForAllCaPodsReadyInNamespace(f *framework.Framework, c clientset.Interf
for _, pod := range pods.Items {
ready := false
for _, c := range pod.Status.Conditions {
- if c.Type == api.PodReady && c.Status == api.ConditionTrue {
+ if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
ready = true
}
}
// Failed pods in this context generally mean that they have been
// double scheduled onto a node, but then failed a constraint check.
- if pod.Status.Phase == api.PodFailed {
+ if pod.Status.Phase == v1.PodFailed {
glog.Warningf("Pod has failed: %v", pod)
}
- if !ready && pod.Status.Phase != api.PodFailed {
+ if !ready && pod.Status.Phase != v1.PodFailed {
notready = append(notready, pod.Name)
}
}
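
Field selectors get the same string treatment as label selectors in cluster_size_autoscaling.go. A sketch of the node listing, assuming the fields and glog imports already used by this file:

// Hypothetical fragment: list schedulable nodes with a string field selector.
nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
	"spec.unschedulable": "false",
}.AsSelector().String()})
framework.ExpectNoError(err)
// Ready-node filtering likewise moves to the v1 node type and conditions.
framework.FilterNodes(nodes, func(node v1.Node) bool {
	return framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
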
diff --git a/test/e2e/cluster_upgrade.go b/test/e2e/cluster_upgrade.go
index 8fd43d5b51a..2e34ef45430 100644
--- a/test/e2e/cluster_upgrade.go
+++ b/test/e2e/cluster_upgrade.go
@@ -21,8 +21,8 @@ import (
"path"
"strings"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/chaosmonkey"
"k8s.io/kubernetes/test/e2e/framework"
@@ -152,11 +152,11 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
By("creating a TCP service " + serviceName + " with type=LoadBalancer in namespace " + f.Namespace.Name)
// TODO it's weird that we have to do this and then wait WaitForLoadBalancer which changes
// tcpService.
- tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeLoadBalancer
+ tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeLoadBalancer
})
tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, loadBalancerCreateTimeoutDefault)
- jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
// Get info to hit it with
tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
@@ -188,7 +188,7 @@ func testService(f *framework.Framework, sem *chaosmonkey.Semaphore, testDuringD
// Sanity check and hit it once more
By("hitting the pod through the service's LoadBalancer")
jig.TestReachableHTTP(tcpIngressIP, svcPort, loadBalancerLagTimeoutDefault)
- jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
}
func checkMasterVersion(c clientset.Interface, want string) error {
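
cluster_upgrade.go applies the same move to services: the service jig's mutator callback and its sanity checks now work on *v1.Service and the v1.ServiceType constants. A sketch using only the jig calls visible above:

// Hypothetical fragment: create and sanity-check a LoadBalancer service.
tcpService := jig.CreateTCPServiceOrFail(f.Namespace.Name, func(s *v1.Service) {
	s.Spec.Type = v1.ServiceTypeLoadBalancer
})
tcpService = jig.WaitForLoadBalancerOrFail(f.Namespace.Name, tcpService.Name, loadBalancerCreateTimeoutDefault)
jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
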
diff --git a/test/e2e/common/configmap.go b/test/e2e/common/configmap.go
index 47780ff977f..20d7a0aa80b 100644
--- a/test/e2e/common/configmap.go
+++ b/test/e2e/common/configmap.go
@@ -21,7 +21,7 @@ import (
"os"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -79,8 +79,8 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
volumeMountPath := "/etc/configmap-volume"
containerName := "configmap-volume-test"
- configMap := &api.ConfigMap{
- ObjectMeta: api.ObjectMeta{
+ configMap := &v1.ConfigMap{
+ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
@@ -95,29 +95,29 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=/etc/configmap-volume/data-1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -126,7 +126,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
By("Creating the pod")
@@ -157,22 +157,22 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
{
Name: "CONFIG_DATA_1",
- ValueFrom: &api.EnvVarSource{
- ConfigMapKeyRef: &api.ConfigMapKeySelector{
- LocalObjectReference: api.LocalObjectReference{
+ ValueFrom: &v1.EnvVarSource{
+ ConfigMapKeyRef: &v1.ConfigMapKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
@@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -207,17 +207,17 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
@@ -225,21 +225,21 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
},
{
Name: volumeName2,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{"--file_content=/etc/configmap-volume/data-1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -253,7 +253,7 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -264,9 +264,9 @@ var _ = framework.KubeDescribe("ConfigMap", func() {
})
})
-func newConfigMap(f *framework.Framework, name string) *api.ConfigMap {
- return &api.ConfigMap{
- ObjectMeta: api.ObjectMeta{
+func newConfigMap(f *framework.Framework, name string) *v1.ConfigMap {
+ return &v1.ConfigMap{
+ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
@@ -292,32 +292,32 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- SecurityContext: &api.PodSecurityContext{},
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ SecurityContext: &v1.PodSecurityContext{},
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/configmap-volume/data-1",
"--file_mode=/etc/configmap-volume/data-1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -325,7 +325,7 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -353,7 +353,6 @@ func doConfigMapE2EWithoutMappings(f *framework.Framework, uid, fsGroup int64, d
output = append(output, "mode of file \"/etc/configmap-volume/data-1\": "+modeString)
}
f.TestContainerOutput("consume configMaps", pod, 0, output)
-
}
func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, itemMode *int32) {
@@ -371,21 +370,21 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-configmaps-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- SecurityContext: &api.PodSecurityContext{},
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ SecurityContext: &v1.PodSecurityContext{},
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
- Items: []api.KeyToPath{
+ Items: []v1.KeyToPath{
{
Key: "data-2",
Path: "path/to/data-2",
@@ -395,13 +394,13 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "configmap-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{"--file_content=/etc/configmap-volume/path/to/data-2",
"--file_mode=/etc/configmap-volume/path/to/data-2"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -410,7 +409,7 @@ func doConfigMapE2EWithMappings(f *framework.Framework, uid, fsGroup int64, item
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
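
The configmap tests are mostly a mechanical type swap, but they show the 1.5-era object construction this patch standardises on: ObjectMeta, PodSpec, volumes and containers all come from "pkg/api/v1" (there is no separate metav1 package yet). A reduced sketch of a pod in that shape, with illustrative names:

// Hypothetical minimal pod in the post-migration shape.
pod := &v1.Pod{
	ObjectMeta: v1.ObjectMeta{
		Name: "example-" + string(uuid.NewUUID()),
	},
	Spec: v1.PodSpec{
		RestartPolicy: v1.RestartPolicyNever,
		Containers: []v1.Container{{
			Name:  "example",
			Image: "gcr.io/google_containers/busybox:1.24",
		}},
	},
}
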
diff --git a/test/e2e/common/container_probe.go b/test/e2e/common/container_probe.go
index 30b6fd0e94f..4a693f6bde6 100644
--- a/test/e2e/common/container_probe.go
+++ b/test/e2e/common/container_probe.go
@@ -20,7 +20,7 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
if err != nil {
return false, err
}
- return api.IsPodReady(p), nil
+ return v1.IsPodReady(p), nil
}, 1*time.Minute, 1*time.Second).ShouldNot(BeTrue(), "pod should not be ready")
p, err := podClient.Get(p.Name)
@@ -94,20 +94,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
It("should be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
- runLivenessTest(f, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ runLivenessTest(f, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 10; rm -rf /tmp/health; sleep 600"},
- LivenessProbe: &api.Probe{
- Handler: api.Handler{
- Exec: &api.ExecAction{
+ LivenessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
@@ -121,20 +121,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
It("should *not* be restarted with a exec \"cat /tmp/health\" liveness probe [Conformance]", func() {
- runLivenessTest(f, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ runLivenessTest(f, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "echo ok >/tmp/health; sleep 600"},
- LivenessProbe: &api.Probe{
- Handler: api.Handler{
- Exec: &api.ExecAction{
+ LivenessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ Exec: &v1.ExecAction{
Command: []string{"cat", "/tmp/health"},
},
},
@@ -148,20 +148,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
It("should be restarted with a /healthz http liveness probe [Conformance]", func() {
- runLivenessTest(f, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ runLivenessTest(f, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"},
- LivenessProbe: &api.Probe{
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ LivenessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
@@ -177,20 +177,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
// Slow by design (5 min)
It("should have monotonically increasing restart count [Conformance] [Slow]", func() {
- runLivenessTest(f, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ runLivenessTest(f, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/liveness:e2e",
Command: []string{"/server"},
- LivenessProbe: &api.Probe{
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ LivenessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.FromInt(8080),
},
@@ -205,20 +205,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
It("should *not* be restarted with a /healthz http liveness probe [Conformance]", func() {
- runLivenessTest(f, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ runLivenessTest(f, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "liveness-http",
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/nginx-slim:0.7",
- Ports: []api.ContainerPort{{ContainerPort: 80}},
- LivenessProbe: &api.Probe{
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ Ports: []v1.ContainerPort{{ContainerPort: 80}},
+ LivenessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Path: "/",
Port: intstr.FromInt(80),
},
@@ -236,20 +236,20 @@ var _ = framework.KubeDescribe("Probing container", func() {
It("should be restarted with a docker exec liveness probe with timeout [Conformance]", func() {
// TODO: enable this test once the default exec handler supports timeout.
Skip("The default exec handler, dockertools.NativeExecHandler, does not support timeouts due to a limitation in the Docker Remote API")
- runLivenessTest(f, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ runLivenessTest(f, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "liveness-exec",
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "liveness",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh", "-c", "sleep 600"},
- LivenessProbe: &api.Probe{
- Handler: api.Handler{
- Exec: &api.ExecAction{
+ LivenessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ Exec: &v1.ExecAction{
Command: []string{"/bin/sh", "-c", "sleep 10"},
},
},
@@ -264,7 +264,7 @@ var _ = framework.KubeDescribe("Probing container", func() {
})
})
-func getContainerStartedTime(p *api.Pod, containerName string) (time.Time, error) {
+func getContainerStartedTime(p *v1.Pod, containerName string) (time.Time, error) {
for _, status := range p.Status.ContainerStatuses {
if status.Name != containerName {
continue
@@ -277,16 +277,16 @@ func getContainerStartedTime(p *api.Pod, containerName string) (time.Time, error
return time.Time{}, fmt.Errorf("cannot find container named %q", containerName)
}
-func getTransitionTimeForReadyCondition(p *api.Pod) (time.Time, error) {
+func getTransitionTimeForReadyCondition(p *v1.Pod) (time.Time, error) {
for _, cond := range p.Status.Conditions {
- if cond.Type == api.PodReady {
+ if cond.Type == v1.PodReady {
return cond.LastTransitionTime.Time, nil
}
}
return time.Time{}, fmt.Errorf("No ready condition can be found for pod")
}
-func getRestartCount(p *api.Pod) int {
+func getRestartCount(p *v1.Pod) int {
count := 0
for _, containerStatus := range p.Status.ContainerStatuses {
count += int(containerStatus.RestartCount)
@@ -294,11 +294,11 @@ func getRestartCount(p *api.Pod) int {
return count
}
-func makePodSpec(readinessProbe, livenessProbe *api.Probe) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
- Spec: api.PodSpec{
- Containers: []api.Container{
+func makePodSpec(readinessProbe, livenessProbe *v1.Probe) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{Name: "test-webserver-" + string(uuid.NewUUID())},
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: probTestContainerName,
Image: "gcr.io/google_containers/test-webserver:e2e",
@@ -326,10 +326,10 @@ func (b webserverProbeBuilder) withInitialDelay() webserverProbeBuilder {
return b
}
-func (b webserverProbeBuilder) build() *api.Probe {
- probe := &api.Probe{
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+func (b webserverProbeBuilder) build() *v1.Probe {
+ probe := &v1.Probe{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/",
},
@@ -344,7 +344,7 @@ func (b webserverProbeBuilder) build() *api.Probe {
return probe
}
-func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int, timeout time.Duration) {
+func runLivenessTest(f *framework.Framework, pod *v1.Pod, expectNumRestarts int, timeout time.Duration) {
podClient := f.PodClient()
ns := f.Namespace.Name
Expect(pod.Spec.Containers).NotTo(BeEmpty())
@@ -352,7 +352,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
// At the end of the test, clean up by removing the pod.
defer func() {
By("deleting the pod")
- podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
By(fmt.Sprintf("Creating pod %s in namespace %s", pod.Name, ns))
podClient.Create(pod)
@@ -368,7 +368,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
By("checking the pod's current state and verifying that restartCount is present")
pod, err := podClient.Get(pod.Name)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s in namespace %s", pod.Name, ns))
- initialRestartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
+ initialRestartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
framework.Logf("Initial restart count of pod %s is %d", pod.Name, initialRestartCount)
// Wait for the restart state to be as desired.
@@ -378,7 +378,7 @@ func runLivenessTest(f *framework.Framework, pod *api.Pod, expectNumRestarts int
for start := time.Now(); time.Now().Before(deadline); time.Sleep(2 * time.Second) {
pod, err = podClient.Get(pod.Name)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", pod.Name))
- restartCount := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
+ restartCount := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
if restartCount != lastRestartCount {
framework.Logf("Restart count of pod %s/%s is now %d (%v elapsed)",
ns, pod.Name, restartCount, time.Since(start))
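
container_probe.go also relies on the status helpers having v1 counterparts: api.IsPodReady, api.GetExistingContainerStatus and api.NewDeleteOptions all become v1.* calls with unchanged signatures. A sketch combining the calls that appear in the hunks above, as a hypothetical fragment:

// Hypothetical fragment: probe bookkeeping with the v1 helpers.
if v1.IsPodReady(pod) {
	restarts := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, containerName).RestartCount
	framework.Logf("pod %s ready, restart count %d", pod.Name, restarts)
}
podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
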
diff --git a/test/e2e/common/docker_containers.go b/test/e2e/common/docker_containers.go
index 329c2faa7ba..896178f7c4d 100644
--- a/test/e2e/common/docker_containers.go
+++ b/test/e2e/common/docker_containers.go
@@ -17,7 +17,7 @@ limitations under the License.
package common
import (
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -67,21 +67,21 @@ var _ = framework.KubeDescribe("Docker Containers", func() {
const testContainerName = "test-container"
// Return a prototypical entrypoint test pod
-func entrypointTestPod() *api.Pod {
+func entrypointTestPod() *v1.Pod {
podName := "client-containers-" + string(uuid.NewUUID())
- return &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ return &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: testContainerName,
Image: "gcr.io/google_containers/eptest:0.1",
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
}
diff --git a/test/e2e/common/downward_api.go b/test/e2e/common/downward_api.go
index b6ac8662716..f8ee9cb12de 100644
--- a/test/e2e/common/downward_api.go
+++ b/test/e2e/common/downward_api.go
@@ -19,8 +19,8 @@ package common
import (
"fmt"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -32,11 +32,11 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide pod name and namespace as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
- env := []api.EnvVar{
+ env := []v1.EnvVar{
{
Name: "POD_NAME",
- ValueFrom: &api.EnvVarSource{
- FieldRef: &api.ObjectFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
@@ -44,8 +44,8 @@ var _ = framework.KubeDescribe("Downward API", func() {
},
{
Name: "POD_NAMESPACE",
- ValueFrom: &api.EnvVarSource{
- FieldRef: &api.ObjectFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.namespace",
},
@@ -63,11 +63,11 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide pod IP as an env var [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
- env := []api.EnvVar{
+ env := []v1.EnvVar{
{
Name: "POD_IP",
- ValueFrom: &api.EnvVarSource{
- FieldRef: &api.ObjectFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "status.podIP",
},
@@ -84,35 +84,35 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide container's limits.cpu/memory and requests.cpu/memory as env vars [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
- env := []api.EnvVar{
+ env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
},
{
Name: "CPU_REQUEST",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.cpu",
},
},
},
{
Name: "MEMORY_REQUEST",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "requests.memory",
},
},
@@ -130,19 +130,19 @@ var _ = framework.KubeDescribe("Downward API", func() {
It("should provide default limits.cpu/memory from node allocatable [Conformance]", func() {
podName := "downward-api-" + string(uuid.NewUUID())
- env := []api.EnvVar{
+ env := []v1.EnvVar{
{
Name: "CPU_LIMIT",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.cpu",
},
},
},
{
Name: "MEMORY_LIMIT",
- ValueFrom: &api.EnvVarSource{
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
Resource: "limits.memory",
},
},
@@ -152,13 +152,13 @@ var _ = framework.KubeDescribe("Downward API", func() {
fmt.Sprintf("CPU_LIMIT=[1-9]"),
fmt.Sprintf("MEMORY_LIMIT=[1-9]"),
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -166,7 +166,7 @@ var _ = framework.KubeDescribe("Downward API", func() {
Env: env,
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -174,38 +174,38 @@ var _ = framework.KubeDescribe("Downward API", func() {
})
})
-func testDownwardAPI(f *framework.Framework, podName string, env []api.EnvVar, expectations []string) {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func testDownwardAPI(f *framework.Framework, podName string, env []v1.EnvVar, expectations []string) {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceCPU: resource.MustParse("250m"),
- api.ResourceMemory: resource.MustParse("32Mi"),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("250m"),
+ v1.ResourceMemory: resource.MustParse("32Mi"),
},
- Limits: api.ResourceList{
- api.ResourceCPU: resource.MustParse("1250m"),
- api.ResourceMemory: resource.MustParse("64Mi"),
+ Limits: v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("1250m"),
+ v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
Env: env,
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
testDownwardAPIUsingPod(f, pod, env, expectations)
}
-func testDownwardAPIUsingPod(f *framework.Framework, pod *api.Pod, env []api.EnvVar, expectations []string) {
+func testDownwardAPIUsingPod(f *framework.Framework, pod *v1.Pod, env []v1.EnvVar, expectations []string) {
f.TestContainerOutputRegexp("downward api env vars", pod, 0, expectations)
}
diff --git a/test/e2e/common/downwardapi_volume.go b/test/e2e/common/downwardapi_volume.go
index 9f3f8453d46..3b2141f52e5 100644
--- a/test/e2e/common/downwardapi_volume.go
+++ b/test/e2e/common/downwardapi_volume.go
@@ -20,8 +20,8 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -72,7 +72,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
uid := int64(1001)
gid := int64(1234)
pod := downwardAPIVolumePodForSimpleTest(podName, "/etc/podname")
- pod.Spec.SecurityContext = &api.PodSecurityContext{
+ pod.Spec.SecurityContext = &v1.PodSecurityContext{
RunAsUser: &uid,
FSGroup: &gid,
}
@@ -98,7 +98,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
podLogTimeout, framework.Poll).Should(ContainSubstring("key1=\"value1\"\n"))
//modify labels
- podClient.Update(podName, func(pod *api.Pod) {
+ podClient.Update(podName, func(pod *v1.Pod) {
pod.Labels["key3"] = "value3"
})
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
podLogTimeout, framework.Poll).Should(ContainSubstring("builder=\"bar\"\n"))
//modify annotations
- podClient.Update(podName, func(pod *api.Pod) {
+ podClient.Update(podName, func(pod *v1.Pod) {
pod.Annotations["builder"] = "foo"
})
@@ -189,15 +189,15 @@ var _ = framework.KubeDescribe("Downward API volume", func() {
})
-func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *api.Pod {
+func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMode *int32) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
- pod.Spec.Containers = []api.Container{
+ pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_mode=" + filePath},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
@@ -215,15 +215,15 @@ func downwardAPIVolumePodForModeTest(name, filePath string, itemMode, defaultMod
return pod
}
-func downwardAPIVolumePodForSimpleTest(name string, filePath string) *api.Pod {
+func downwardAPIVolumePodForSimpleTest(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
- pod.Spec.Containers = []api.Container{
+ pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
@@ -236,35 +236,35 @@ func downwardAPIVolumePodForSimpleTest(name string, filePath string) *api.Pod {
return pod
}
-func downwardAPIVolumeForContainerResources(name string, filePath string) *api.Pod {
+func downwardAPIVolumeForContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeBaseContainers("client-container", filePath)
return pod
}
-func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *api.Pod {
+func downwardAPIVolumeForDefaultContainerResources(name string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, nil, nil)
pod.Spec.Containers = downwardAPIVolumeDefaultBaseContainer("client-container", filePath)
return pod
}
-func downwardAPIVolumeBaseContainers(name, filePath string) []api.Container {
- return []api.Container{
+func downwardAPIVolumeBaseContainers(name, filePath string) []v1.Container {
+ return []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceCPU: resource.MustParse("250m"),
- api.ResourceMemory: resource.MustParse("32Mi"),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("250m"),
+ v1.ResourceMemory: resource.MustParse("32Mi"),
},
- Limits: api.ResourceList{
- api.ResourceCPU: resource.MustParse("1250m"),
- api.ResourceMemory: resource.MustParse("64Mi"),
+ Limits: v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("1250m"),
+ v1.ResourceMemory: resource.MustParse("64Mi"),
},
},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
@@ -276,13 +276,13 @@ func downwardAPIVolumeBaseContainers(name, filePath string) []api.Container {
}
-func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []api.Container {
- return []api.Container{
+func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []v1.Container {
+ return []v1.Container{
{
Name: name,
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--file_content=" + filePath},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
@@ -293,15 +293,15 @@ func downwardAPIVolumeDefaultBaseContainer(name, filePath string) []api.Containe
}
-func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *api.Pod {
+func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[string]string, filePath string) *v1.Pod {
pod := downwardAPIVolumeBasePod(name, labels, annotations)
- pod.Spec.Containers = []api.Container{
+ pod.Spec.Containers = []v1.Container{
{
Name: "client-container",
Image: "gcr.io/google_containers/mounttest:0.7",
Command: []string{"/mt", "--break_on_expected_content=false", "--retry_time=120", "--file_content_in_loop=" + filePath},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "podinfo",
MountPath: "/etc",
@@ -315,51 +315,51 @@ func downwardAPIVolumePodForUpdateTest(name string, labels, annotations map[stri
return pod
}
-func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
Annotations: annotations,
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: "podinfo",
- VolumeSource: api.VolumeSource{
- DownwardAPI: &api.DownwardAPIVolumeSource{
- Items: []api.DownwardAPIVolumeFile{
+ VolumeSource: v1.VolumeSource{
+ DownwardAPI: &v1.DownwardAPIVolumeSource{
+ Items: []v1.DownwardAPIVolumeFile{
{
Path: "podname",
- FieldRef: &api.ObjectFieldSelector{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.name",
},
},
{
Path: "cpu_limit",
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.cpu",
},
},
{
Path: "cpu_request",
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.cpu",
},
},
{
Path: "memory_limit",
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "limits.memory",
},
},
{
Path: "memory_request",
- ResourceFieldRef: &api.ResourceFieldSelector{
+ ResourceFieldRef: &v1.ResourceFieldSelector{
ContainerName: "client-container",
Resource: "requests.memory",
},
@@ -369,18 +369,18 @@ func downwardAPIVolumeBasePod(name string, labels, annotations map[string]string
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
}
-func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *api.Pod) {
+func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]string, pod *v1.Pod) {
if len(labels) > 0 {
- pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, api.DownwardAPIVolumeFile{
+ pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "labels",
- FieldRef: &api.ObjectFieldSelector{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.labels",
},
@@ -388,9 +388,9 @@ func applyLabelsAndAnnotationsToDownwardAPIPod(labels, annotations map[string]st
}
if len(annotations) > 0 {
- pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, api.DownwardAPIVolumeFile{
+ pod.Spec.Volumes[0].DownwardAPI.Items = append(pod.Spec.Volumes[0].DownwardAPI.Items, v1.DownwardAPIVolumeFile{
Path: "annotations",
- FieldRef: &api.ObjectFieldSelector{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "metadata.annotations",
},
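The volume-based helpers follow the same shape: each projected file is a v1.DownwardAPIVolumeFile whose ResourceFieldRef names the container whose limits or requests it reads. A small sketch (illustrative helper name; only the v1 package is assumed):

    package common

    import "k8s.io/kubernetes/pkg/api/v1"

    // cpuLimitItem projects a container's CPU limit into the file "cpu_limit"
    // under the downward API volume, as the base pod above does.
    func cpuLimitItem(containerName string) v1.DownwardAPIVolumeFile {
        return v1.DownwardAPIVolumeFile{
            Path: "cpu_limit",
            ResourceFieldRef: &v1.ResourceFieldSelector{
                ContainerName: containerName,
                Resource:      "limits.cpu",
            },
        }
    }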
diff --git a/test/e2e/common/empty_dir.go b/test/e2e/common/empty_dir.go
index f9bd314b0b1..456e7a3a2a3 100644
--- a/test/e2e/common/empty_dir.go
+++ b/test/e2e/common/empty_dir.go
@@ -20,8 +20,8 @@ import (
"fmt"
"path"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -40,80 +40,80 @@ var _ = framework.KubeDescribe("EmptyDir volumes", func() {
Context("when FSGroup is specified [Feature:FSGroup]", func() {
It("new files should be created with FSGroup ownership when container is root", func() {
- doTestSetgidFSGroup(f, testImageRootUid, api.StorageMediumMemory)
+ doTestSetgidFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("new files should be created with FSGroup ownership when container is non-root", func() {
- doTestSetgidFSGroup(f, testImageNonRootUid, api.StorageMediumMemory)
+ doTestSetgidFSGroup(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("files with FSGroup ownership should support (root,0644,tmpfs)", func() {
- doTest0644FSGroup(f, testImageRootUid, api.StorageMediumMemory)
+ doTest0644FSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode using FSGroup", func() {
- doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumDefault)
+ doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumDefault)
})
It("volume on tmpfs should have the correct mode using FSGroup", func() {
- doTestVolumeModeFSGroup(f, testImageRootUid, api.StorageMediumMemory)
+ doTestVolumeModeFSGroup(f, testImageRootUid, v1.StorageMediumMemory)
})
})
It("volume on tmpfs should have the correct mode [Conformance]", func() {
- doTestVolumeMode(f, testImageRootUid, api.StorageMediumMemory)
+ doTestVolumeMode(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0644,tmpfs) [Conformance]", func() {
- doTest0644(f, testImageRootUid, api.StorageMediumMemory)
+ doTest0644(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0666,tmpfs) [Conformance]", func() {
- doTest0666(f, testImageRootUid, api.StorageMediumMemory)
+ doTest0666(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (root,0777,tmpfs) [Conformance]", func() {
- doTest0777(f, testImageRootUid, api.StorageMediumMemory)
+ doTest0777(f, testImageRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0644,tmpfs) [Conformance]", func() {
- doTest0644(f, testImageNonRootUid, api.StorageMediumMemory)
+ doTest0644(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0666,tmpfs) [Conformance]", func() {
- doTest0666(f, testImageNonRootUid, api.StorageMediumMemory)
+ doTest0666(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("should support (non-root,0777,tmpfs) [Conformance]", func() {
- doTest0777(f, testImageNonRootUid, api.StorageMediumMemory)
+ doTest0777(f, testImageNonRootUid, v1.StorageMediumMemory)
})
It("volume on default medium should have the correct mode [Conformance]", func() {
- doTestVolumeMode(f, testImageRootUid, api.StorageMediumDefault)
+ doTestVolumeMode(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0644,default) [Conformance]", func() {
- doTest0644(f, testImageRootUid, api.StorageMediumDefault)
+ doTest0644(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0666,default) [Conformance]", func() {
- doTest0666(f, testImageRootUid, api.StorageMediumDefault)
+ doTest0666(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (root,0777,default) [Conformance]", func() {
- doTest0777(f, testImageRootUid, api.StorageMediumDefault)
+ doTest0777(f, testImageRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0644,default) [Conformance]", func() {
- doTest0644(f, testImageNonRootUid, api.StorageMediumDefault)
+ doTest0644(f, testImageNonRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0666,default) [Conformance]", func() {
- doTest0666(f, testImageNonRootUid, api.StorageMediumDefault)
+ doTest0666(f, testImageNonRootUid, v1.StorageMediumDefault)
})
It("should support (non-root,0777,default) [Conformance]", func() {
- doTest0777(f, testImageNonRootUid, api.StorageMediumDefault)
+ doTest0777(f, testImageNonRootUid, v1.StorageMediumDefault)
})
})
@@ -122,11 +122,11 @@ const (
volumeName = "test-volume"
)
-func doTestSetgidFSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTestSetgidFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(testImageRootUid, volumePath, source)
)
@@ -146,16 +146,16 @@ func doTestSetgidFSGroup(f *framework.Framework, image string, medium api.Storag
"content of file \"/test-volume/test-file\": mount-tester new file",
"owner GID of \"/test-volume/test-file\": 123",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(testImageRootUid, volumePath, source)
)
@@ -171,17 +171,17 @@ func doTestVolumeModeFSGroup(f *framework.Framework, image string, medium api.St
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTest0644FSGroup(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
@@ -199,16 +199,16 @@ func doTest0644FSGroup(f *framework.Framework, image string, medium api.StorageM
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func doTestVolumeMode(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTestVolumeMode(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(testImageRootUid, volumePath, source)
)
@@ -221,17 +221,17 @@ func doTestVolumeMode(f *framework.Framework, image string, medium api.StorageMe
out := []string{
"perms of file \"/test-volume\": -rwxrwxrwx",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func doTest0644(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTest0644(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
@@ -246,17 +246,17 @@ func doTest0644(f *framework.Framework, image string, medium api.StorageMedium)
"perms of file \"/test-volume/test-file\": -rw-r--r--",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func doTest0666(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTest0666(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
@@ -271,17 +271,17 @@ func doTest0666(f *framework.Framework, image string, medium api.StorageMedium)
"perms of file \"/test-volume/test-file\": -rw-rw-rw-",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func doTest0777(f *framework.Framework, image string, medium api.StorageMedium) {
+func doTest0777(f *framework.Framework, image string, medium v1.StorageMedium) {
var (
volumePath = "/test-volume"
filePath = path.Join(volumePath, "test-file")
- source = &api.EmptyDirVolumeSource{Medium: medium}
+ source = &v1.EmptyDirVolumeSource{Medium: medium}
pod = testPodWithVolume(image, volumePath, source)
)
@@ -296,36 +296,36 @@ func doTest0777(f *framework.Framework, image string, medium api.StorageMedium)
"perms of file \"/test-volume/test-file\": -rwxrwxrwx",
"content of file \"/test-volume/test-file\": mount-tester new file",
}
- if medium == api.StorageMediumMemory {
+ if medium == v1.StorageMediumMemory {
out = append(out, "mount type of \"/test-volume\": tmpfs")
}
f.TestContainerOutput(msg, pod, 0, out)
}
-func formatMedium(medium api.StorageMedium) string {
- if medium == api.StorageMediumMemory {
+func formatMedium(medium v1.StorageMedium) string {
+ if medium == v1.StorageMediumMemory {
return "tmpfs"
}
return "node default medium"
}
-func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *api.Pod {
+func testPodWithVolume(image, path string, source *v1.EmptyDirVolumeSource) *v1.Pod {
podName := "pod-" + string(uuid.NewUUID())
- return &api.Pod{
+ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName,
Image: image,
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
@@ -333,16 +333,16 @@ func testPodWithVolume(image, path string, source *api.EmptyDirVolumeSource) *ap
},
},
},
- SecurityContext: &api.PodSecurityContext{
- SELinuxOptions: &api.SELinuxOptions{
+ SecurityContext: &v1.PodSecurityContext{
+ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0",
},
},
- RestartPolicy: api.RestartPolicyNever,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
+ VolumeSource: v1.VolumeSource{
EmptyDir: source,
},
},
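The emptyDir tests key their expectations off the versioned medium constants: v1.StorageMediumMemory mounts tmpfs, while v1.StorageMediumDefault uses the node's default backing, and only the memory medium gets the extra tmpfs assertion. A sketch of that branch (illustrative helper names; only the v1 package is assumed):

    package common

    import "k8s.io/kubernetes/pkg/api/v1"

    // emptyDirSource builds the versioned volume source the test pods mount.
    func emptyDirSource(medium v1.StorageMedium) *v1.EmptyDirVolumeSource {
        return &v1.EmptyDirVolumeSource{Medium: medium}
    }

    // expectedOutput mirrors the conditional tmpfs check the tests add for the
    // memory medium.
    func expectedOutput(medium v1.StorageMedium) []string {
        out := []string{"perms of file \"/test-volume\": -rwxrwxrwx"}
        if medium == v1.StorageMediumMemory {
            out = append(out, "mount type of \"/test-volume\": tmpfs")
        }
        return out
    }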
diff --git a/test/e2e/common/expansion.go b/test/e2e/common/expansion.go
index c7be116541e..27154792a54 100644
--- a/test/e2e/common/expansion.go
+++ b/test/e2e/common/expansion.go
@@ -17,7 +17,7 @@ limitations under the License.
package common
import (
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -31,18 +31,18 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow composing env vars into new env vars [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
{
Name: "FOO",
Value: "foo-value",
@@ -58,7 +58,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -71,18 +71,18 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow substituting values in a container's command [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "TEST_VAR=wrong echo \"$(TEST_VAR)\""},
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -101,19 +101,19 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
It("should allow substituting values in a container's args [Conformance]", func() {
podName := "var-expansion-" + string(uuid.NewUUID())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "dapi-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c"},
Args: []string{"TEST_VAR=wrong echo \"$(TEST_VAR)\""},
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
{
Name: "TEST_VAR",
Value: "test-value",
@@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("Variable Expansion", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
diff --git a/test/e2e/common/host_path.go b/test/e2e/common/host_path.go
index a05af03e972..2ee90614c33 100644
--- a/test/e2e/common/host_path.go
+++ b/test/e2e/common/host_path.go
@@ -21,8 +21,8 @@ import (
"os"
"path"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/test/e2e/framework"
@@ -41,7 +41,7 @@ var _ = framework.KubeDescribe("HostPath", func() {
It("should give a volume the correct mode [Conformance]", func() {
volumePath := "/test-volume"
- source := &api.HostPathVolumeSource{
+ source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
@@ -60,7 +60,7 @@ var _ = framework.KubeDescribe("HostPath", func() {
volumePath := "/test-volume"
filePath := path.Join(volumePath, "test-file")
retryDuration := 180
- source := &api.HostPathVolumeSource{
+ source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
@@ -90,7 +90,7 @@ var _ = framework.KubeDescribe("HostPath", func() {
filePathInWriter := path.Join(volumePath, fileName)
filePathInReader := path.Join(volumePath, subPath, fileName)
- source := &api.HostPathVolumeSource{
+ source := &v1.HostPathVolumeSource{
Path: "/tmp",
}
pod := testPodWithHostVol(volumePath, source)
@@ -118,11 +118,11 @@ var _ = framework.KubeDescribe("HostPath", func() {
const containerName1 = "test-container-1"
const containerName2 = "test-container-2"
-func mount(source *api.HostPathVolumeSource) []api.Volume {
- return []api.Volume{
+func mount(source *v1.HostPathVolumeSource) []v1.Volume {
+ return []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
+ VolumeSource: v1.VolumeSource{
HostPath: source,
},
},
@@ -130,23 +130,23 @@ func mount(source *api.HostPathVolumeSource) []api.Volume {
}
//TODO: To merge this with the emptyDir tests, we can make source a lambda.
-func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod {
+func testPodWithHostVol(path string, source *v1.HostPathVolumeSource) *v1.Pod {
podName := "pod-host-path-test"
- return &api.Pod{
+ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName1,
Image: "gcr.io/google_containers/mounttest:0.7",
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
@@ -156,7 +156,7 @@ func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod
{
Name: containerName2,
Image: "gcr.io/google_containers/mounttest:0.7",
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
@@ -164,7 +164,7 @@ func testPodWithHostVol(path string, source *api.HostPathVolumeSource) *api.Pod
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
Volumes: mount(source),
},
}
diff --git a/test/e2e/common/init_container.go b/test/e2e/common/init_container.go
index ce5f1f027a3..1496fcf5e20 100644
--- a/test/e2e/common/init_container.go
+++ b/test/e2e/common/init_container.go
@@ -21,9 +21,10 @@ import (
"strconv"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
- client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ podutil "k8s.io/kubernetes/pkg/api/v1/pod"
+ "k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/watch"
"k8s.io/kubernetes/test/e2e/framework"
@@ -45,17 +46,17 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- InitContainers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -67,7 +68,7 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/true"},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -76,19 +77,25 @@ var _ = framework.KubeDescribe("InitContainer", func() {
},
},
}
+ if err := podutil.SetInitContainersAnnotations(pod); err != nil {
+ Expect(err).To(BeNil())
+ }
startedPod := podClient.Create(pod)
- w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+ w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
- event, err := watch.Until(framework.PodStartTimeout, wr, client.PodCompleted)
+ event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodCompleted)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
- endPod := event.Object.(*api.Pod)
+ endPod := event.Object.(*v1.Pod)
+ if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
+ Expect(err).To(BeNil())
+ }
- Expect(endPod.Status.Phase).To(Equal(api.PodSucceeded))
- _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+ Expect(endPod.Status.Phase).To(Equal(v1.PodSucceeded))
+ _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
- Expect(init.Status).To(Equal(api.ConditionTrue))
+ Expect(init.Status).To(Equal(v1.ConditionTrue))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
@@ -104,16 +111,16 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- InitContainers: []api.Container{
+ Spec: v1.PodSpec{
+ InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -125,33 +132,39 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/true"},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
- Resources: api.ResourceRequirements{
- Limits: api.ResourceList{
- api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
- api.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
+ Resources: v1.ResourceRequirements{
+ Limits: v1.ResourceList{
+ v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+ v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
+ if err := podutil.SetInitContainersAnnotations(pod); err != nil {
+ Expect(err).To(BeNil())
+ }
startedPod := podClient.Create(pod)
- w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+ w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
- event, err := watch.Until(framework.PodStartTimeout, wr, client.PodRunning)
+ event, err := watch.Until(framework.PodStartTimeout, wr, conditions.PodRunning)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
- endPod := event.Object.(*api.Pod)
+ endPod := event.Object.(*v1.Pod)
- Expect(endPod.Status.Phase).To(Equal(api.PodRunning))
- _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+ Expect(endPod.Status.Phase).To(Equal(v1.PodRunning))
+ _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
- Expect(init.Status).To(Equal(api.ConditionTrue))
+ Expect(init.Status).To(Equal(v1.ConditionTrue))
+ if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
+ Expect(err).To(BeNil())
+ }
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
for _, status := range endPod.Status.InitContainerStatuses {
@@ -167,16 +180,17 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- InitContainers: []api.Container{
+ Spec: v1.PodSpec{
+ InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -188,22 +202,25 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/true"},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "run1",
Image: framework.GetPauseImageName(f.ClientSet),
- Resources: api.ResourceRequirements{
- Limits: api.ResourceList{
- api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
- api.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
+ Resources: v1.ResourceRequirements{
+ Limits: v1.ResourceList{
+ v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+ v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
+ if err := podutil.SetInitContainersAnnotations(pod); err != nil {
+ Expect(err).To(BeNil())
+ }
startedPod := podClient.Create(pod)
- w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+ w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
@@ -212,7 +229,10 @@ var _ = framework.KubeDescribe("InitContainer", func() {
// check for the first container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
- case *api.Pod:
+ case *v1.Pod:
+ if err := podutil.SetInitContainersAndStatuses(t); err != nil {
+ Expect(err).To(BeNil())
+ }
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
@@ -244,7 +264,10 @@ var _ = framework.KubeDescribe("InitContainer", func() {
// verify we get two restarts
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
- case *api.Pod:
+ case *v1.Pod:
+ if err := podutil.SetInitContainersAndStatuses(t); err != nil {
+ Expect(err).To(BeNil())
+ }
status := t.Status.InitContainerStatuses[0]
if status.RestartCount < 3 {
return false, nil
@@ -259,12 +282,15 @@ var _ = framework.KubeDescribe("InitContainer", func() {
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
- endPod := event.Object.(*api.Pod)
+ endPod := event.Object.(*v1.Pod)
+ if err := podutil.SetInitContainersAndStatuses(endPod); err != nil {
+ Expect(err).To(BeNil())
+ }
- Expect(endPod.Status.Phase).To(Equal(api.PodPending))
- _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+ Expect(endPod.Status.Phase).To(Equal(v1.PodPending))
+ _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
- Expect(init.Status).To(Equal(api.ConditionFalse))
+ Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init1 init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
@@ -276,17 +302,17 @@ var _ = framework.KubeDescribe("InitContainer", func() {
By("creating the pod")
name := "pod-init-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyNever,
- InitContainers: []api.Container{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyNever,
+ InitContainers: []v1.Container{
{
Name: "init1",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -298,24 +324,27 @@ var _ = framework.KubeDescribe("InitContainer", func() {
Command: []string{"/bin/false"},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "run1",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/true"},
- Resources: api.ResourceRequirements{
- Limits: api.ResourceList{
- api.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
- api.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
+ Resources: v1.ResourceRequirements{
+ Limits: v1.ResourceList{
+ v1.ResourceCPU: *resource.NewMilliQuantity(100, resource.DecimalSI),
+ v1.ResourceMemory: *resource.NewQuantity(30*1024*1024, resource.DecimalSI),
},
},
},
},
},
}
+ if err := podutil.SetInitContainersAnnotations(pod); err != nil {
+ Expect(err).To(BeNil())
+ }
startedPod := podClient.Create(pod)
- w, err := podClient.Watch(api.SingleObject(startedPod.ObjectMeta))
+ w, err := podClient.Watch(v1.SingleObject(startedPod.ObjectMeta))
Expect(err).NotTo(HaveOccurred(), "error watching a pod")
wr := watch.NewRecorder(w)
@@ -324,7 +353,10 @@ var _ = framework.KubeDescribe("InitContainer", func() {
// check for the second container to fail at least once
func(evt watch.Event) (bool, error) {
switch t := evt.Object.(type) {
- case *api.Pod:
+ case *v1.Pod:
+ if err := podutil.SetInitContainersAndStatuses(t); err != nil {
+ Expect(err).To(BeNil())
+ }
for _, status := range t.Status.ContainerStatuses {
if status.State.Waiting == nil {
return false, fmt.Errorf("container %q should not be out of waiting: %#v", status.Name, status)
@@ -358,16 +390,16 @@ var _ = framework.KubeDescribe("InitContainer", func() {
return false, fmt.Errorf("unexpected object: %#v", t)
}
},
- client.PodCompleted,
+ conditions.PodCompleted,
)
Expect(err).To(BeNil())
framework.CheckInvariants(wr.Events(), framework.ContainerInitInvariant)
- endPod := event.Object.(*api.Pod)
+ endPod := event.Object.(*v1.Pod)
- Expect(endPod.Status.Phase).To(Equal(api.PodFailed))
- _, init := api.GetPodCondition(&endPod.Status, api.PodInitialized)
+ Expect(endPod.Status.Phase).To(Equal(v1.PodFailed))
+ _, init := v1.GetPodCondition(&endPod.Status, v1.PodInitialized)
Expect(init).NotTo(BeNil())
- Expect(init.Status).To(Equal(api.ConditionFalse))
+ Expect(init.Status).To(Equal(v1.ConditionFalse))
Expect(init.Reason).To(Equal("ContainersNotInitialized"))
Expect(init.Message).To(Equal("containers with incomplete status: [init2]"))
Expect(len(endPod.Status.InitContainerStatuses)).To(Equal(2))
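At this point the versioned API still round-trips init containers through pod annotations, which is why every test above mirrors them with podutil.SetInitContainersAnnotations before Create and rebuilds the typed fields with podutil.SetInitContainersAndStatuses on pods read back from the watch; the pod condition helpers also moved from the unversioned client to the conditions package. A condensed sketch of that round-trip (the function name is illustrative; the imports match the ones added in the hunk above):

    package common

    import (
        "k8s.io/kubernetes/pkg/api/v1"
        podutil "k8s.io/kubernetes/pkg/api/v1/pod"
        "k8s.io/kubernetes/pkg/client/conditions"
        "k8s.io/kubernetes/pkg/watch"
        "k8s.io/kubernetes/test/e2e/framework"
    )

    // createAndWaitForInit mirrors the init containers into annotations before
    // Create, waits with the relocated condition helpers, then rebuilds the
    // typed init-container fields on the pod returned by the watch.
    func createAndWaitForInit(podClient *framework.PodClient, pod *v1.Pod) (*v1.Pod, error) {
        if err := podutil.SetInitContainersAnnotations(pod); err != nil {
            return nil, err
        }
        started := podClient.Create(pod)
        w, err := podClient.Watch(v1.SingleObject(started.ObjectMeta))
        if err != nil {
            return nil, err
        }
        event, err := watch.Until(framework.PodStartTimeout, w, conditions.PodCompleted)
        if err != nil {
            return nil, err
        }
        endPod := event.Object.(*v1.Pod)
        return endPod, podutil.SetInitContainersAndStatuses(endPod)
    }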
diff --git a/test/e2e/common/kubelet_etc_hosts.go b/test/e2e/common/kubelet_etc_hosts.go
index 49715695d0a..69203081989 100644
--- a/test/e2e/common/kubelet_etc_hosts.go
+++ b/test/e2e/common/kubelet_etc_hosts.go
@@ -22,7 +22,7 @@ import (
"github.com/golang/glog"
. "github.com/onsi/ginkgo"
- api "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -34,8 +34,8 @@ const (
)
type KubeletManagedHostConfig struct {
- hostNetworkPod *api.Pod
- pod *api.Pod
+ hostNetworkPod *v1.Pod
+ pod *v1.Pod
f *framework.Framework
}
@@ -128,17 +128,17 @@ func (config *KubeletManagedHostConfig) getEtcHostsContent(podName, containerNam
return config.f.ExecCommandInContainer(podName, containerName, "cat", "/etc/hosts")
}
-func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func (config *KubeletManagedHostConfig) createPodSpec(podName string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
@@ -147,7 +147,7 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
{
Name: "busybox-2",
Image: etcHostsImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
@@ -156,12 +156,12 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
{
Name: "busybox-3",
Image: etcHostsImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "host-etc-hosts",
MountPath: "/etc/hosts",
@@ -169,11 +169,11 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
},
},
},
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
{
Name: "host-etc-hosts",
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{
Path: "/etc/hosts",
},
},
@@ -184,20 +184,19 @@ func (config *KubeletManagedHostConfig) createPodSpec(podName string) *api.Pod {
return pod
}
-func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- SecurityContext: &api.PodSecurityContext{
- HostNetwork: true,
- },
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ HostNetwork: true,
+ SecurityContext: &v1.PodSecurityContext{},
+ Containers: []v1.Container{
{
Name: "busybox-1",
Image: etcHostsImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
@@ -206,7 +205,7 @@ func (config *KubeletManagedHostConfig) createPodSpecWithHostNetwork(podName str
{
Name: "busybox-2",
Image: etcHostsImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"sleep",
"900",
diff --git a/test/e2e/common/pods.go b/test/e2e/common/pods.go
index 36ba4229eaf..bc49a7af816 100644
--- a/test/e2e/common/pods.go
+++ b/test/e2e/common/pods.go
@@ -26,7 +26,7 @@ import (
"golang.org/x/net/websocket"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr"
@@ -46,7 +46,7 @@ var (
)
// testHostIP tests that a pod gets a host IP
-func testHostIP(podClient *framework.PodClient, pod *api.Pod) {
+func testHostIP(podClient *framework.PodClient, pod *v1.Pod) {
By("creating pod")
podClient.CreateSync(pod)
@@ -69,7 +69,7 @@ func testHostIP(podClient *framework.PodClient, pod *api.Pod) {
}
}
-func startPodAndGetBackOffs(podClient *framework.PodClient, pod *api.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
+func startPodAndGetBackOffs(podClient *framework.PodClient, pod *v1.Pod, sleepAmount time.Duration) (time.Duration, time.Duration) {
podClient.CreateSync(pod)
time.Sleep(sleepAmount)
Expect(pod.Spec.Containers).NotTo(BeEmpty())
@@ -102,7 +102,7 @@ func getRestartDelay(podClient *framework.PodClient, podName string, containerNa
time.Sleep(time.Second)
pod, err := podClient.Get(podName)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
- status, ok := api.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
+ status, ok := v1.GetContainerStatus(pod.Status.ContainerStatuses, containerName)
if !ok {
framework.Logf("getRestartDelay: status missing")
continue
@@ -127,12 +127,12 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should get a host IP [Conformance]", func() {
name := "pod-hostip-" + string(uuid.NewUUID())
- testHostIP(podClient, &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ testHostIP(podClient, &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "test",
Image: framework.GetPauseImageName(f.ClientSet),
@@ -146,16 +146,16 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
@@ -166,12 +166,12 @@ var _ = framework.KubeDescribe("Pods", func() {
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
- options = api.ListOptions{
- LabelSelector: selector,
+ options = v1.ListOptions{
+ LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
@@ -182,7 +182,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options = api.ListOptions{LabelSelector: selector}
+ options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
@@ -206,7 +206,7 @@ var _ = framework.KubeDescribe("Pods", func() {
framework.Logf("running pod: %#v", pod)
By("deleting the pod gracefully")
- err = podClient.Delete(pod.Name, api.NewDeleteOptions(30))
+ err = podClient.Delete(pod.Name, v1.NewDeleteOptions(30))
Expect(err).NotTo(HaveOccurred(), "failed to delete pod")
By("verifying the kubelet observed the termination notice")
@@ -233,13 +233,13 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying pod deletion was observed")
deleted := false
timeout := false
- var lastPod *api.Pod
+ var lastPod *v1.Pod
timer := time.After(30 * time.Second)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
- lastPod = event.Object.(*api.Pod)
+ lastPod = event.Object.(*v1.Pod)
deleted = true
}
case <-timer:
@@ -254,7 +254,7 @@ var _ = framework.KubeDescribe("Pods", func() {
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options = api.ListOptions{LabelSelector: selector}
+ options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
@@ -264,16 +264,16 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-update-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
@@ -287,13 +287,13 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("updating the pod")
- podClient.Update(name, func(pod *api.Pod) {
+ podClient.Update(name, func(pod *v1.Pod) {
value = strconv.Itoa(time.Now().Nanosecond())
pod.Labels["time"] = value
})
@@ -302,7 +302,7 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the updated pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options = api.ListOptions{LabelSelector: selector}
+ options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
@@ -313,16 +313,16 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-update-activedeadlineseconds-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
@@ -336,13 +336,13 @@ var _ = framework.KubeDescribe("Pods", func() {
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(1))
By("updating the pod")
- podClient.Update(name, func(pod *api.Pod) {
+ podClient.Update(name, func(pod *v1.Pod) {
newDeadline := int64(5)
pod.Spec.ActiveDeadlineSeconds = &newDeadline
})
@@ -354,17 +354,17 @@ var _ = framework.KubeDescribe("Pods", func() {
// Make a pod that will be a service.
// This pod serves its hostname via HTTP.
serverName := "server-envvars-" + string(uuid.NewUUID())
- serverPod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ serverPod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: serverName,
Labels: map[string]string{"name": serverName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "srv",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -379,15 +379,15 @@ var _ = framework.KubeDescribe("Pods", func() {
// to match the service. Another is to rethink environment variable names and possibly
// allow overriding the prefix in the service manifest.
svcName := "fooservice"
- svc := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ svc := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: svcName,
Labels: map[string]string{
"name": svcName,
},
},
- Spec: api.ServiceSpec{
- Ports: []api.ServicePort{{
+ Spec: v1.ServiceSpec{
+ Ports: []v1.ServicePort{{
Port: 8765,
TargetPort: intstr.FromInt(8080),
}},
@@ -402,20 +402,20 @@ var _ = framework.KubeDescribe("Pods", func() {
// Make a client pod that verifies that it has the service environment variables.
podName := "client-envvars-" + string(uuid.NewUUID())
const containerName = "env3cont"
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -442,12 +442,12 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-exec-websocket-" + string(uuid.NewUUID())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "main",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -512,12 +512,12 @@ var _ = framework.KubeDescribe("Pods", func() {
By("creating the pod")
name := "pod-logs-websocket-" + string(uuid.NewUUID())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "main",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -566,13 +566,13 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should have their auto-restart back-off timer reset on image update [Slow]", func() {
podName := "pod-back-off-image"
containerName := "back-off"
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "back-off-image"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24",
@@ -585,7 +585,7 @@ var _ = framework.KubeDescribe("Pods", func() {
delay1, delay2 := startPodAndGetBackOffs(podClient, pod, buildBackOffDuration)
By("updating the image")
- podClient.Update(podName, func(pod *api.Pod) {
+ podClient.Update(podName, func(pod *v1.Pod) {
pod.Spec.Containers[0].Image = "gcr.io/google_containers/nginx-slim:0.7"
})
@@ -607,13 +607,13 @@ var _ = framework.KubeDescribe("Pods", func() {
It("should cap back-off at MaxContainerBackOff [Slow]", func() {
podName := "back-off-cap"
containerName := "back-off-cap"
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"test": "liveness"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/busybox:1.24",
diff --git a/test/e2e/common/privileged.go b/test/e2e/common/privileged.go
index a45d1c301de..95792be1d7d 100644
--- a/test/e2e/common/privileged.go
+++ b/test/e2e/common/privileged.go
@@ -23,7 +23,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -40,9 +40,9 @@ const (
)
type PrivilegedPodTestConfig struct {
- privilegedPod *api.Pod
+ privilegedPod *v1.Pod
f *framework.Framework
- hostExecPod *api.Pod
+ hostExecPod *v1.Pod
}
var _ = framework.KubeDescribe("PrivilegedPod", func() {
@@ -96,21 +96,21 @@ func (config *PrivilegedPodTestConfig) dialFromContainer(containerIP string, con
return output
}
-func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *api.Pod {
+func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *v1.Pod {
isPrivileged := true
notPrivileged := false
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: privilegedPodName,
Namespace: config.f.Namespace.Name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: privilegedContainerName,
Image: privilegedContainerImage,
- ImagePullPolicy: api.PullIfNotPresent,
- SecurityContext: &api.SecurityContext{Privileged: &isPrivileged},
+ ImagePullPolicy: v1.PullIfNotPresent,
+ SecurityContext: &v1.SecurityContext{Privileged: &isPrivileged},
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", privilegedHttpPort),
@@ -120,8 +120,8 @@ func (config *PrivilegedPodTestConfig) createPrivilegedPodSpec() *api.Pod {
{
Name: notPrivilegedContainerName,
Image: privilegedContainerImage,
- ImagePullPolicy: api.PullIfNotPresent,
- SecurityContext: &api.SecurityContext{Privileged: &notPrivileged},
+ ImagePullPolicy: v1.PullIfNotPresent,
+ SecurityContext: &v1.SecurityContext{Privileged: &notPrivileged},
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", notPrivilegedHttpPort),
diff --git a/test/e2e/common/secrets.go b/test/e2e/common/secrets.go
index 5d3401ca846..196fc1ffd05 100644
--- a/test/e2e/common/secrets.go
+++ b/test/e2e/common/secrets.go
@@ -20,7 +20,7 @@ import (
"fmt"
"os"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -50,7 +50,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
It("should be able to mount in a volume regardless of a different secret existing with same name in different namespace", func() {
var (
- namespace2 *api.Namespace
+ namespace2 *v1.Namespace
err error
secret2Name = "secret-test-" + string(uuid.NewUUID())
)
@@ -88,37 +88,37 @@ var _ = framework.KubeDescribe("Secrets", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- Secret: &api.SecretVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: volumeName2,
- VolumeSource: api.VolumeSource{
- Secret: &api.SecretVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -132,7 +132,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -152,22 +152,22 @@ var _ = framework.KubeDescribe("Secrets", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "secret-env-test",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sh", "-c", "env"},
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
{
Name: "SECRET_DATA",
- ValueFrom: &api.EnvVarSource{
- SecretKeyRef: &api.SecretKeySelector{
- LocalObjectReference: api.LocalObjectReference{
+ ValueFrom: &v1.EnvVarSource{
+ SecretKeyRef: &v1.SecretKeySelector{
+ LocalObjectReference: v1.LocalObjectReference{
Name: name,
},
Key: "data-1",
@@ -177,7 +177,7 @@ var _ = framework.KubeDescribe("Secrets", func() {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -187,9 +187,9 @@ var _ = framework.KubeDescribe("Secrets", func() {
})
})
-func secretForTest(namespace, name string) *api.Secret {
- return &api.Secret{
- ObjectMeta: api.ObjectMeta{
+func secretForTest(namespace, name string) *v1.Secret {
+ return &v1.Secret{
+ ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: name,
},
@@ -214,30 +214,30 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
Namespace: f.Namespace.Name,
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- Secret: &api.SecretVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ Secret: &v1.SecretVolumeSource{
SecretName: secretName,
},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/data-1",
"--file_mode=/etc/secret-volume/data-1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -245,7 +245,7 @@ func doSecretE2EWithoutMapping(f *framework.Framework, defaultMode *int32, secre
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -279,18 +279,18 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- Secret: &api.SecretVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ Secret: &v1.SecretVolumeSource{
SecretName: name,
- Items: []api.KeyToPath{
+ Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "new-path-data-1",
@@ -300,14 +300,14 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "secret-volume-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
"--file_content=/etc/secret-volume/new-path-data-1",
"--file_mode=/etc/secret-volume/new-path-data-1"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -315,7 +315,7 @@ func doSecretE2EWithMapping(f *framework.Framework, mode *int32) {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
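The secret-as-environment-variable wiring used by these tests looks roughly like the following sketch (the secret value and object names are illustrative; the v1.EnvVarSource/SecretKeySelector chain is the one shown in the diff):

    package e2esketch

    import "k8s.io/kubernetes/pkg/api/v1"

    // secretEnvPodSketch wires a secret key into a container environment variable
    // using the versioned types; secret contents and names are placeholders.
    func secretEnvPodSketch(namespace, secretName string) (*v1.Secret, *v1.Pod) {
        secret := &v1.Secret{
            ObjectMeta: v1.ObjectMeta{Namespace: namespace, Name: secretName},
            Data:       map[string][]byte{"data-1": []byte("value-1")},
        }
        pod := &v1.Pod{
            ObjectMeta: v1.ObjectMeta{Name: "pod-secrets-sketch"},
            Spec: v1.PodSpec{
                Containers: []v1.Container{
                    {
                        Name:    "secret-env-test",
                        Image:   "gcr.io/google_containers/busybox:1.24",
                        Command: []string{"sh", "-c", "env"},
                        Env: []v1.EnvVar{
                            {
                                Name: "SECRET_DATA",
                                ValueFrom: &v1.EnvVarSource{
                                    SecretKeyRef: &v1.SecretKeySelector{
                                        LocalObjectReference: v1.LocalObjectReference{Name: secretName},
                                        Key:                  "data-1",
                                    },
                                },
                            },
                        },
                    },
                },
                RestartPolicy: v1.RestartPolicyNever,
            },
        }
        return secret, pod
    }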
diff --git a/test/e2e/common/sysctl.go b/test/e2e/common/sysctl.go
index 5aa089f989f..2dbf5228e74 100644
--- a/test/e2e/common/sysctl.go
+++ b/test/e2e/common/sysctl.go
@@ -19,7 +19,7 @@ package common
import (
"fmt"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/sysctl"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -34,29 +34,29 @@ var _ = framework.KubeDescribe("Sysctls", func() {
f := framework.NewDefaultFramework("sysctl")
var podClient *framework.PodClient
- testPod := func() *api.Pod {
+ testPod := func() *v1.Pod {
podName := "sysctl-" + string(uuid.NewUUID())
- pod := api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Annotations: map[string]string{},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24",
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
return &pod
}
- waitForPodErrorEventOrStarted := func(pod *api.Pod) (*api.Event, error) {
- var ev *api.Event
+ waitForPodErrorEventOrStarted := func(pod *v1.Pod) (*v1.Event, error) {
+ var ev *v1.Event
err := wait.Poll(framework.Poll, framework.PodStartTimeout, func() (bool, error) {
evnts, err := f.ClientSet.Core().Events(pod.Namespace).Search(pod)
if err != nil {
@@ -82,7 +82,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should support sysctls", func() {
pod := testPod()
- pod.Annotations[api.SysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{
+ pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
@@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
- Expect(pod.Status.Phase).To(Equal(api.PodSucceeded))
+ Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should support unsafe sysctls which are actually whitelisted", func() {
pod := testPod()
- pod.Annotations[api.UnsafeSysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{
+ pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shm_rmid_forced",
Value: "1",
@@ -152,7 +152,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Expect(err).NotTo(HaveOccurred())
By("Checking that the pod succeeded")
- Expect(pod.Status.Phase).To(Equal(api.PodSucceeded))
+ Expect(pod.Status.Phase).To(Equal(v1.PodSucceeded))
By("Getting logs from the pod")
log, err := framework.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, pod.Spec.Containers[0].Name)
@@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should reject invalid sysctls", func() {
pod := testPod()
- pod.Annotations[api.SysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{
+ pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "foo-",
Value: "bar",
@@ -178,7 +178,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
Value: "100000000",
},
})
- pod.Annotations[api.UnsafeSysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{
+ pod.Annotations[v1.UnsafeSysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.shmall",
Value: "100000000",
@@ -206,7 +206,7 @@ var _ = framework.KubeDescribe("Sysctls", func() {
It("should not launch unsafe, but not explicitly enabled sysctls on the node", func() {
pod := testPod()
- pod.Annotations[api.SysctlsPodAnnotationKey] = api.PodAnnotationsFromSysctls([]api.Sysctl{
+ pod.Annotations[v1.SysctlsPodAnnotationKey] = v1.PodAnnotationsFromSysctls([]v1.Sysctl{
{
Name: "kernel.msgmax",
Value: "10000000000",
diff --git a/test/e2e/common/volumes.go b/test/e2e/common/volumes.go
index 57ebe961dd7..532d363a587 100644
--- a/test/e2e/common/volumes.go
+++ b/test/e2e/common/volumes.go
@@ -47,10 +47,10 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog"
@@ -79,31 +79,31 @@ type VolumeTestConfig struct {
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
-func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *api.Pod {
+func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *v1.Pod {
podClient := f.PodClient()
portCount := len(config.serverPorts)
- serverPodPorts := make([]api.ContainerPort, portCount)
+ serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.prefix, i)
- serverPodPorts[i] = api.ContainerPort{
+ serverPodPorts[i] = v1.ContainerPort{
Name: portName,
ContainerPort: int32(config.serverPorts[i]),
- Protocol: api.ProtocolTCP,
+ Protocol: v1.ProtocolTCP,
}
}
volumeCount := len(config.volumes)
- volumes := make([]api.Volume, volumeCount)
- mounts := make([]api.VolumeMount, volumeCount)
+ volumes := make([]v1.Volume, volumeCount)
+ mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.volumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
- volumes[i].VolumeSource.HostPath = &api.HostPathVolumeSource{
+ volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
@@ -117,24 +117,24 @@ func startVolumeServer(f *framework.Framework, config VolumeTestConfig) *api.Pod
By(fmt.Sprint("creating ", config.prefix, " server pod"))
privileged := new(bool)
*privileged = true
- serverPod := &api.Pod{
+ serverPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
Labels: map[string]string{
"role": config.prefix + "-server",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: config.prefix + "-server",
Image: config.serverImage,
- SecurityContext: &api.SecurityContext{
+ SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Args: config.serverArgs,
@@ -191,21 +191,21 @@ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
-func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume api.VolumeSource, fsGroup *int64, expectedContent string) {
+func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
By(fmt.Sprint("starting ", config.prefix, " client"))
- clientPod := &api.Pod{
+ clientPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-client",
Labels: map[string]string{
"role": config.prefix + "-client",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: config.prefix + "-client",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -218,7 +218,7 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume ap
"-c",
"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/opt/",
@@ -226,12 +226,12 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume ap
},
},
},
- SecurityContext: &api.PodSecurityContext{
- SELinuxOptions: &api.SELinuxOptions{
+ SecurityContext: &v1.PodSecurityContext{
+ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
@@ -265,29 +265,29 @@ func testVolumeClient(f *framework.Framework, config VolumeTestConfig, volume ap
// Insert index.html with given content into given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
-func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.VolumeSource, content string) {
+func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.prefix, " injector"))
podClient := client.Core().Pods(config.namespace)
- injectPod := &api.Pod{
+ injectPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-injector",
Labels: map[string]string{
"role": config.prefix + "-injector",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: config.prefix + "-injector",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/mnt",
@@ -295,13 +295,13 @@ func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.
},
},
},
- SecurityContext: &api.PodSecurityContext{
- SELinuxOptions: &api.SELinuxOptions{
+ SecurityContext: &v1.PodSecurityContext{
+ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
- RestartPolicy: api.RestartPolicyNever,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
@@ -350,7 +350,7 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
// note that namespace deletion is handled by delete-namespace flag
clean := true
// filled in BeforeEach
- var namespace *api.Namespace
+ var namespace *v1.Namespace
BeforeEach(func() {
if !isTestEnabled(f.ClientSet) {
@@ -381,8 +381,8 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
- volume := api.VolumeSource{
- NFS: &api.NFSVolumeSource{
+ volume := v1.VolumeSource{
+ NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
@@ -416,26 +416,26 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
framework.Logf("Gluster server IP address: %v", serverIP)
// create Endpoints for the server
- endpoints := api.Endpoints{
+ endpoints := v1.Endpoints{
TypeMeta: unversioned.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
},
- Subsets: []api.EndpointSubset{
+ Subsets: []v1.EndpointSubset{
{
- Addresses: []api.EndpointAddress{
+ Addresses: []v1.EndpointAddress{
{
IP: serverIP,
},
},
- Ports: []api.EndpointPort{
+ Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
- Protocol: api.ProtocolTCP,
+ Protocol: v1.ProtocolTCP,
},
},
},
@@ -454,8 +454,8 @@ var _ = framework.KubeDescribe("GCP Volumes", func() {
framework.Failf("Failed to create endpoints for Gluster server: %v", err)
}
- volume := api.VolumeSource{
- Glusterfs: &api.GlusterfsVolumeSource{
+ volume := v1.VolumeSource{
+ Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
diff --git a/test/e2e/cronjob.go b/test/e2e/cronjob.go
index 7964e162c9f..4b85689eab4 100644
--- a/test/e2e/cronjob.go
+++ b/test/e2e/cronjob.go
@@ -23,10 +23,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apis/batch"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
+ batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -62,7 +63,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring at least two running jobs exists by listing jobs explicitly")
- jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+ jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(len(activeJobs) >= 2).To(BeTrue())
@@ -85,7 +86,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(err).To(HaveOccurred())
By("Ensuring no job exists by listing jobs explicitly")
- jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+ jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(jobs.Items).To(HaveLen(0))
@@ -111,7 +112,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly")
- jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+ jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
@@ -142,7 +143,7 @@ var _ = framework.KubeDescribe("CronJob", func() {
Expect(cronJob.Status.Active).Should(HaveLen(1))
By("Ensuring exaclty one running job exists by listing jobs explicitly")
- jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(api.ListOptions{})
+ jobs, err := f.ClientSet.Batch().Jobs(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
activeJobs := filterActiveJobs(jobs)
Expect(activeJobs).To(HaveLen(1))
@@ -184,7 +185,7 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
parallelism := int32(1)
completions := int32(1)
sj := &batch.CronJob{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: batch.CronJobSpec{
@@ -194,22 +195,22 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
Spec: batch.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
- Template: api.PodTemplateSpec{
- Spec: api.PodSpec{
- RestartPolicy: api.RestartPolicyOnFailure,
- Volumes: []api.Volume{
+ Template: v1.PodTemplateSpec{
+ Spec: v1.PodSpec{
+ RestartPolicy: v1.RestartPolicyOnFailure,
+ Volumes: []v1.Volume{
{
Name: "data",
- VolumeSource: api.VolumeSource{
- EmptyDir: &api.EmptyDirVolumeSource{},
+ VolumeSource: v1.VolumeSource{
+ EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
@@ -230,21 +231,21 @@ func newTestCronJob(name, schedule string, concurrencyPolicy batch.ConcurrencyPo
}
func createCronJob(c clientset.Interface, ns string, cronJob *batch.CronJob) (*batch.CronJob, error) {
- return c.Batch().CronJobs(ns).Create(cronJob)
+ return c.BatchV2alpha1().CronJobs(ns).Create(cronJob)
}
func getCronJob(c clientset.Interface, ns, name string) (*batch.CronJob, error) {
- return c.Batch().CronJobs(ns).Get(name)
+ return c.BatchV2alpha1().CronJobs(ns).Get(name)
}
func deleteCronJob(c clientset.Interface, ns, name string) error {
- return c.Batch().CronJobs(ns).Delete(name, nil)
+ return c.BatchV2alpha1().CronJobs(ns).Delete(name, nil)
}
// Wait for at least given amount of active jobs.
func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
- curr, err := c.Batch().CronJobs(ns).Get(cronJobName)
+ curr, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
if err != nil {
return false, err
}
@@ -255,7 +256,7 @@ func waitForActiveJobs(c clientset.Interface, ns, cronJobName string, active int
// Wait for no jobs to appear.
func waitForNoJobs(c clientset.Interface, ns, jobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
- curr, err := c.Batch().CronJobs(ns).Get(jobName)
+ curr, err := c.BatchV2alpha1().CronJobs(ns).Get(jobName)
if err != nil {
return false, err
}
@@ -267,7 +268,7 @@ func waitForNoJobs(c clientset.Interface, ns, jobName string) error {
// Wait for a job to be replaced with a new one.
func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
- jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
+ jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
@@ -284,7 +285,7 @@ func waitForJobReplaced(c clientset.Interface, ns, previousJobName string) error
// waitForJobsAtLeast waits for at least a number of jobs to appear.
func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
- jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
+ jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
@@ -295,7 +296,7 @@ func waitForJobsAtLeast(c clientset.Interface, ns string, atLeast int) error {
// waitForAnyFinishedJob waits for any completed job to appear.
func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
return wait.Poll(framework.Poll, cronJobTimeout, func() (bool, error) {
- jobs, err := c.Batch().Jobs(ns).List(api.ListOptions{})
+ jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
@@ -311,7 +312,7 @@ func waitForAnyFinishedJob(c clientset.Interface, ns string) error {
// checkNoUnexpectedEvents checks unexpected events didn't happen.
// Currently only "UnexpectedJob" is checked.
func checkNoUnexpectedEvents(c clientset.Interface, ns, cronJobName string) error {
- sj, err := c.Batch().CronJobs(ns).Get(cronJobName)
+ sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
if err != nil {
return fmt.Errorf("error in getting cronjob %s/%s: %v", ns, cronJobName, err)
}
@@ -327,7 +328,7 @@ func checkNoUnexpectedEvents(c clientset.Interface, ns, cronJobName string) erro
return nil
}
-func filterActiveJobs(jobs *batch.JobList) (active []*batch.Job) {
+func filterActiveJobs(jobs *batchv1.JobList) (active []*batchv1.Job) {
for i := range jobs.Items {
j := jobs.Items[i]
if !job.IsJobFinished(&j) {
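The CronJob tests now reach two API groups through the same versioned clientset: CronJobs via BatchV2alpha1() and plain Jobs via Batch() (batch/v1). A sketch of that split, with a hypothetical helper name:

    package e2esketch

    import (
        "k8s.io/kubernetes/pkg/api/v1"
        batchv1 "k8s.io/kubernetes/pkg/apis/batch/v1"
        batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    )

    // cronJobAndJobs fetches a CronJob through the v2alpha1 client path and the
    // jobs it spawned through the batch/v1 path, as the tests above now do.
    func cronJobAndJobs(c clientset.Interface, ns, cronJobName string) (*batch.CronJob, *batchv1.JobList, error) {
        cj, err := c.BatchV2alpha1().CronJobs(ns).Get(cronJobName)
        if err != nil {
            return nil, nil, err
        }
        jobs, err := c.Batch().Jobs(ns).List(v1.ListOptions{})
        if err != nil {
            return nil, nil, err
        }
        return cj, jobs, nil
    }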
diff --git a/test/e2e/daemon_restart.go b/test/e2e/daemon_restart.go
index 728d8229f3f..1d646877f39 100644
--- a/test/e2e/daemon_restart.go
+++ b/test/e2e/daemon_restart.go
@@ -21,9 +21,9 @@ import (
"strconv"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/runtime"
@@ -134,8 +134,8 @@ type podTracker struct {
cache.ThreadSafeStore
}
-func (p *podTracker) remember(pod *api.Pod, eventType string) {
- if eventType == UPDATE && pod.Status.Phase == api.PodRunning {
+func (p *podTracker) remember(pod *v1.Pod, eventType string) {
+ if eventType == UPDATE && pod.Status.Phase == v1.PodRunning {
return
}
p.Add(fmt.Sprintf("[%v] %v: %v", time.Now(), eventType, pod.Name), pod)
@@ -147,7 +147,7 @@ func (p *podTracker) String() (msg string) {
if !exists {
continue
}
- pod := obj.(*api.Pod)
+ pod := obj.(*v1.Pod)
msg += fmt.Sprintf("%v Phase %v Host %v\n", k, pod.Status.Phase, pod.Spec.NodeName)
}
return
@@ -159,7 +159,7 @@ func newPodTracker() *podTracker {
}
// replacePods replaces content of the store with the given pods.
-func replacePods(pods []*api.Pod, store cache.Store) {
+func replacePods(pods []*v1.Pod, store cache.Store) {
found := make([]interface{}, 0, len(pods))
for i := range pods {
found = append(found, pods[i])
@@ -170,7 +170,7 @@ func replacePods(pods []*api.Pod, store cache.Store) {
// getContainerRestarts returns the count of container restarts across all pods matching the given labelSelector,
// and a list of nodenames across which these containers restarted.
func getContainerRestarts(c clientset.Interface, ns string, labelSelector labels.Selector) (int, []string) {
- options := api.ListOptions{LabelSelector: labelSelector}
+ options := v1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := c.Core().Pods(ns).List(options)
framework.ExpectNoError(err)
failedContainers := 0
@@ -205,12 +205,13 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
// All the restart tests need an rc and a watch on pods of the rc.
// Additionally some of them might scale the rc during the test.
config = testutils.RCConfig{
- Client: f.ClientSet,
- Name: rcName,
- Namespace: ns,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: numPods,
- CreatedPods: &[]*api.Pod{},
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: rcName,
+ Namespace: ns,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: numPods,
+ CreatedPods: &[]*v1.Pod{},
}
Expect(framework.RunRC(config)).NotTo(HaveOccurred())
replacePods(*config.CreatedPods, existingPods)
@@ -219,27 +220,27 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
tracker = newPodTracker()
newPods, controller = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
- options.LabelSelector = labelSelector
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ options.LabelSelector = labelSelector.String()
obj, err := f.ClientSet.Core().Pods(ns).List(options)
return runtime.Object(obj), err
},
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
- options.LabelSelector = labelSelector
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ options.LabelSelector = labelSelector.String()
return f.ClientSet.Core().Pods(ns).Watch(options)
},
},
- &api.Pod{},
+ &v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- tracker.remember(obj.(*api.Pod), ADD)
+ tracker.remember(obj.(*v1.Pod), ADD)
},
UpdateFunc: func(oldObj, newObj interface{}) {
- tracker.remember(newObj.(*api.Pod), UPDATE)
+ tracker.remember(newObj.(*v1.Pod), UPDATE)
},
DeleteFunc: func(obj interface{}) {
- tracker.remember(obj.(*api.Pod), DEL)
+ tracker.remember(obj.(*v1.Pod), DEL)
},
},
)
@@ -263,7 +264,7 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
// that it had the opportunity to create/delete pods, if it were going to do so. Scaling the RC
// to the same size achieves this, because the scale operation advances the RC's sequence number
// and awaits it to be observed and reported back in the RC's status.
- framework.ScaleRC(f.ClientSet, ns, rcName, numPods, true)
+ framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods, true)
// Only check the keys, the pods can be different if the kubelet updated it.
// TODO: Can it really?
@@ -294,9 +295,9 @@ var _ = framework.KubeDescribe("DaemonRestart [Disruptive]", func() {
restarter.kill()
// This is best effort to try and create pods while the scheduler is down,
// since we don't know exactly when it is restarted after the kill signal.
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, false))
+ framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, false))
restarter.waitUp()
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, ns, rcName, numPods+5, true))
+ framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, rcName, numPods+5, true))
})
It("Kubelet should not restart containers across restart", func() {
diff --git a/test/e2e/daemon_set.go b/test/e2e/daemon_set.go
index aa0f59aad32..7b73001a2df 100644
--- a/test/e2e/daemon_set.go
+++ b/test/e2e/daemon_set.go
@@ -25,9 +25,11 @@ import (
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- "k8s.io/kubernetes/pkg/apis/extensions"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@@ -58,12 +60,12 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
var f *framework.Framework
AfterEach(func() {
- if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(api.ListOptions{}); err == nil {
+ if daemonsets, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
framework.Logf("daemonset: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), daemonsets))
} else {
framework.Logf("unable to dump daemonsets: %v", err)
}
- if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{}); err == nil {
+ if pods, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{}); err == nil {
framework.Logf("pods: %s", runtime.EncodeOrDie(api.Codecs.LegacyCodec(registered.EnabledVersions()...), pods))
} else {
framework.Logf("unable to dump pods: %v", err)
@@ -93,20 +95,20 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
framework.Logf("Creating simple daemon set %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: label,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: dsName,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -116,7 +118,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("Check that reaper kills all daemon pods for %s", dsName)
- dsReaper, err := kubectl.ReaperFor(extensions.Kind("DaemonSet"), f.ClientSet)
+ dsReaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
err = dsReaper.Stop(ns, dsName, 0, nil)
Expect(err).NotTo(HaveOccurred())
@@ -135,7 +137,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
podClient := c.Core().Pods(ns)
selector := labels.Set(label).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(podList.Items)).To(BeNumerically(">", 0))
@@ -152,22 +154,22 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node selector %s", dsName)
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: complexLabel,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
NodeSelector: nodeSelector,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: dsName,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -208,7 +210,7 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
nodeSelector := map[string]string{daemonsetColorLabel: "blue"}
framework.Logf("Creating daemon with a node affinity %s", dsName)
affinity := map[string]string{
- api.AffinityAnnotationKey: fmt.Sprintf(`
+ v1.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
@@ -220,22 +222,22 @@ var _ = framework.KubeDescribe("Daemon set [Serial]", func() {
}}}`, daemonsetColorLabel, nodeSelector[daemonsetColorLabel]),
}
_, err := c.Extensions().DaemonSets(ns).Create(&extensions.DaemonSet{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: dsName,
},
Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: complexLabel},
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: complexLabel,
Annotations: affinity,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: dsName,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -296,9 +298,9 @@ func clearDaemonSetNodeLabels(c clientset.Interface) error {
return nil
}
-func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*api.Node, error) {
+func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[string]string) (*v1.Node, error) {
nodeClient := c.Core().Nodes()
- var newNode *api.Node
+ var newNode *v1.Node
var newLabels map[string]string
err := wait.Poll(dsRetryPeriod, dsRetryTimeout, func() (bool, error) {
node, err := nodeClient.Get(nodeName)
@@ -339,7 +341,7 @@ func setDaemonSetNodeLabels(c clientset.Interface, nodeName string, labels map[s
func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, nodeNames []string) func() (bool, error) {
return func() (bool, error) {
selector := labels.Set(selector).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := f.ClientSet.Core().Pods(f.Namespace.Name).List(options)
if err != nil {
return false, nil
@@ -368,7 +370,7 @@ func checkDaemonPodOnNodes(f *framework.Framework, selector map[string]string, n
func checkRunningOnAllNodes(f *framework.Framework, selector map[string]string) func() (bool, error) {
return func() (bool, error) {
- nodeList, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
+ nodeList, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err)
nodeNames := make([]string, 0)
for _, node := range nodeList.Items {
@@ -385,7 +387,7 @@ func checkRunningOnNoNodes(f *framework.Framework, selector map[string]string) f
func checkDaemonStatus(f *framework.Framework, dsName string) error {
ds, err := f.ClientSet.Extensions().DaemonSets(f.Namespace.Name).Get(dsName)
if err != nil {
- return fmt.Errorf("Could not get daemon set from api.")
+ return fmt.Errorf("Could not get daemon set from v1.")
}
desired, scheduled, ready := ds.Status.DesiredNumberScheduled, ds.Status.CurrentNumberScheduled, ds.Status.NumberReady
if desired != scheduled && desired != ready {
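The DaemonSet test now splits reads and teardown across two clients: gets and lists go through the versioned clientset, while kubectl's reaper still takes the internal one. Roughly (the function name is mine):

    package e2esketch

    import (
        extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
        "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
        clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
        "k8s.io/kubernetes/pkg/kubectl"
    )

    // stopDaemonSet reads the DaemonSet through the versioned clientset but hands
    // the internal clientset to kubectl's reaper, matching the split the tests use.
    func stopDaemonSet(c clientset.Interface, internal internalclientset.Interface, ns, name string) error {
        if _, err := c.Extensions().DaemonSets(ns).Get(name); err != nil {
            return err
        }
        reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("DaemonSet"), internal)
        if err != nil {
            return err
        }
        return reaper.Stop(ns, name, 0, nil)
    }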
diff --git a/test/e2e/density.go b/test/e2e/density.go
index 93c8fc20e27..3e0aa558e26 100644
--- a/test/e2e/density.go
+++ b/test/e2e/density.go
@@ -28,8 +28,10 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@@ -54,10 +56,11 @@ const (
var MaxContainerFailures = 0
type DensityTestConfig struct {
- Configs []testutils.RCConfig
- ClientSet internalclientset.Interface
- PollInterval time.Duration
- PodCount int
+ Configs []testutils.RCConfig
+ ClientSet clientset.Interface
+ InternalClientset internalclientset.Interface
+ PollInterval time.Duration
+ PodCount int
}
func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceConstraint {
@@ -159,9 +162,9 @@ func density30AddonResourceVerifier(numNodes int) map[string]framework.ResourceC
return constraints
}
-func logPodStartupStatus(c internalclientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
+func logPodStartupStatus(c clientset.Interface, expectedPods int, observedLabels map[string]string, period time.Duration, stopCh chan struct{}) {
label := labels.SelectorFromSet(labels.Set(observedLabels))
- podStore := testutils.NewPodStore(c, api.NamespaceAll, label, fields.Everything())
+ podStore := testutils.NewPodStore(c, v1.NamespaceAll, label, fields.Everything())
defer podStore.Stop()
ticker := time.NewTicker(period)
defer ticker.Stop()
@@ -209,7 +212,7 @@ func runDensityTest(dtc DensityTestConfig) time.Duration {
// Print some data about Pod to Node allocation
By("Printing Pod to Node allocation data")
- podList, err := dtc.ClientSet.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
+ podList, err := dtc.ClientSet.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
framework.ExpectNoError(err)
pausePodAllocation := make(map[string]int)
systemPodAllocation := make(map[string][]string)
@@ -238,14 +241,14 @@ func cleanupDensityTest(dtc DensityTestConfig) {
for i := range dtc.Configs {
rcName := dtc.Configs[i].Name
rc, err := dtc.ClientSet.Core().ReplicationControllers(dtc.Configs[i].Namespace).Get(rcName)
- if err == nil && rc.Spec.Replicas != 0 {
+ if err == nil && *(rc.Spec.Replicas) != 0 {
if framework.TestContext.GarbageCollectorEnabled {
By("Cleaning up only the replication controller, garbage collector will clean up the pods")
err := framework.DeleteRCAndWaitForGC(dtc.ClientSet, dtc.Configs[i].Namespace, rcName)
framework.ExpectNoError(err)
} else {
By("Cleaning up the replication controller and pods")
- err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.Configs[i].Namespace, rcName)
+ err := framework.DeleteRCAndPods(dtc.ClientSet, dtc.InternalClientset, dtc.Configs[i].Namespace, rcName)
framework.ExpectNoError(err)
}
}
@@ -260,7 +263,7 @@ func cleanupDensityTest(dtc DensityTestConfig) {
// results will not be representative for control-plane performance as we'll start hitting
// limits on Docker's concurrent container startup.
var _ = framework.KubeDescribe("Density", func() {
- var c internalclientset.Interface
+ var c clientset.Interface
var nodeCount int
var RCName string
var additionalPodsPrefix string
@@ -270,7 +273,7 @@ var _ = framework.KubeDescribe("Density", func() {
var totalPods int
var nodeCpuCapacity int64
var nodeMemCapacity int64
- var nodes *api.NodeList
+ var nodes *v1.NodeList
var masters sets.String
// Gathers data prior to framework namespace teardown
@@ -332,10 +335,10 @@ var _ = framework.KubeDescribe("Density", func() {
for _, node := range nodes.Items {
var internalIP, externalIP string
for _, address := range node.Status.Addresses {
- if address.Type == api.NodeInternalIP {
+ if address.Type == v1.NodeInternalIP {
internalIP = address.Address
}
- if address.Type == api.NodeExternalIP {
+ if address.Type == v1.NodeExternalIP {
externalIP = address.Address
}
}
@@ -399,12 +402,13 @@ var _ = framework.KubeDescribe("Density", func() {
podThroughput := 20
timeout := time.Duration(totalPods/podThroughput)*time.Second + 3*time.Minute
// createClients is defined in load.go
- clients, err := createClients(numberOfRCs)
+ clients, internalClients, err := createClients(numberOfRCs)
for i := 0; i < numberOfRCs; i++ {
RCName := fmt.Sprintf("density%v-%v-%v", totalPods, i, uuid)
nsName := namespaces[i].Name
RCConfigs[i] = testutils.RCConfig{
Client: clients[i],
+ InternalClient: internalClients[i],
Image: framework.GetPauseImageName(f.ClientSet),
Name: RCName,
Namespace: nsName,
@@ -421,10 +425,11 @@ var _ = framework.KubeDescribe("Density", func() {
}
dConfig := DensityTestConfig{
- ClientSet: f.ClientSet,
- Configs: RCConfigs,
- PodCount: totalPods,
- PollInterval: DensityPollInterval,
+ ClientSet: f.ClientSet,
+ InternalClientset: f.InternalClientset,
+ Configs: RCConfigs,
+ PodCount: totalPods,
+ PollInterval: DensityPollInterval,
}
e2eStartupTime = runDensityTest(dConfig)
if itArg.runLatencyTest {
@@ -437,12 +442,12 @@ var _ = framework.KubeDescribe("Density", func() {
watchTimes := make(map[string]unversioned.Time, 0)
var mutex sync.Mutex
- checkPod := func(p *api.Pod) {
+ checkPod := func(p *v1.Pod) {
mutex.Lock()
defer mutex.Unlock()
defer GinkgoRecover()
- if p.Status.Phase == api.PodRunning {
+ if p.Status.Phase == v1.PodRunning {
if _, found := watchTimes[p.Name]; !found {
watchTimes[p.Name] = unversioned.Now()
createTimes[p.Name] = p.CreationTimestamp
@@ -472,31 +477,31 @@ var _ = framework.KubeDescribe("Density", func() {
nsName := namespaces[i].Name
latencyPodsStore, controller := cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
- options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
obj, err := c.Core().Pods(nsName).List(options)
return runtime.Object(obj), err
},
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
- options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix})
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ options.LabelSelector = labels.SelectorFromSet(labels.Set{"type": additionalPodsPrefix}).String()
return c.Core().Pods(nsName).Watch(options)
},
},
- &api.Pod{},
+ &v1.Pod{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- p, ok := obj.(*api.Pod)
+ p, ok := obj.(*v1.Pod)
if !ok {
- framework.Logf("Failed to cast observed object to *api.Pod.")
+ framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
},
UpdateFunc: func(oldObj, newObj interface{}) {
- p, ok := newObj.(*api.Pod)
+ p, ok := newObj.(*v1.Pod)
if !ok {
- framework.Logf("Failed to cast observed object to *api.Pod.")
+ framework.Logf("Failed to cast observed object to *v1.Pod.")
}
Expect(ok).To(Equal(true))
go checkPod(p)
@@ -545,7 +550,7 @@ var _ = framework.KubeDescribe("Density", func() {
nodeToLatencyPods := make(map[string]int)
for i := range latencyPodStores {
for _, item := range latencyPodStores[i].List() {
- pod := item.(*api.Pod)
+ pod := item.(*v1.Pod)
nodeToLatencyPods[pod.Spec.NodeName]++
}
for node, count := range nodeToLatencyPods {
@@ -560,9 +565,9 @@ var _ = framework.KubeDescribe("Density", func() {
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.namespace": nsName,
- "source": api.DefaultSchedulerName,
- }.AsSelector()
- options := api.ListOptions{FieldSelector: selector}
+ "source": v1.DefaultSchedulerName,
+ }.AsSelector().String()
+ options := v1.ListOptions{FieldSelector: selector}
schedEvents, err := c.Core().Events(nsName).List(options)
framework.ExpectNoError(err)
for k := range createTimes {
@@ -683,39 +688,39 @@ var _ = framework.KubeDescribe("Density", func() {
})
})
-func createRunningPodFromRC(wg *sync.WaitGroup, c internalclientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
+func createRunningPodFromRC(wg *sync.WaitGroup, c clientset.Interface, name, ns, image, podType string, cpuRequest, memRequest resource.Quantity) {
defer GinkgoRecover()
defer wg.Done()
labels := map[string]string{
"type": podType,
"name": name,
}
- rc := &api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+ rc := &v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: 1,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
Selector: labels,
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceCPU: cpuRequest,
- api.ResourceMemory: memRequest,
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceCPU: cpuRequest,
+ v1.ResourceMemory: memRequest,
},
},
},
},
- DNSPolicy: api.DNSDefault,
+ DNSPolicy: v1.DNSDefault,
},
},
},
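The inline func(i int) *int32 closures above exist because the versioned specs take *int32 replica counts. A sketch with a named helper (int32Ptr and the RC contents are illustrative, not part of the diff):

    package e2esketch

    import "k8s.io/kubernetes/pkg/api/v1"

    // int32Ptr stands in for the inline closures: versioned specs want *int32
    // replica counts instead of plain int32 values.
    func int32Ptr(i int32) *int32 { return &i }

    // minimalRC builds a one-replica controller against the v1 types; the name,
    // labels, and image are placeholders.
    func minimalRC(name, image string) *v1.ReplicationController {
        labels := map[string]string{"name": name}
        return &v1.ReplicationController{
            ObjectMeta: v1.ObjectMeta{Name: name, Labels: labels},
            Spec: v1.ReplicationControllerSpec{
                Replicas: int32Ptr(1),
                Selector: labels,
                Template: &v1.PodTemplateSpec{
                    ObjectMeta: v1.ObjectMeta{Labels: labels},
                    Spec: v1.PodSpec{
                        Containers: []v1.Container{{Name: name, Image: image}},
                        DNSPolicy:  v1.DNSDefault,
                    },
                },
            },
        }
    }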
diff --git a/test/e2e/deployment.go b/test/e2e/deployment.go
index 1e5f38c2f63..91783a39c89 100644
--- a/test/e2e/deployment.go
+++ b/test/e2e/deployment.go
@@ -28,9 +28,12 @@ import (
"k8s.io/kubernetes/pkg/api/annotations"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apis/extensions"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
- client "k8s.io/kubernetes/pkg/client/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+ extensionsclient "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
@@ -109,23 +112,23 @@ var _ = framework.KubeDescribe("Deployment", func() {
func newDeployment(deploymentName string, replicas int32, podLabels map[string]string, imageName string, image string, strategyType extensions.DeploymentStrategyType, revisionHistoryLimit *int32) *extensions.Deployment {
zero := int64(0)
return &extensions.Deployment{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: deploymentName,
},
Spec: extensions.DeploymentSpec{
- Replicas: replicas,
+ Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
Strategy: extensions.DeploymentStrategy{
Type: strategyType,
},
RevisionHistoryLimit: revisionHistoryLimit,
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: podLabels,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: imageName,
Image: image,
@@ -168,20 +171,20 @@ func checkDeploymentRevision(c clientset.Interface, ns, deploymentName, revision
return deployment, newRS
}
-func stopDeploymentOverlap(c clientset.Interface, ns, deploymentName, overlapWith string) {
- stopDeploymentMaybeOverlap(c, ns, deploymentName, overlapWith)
+func stopDeploymentOverlap(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName, overlapWith string) {
+ stopDeploymentMaybeOverlap(c, internalClient, ns, deploymentName, overlapWith)
}
-func stopDeployment(c clientset.Interface, ns, deploymentName string) {
- stopDeploymentMaybeOverlap(c, ns, deploymentName, "")
+func stopDeployment(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName string) {
+ stopDeploymentMaybeOverlap(c, internalClient, ns, deploymentName, "")
}
-func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overlapWith string) {
+func stopDeploymentMaybeOverlap(c clientset.Interface, internalClient internalclientset.Interface, ns, deploymentName, overlapWith string) {
deployment, err := c.Extensions().Deployments(ns).Get(deploymentName)
Expect(err).NotTo(HaveOccurred())
framework.Logf("Deleting deployment %s", deploymentName)
- reaper, err := kubectl.ReaperFor(extensions.Kind("Deployment"), c)
+ reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("Deployment"), internalClient)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(ns, deployment.Name, timeout, api.NewDeleteOptions(0))
@@ -194,7 +197,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overl
framework.Logf("Ensuring deployment %s's RSes were deleted", deploymentName)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
rss, err := c.Extensions().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred())
// RSes may be created by overlapping deployments right after this deployment is deleted, ignore them
@@ -210,7 +213,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overl
Expect(noOverlapRSes).Should(HaveLen(0))
}
framework.Logf("Ensuring deployment %s's Pods were deleted", deploymentName)
- var pods *api.PodList
+ var pods *v1.PodList
if err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
pods, err = c.Core().Pods(ns).List(options)
if err != nil {
@@ -220,7 +223,7 @@ func stopDeploymentMaybeOverlap(c clientset.Interface, ns, deploymentName, overl
if len(overlapWith) == 0 && len(pods.Items) == 0 {
return true, nil
} else if len(overlapWith) != 0 {
- noOverlapPods := []api.Pod{}
+ noOverlapPods := []v1.Pod{}
for _, pod := range pods.Items {
if !strings.HasPrefix(pod.Name, overlapWith) {
noOverlapPods = append(noOverlapPods, pod)
@@ -270,6 +273,7 @@ func testNewDeployment(f *framework.Framework) {
func testDeleteDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
+ internalClient := f.InternalClientset
deploymentName := "test-new-deployment"
podLabels := map[string]string{"name": nginxImageName}
@@ -295,7 +299,7 @@ func testDeleteDeployment(f *framework.Framework) {
err = fmt.Errorf("expected a replica set, got nil")
Expect(err).NotTo(HaveOccurred())
}
- stopDeployment(c, ns, deploymentName)
+ stopDeployment(c, internalClient, ns, deploymentName)
}
func testRollingUpdateDeployment(f *framework.Framework) {
@@ -481,11 +485,11 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
deploymentName := "test-cleanup-deployment"
framework.Logf("Creating deployment %s", deploymentName)
- pods, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+ pods, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
Expect(err).NotTo(HaveOccurred(), "Failed to query for pods: %v", err)
}
- options := api.ListOptions{
+ options := v1.ListOptions{
ResourceVersion: pods.ListMeta.ResourceVersion,
}
stopCh := make(chan struct{})
@@ -504,7 +508,7 @@ func testDeploymentCleanUpPolicy(f *framework.Framework) {
if numPodCreation < 0 {
framework.Failf("Expect only one pod creation, the second creation event: %#v\n", event)
}
- pod, ok := event.Object.(*api.Pod)
+ pod, ok := event.Object.(*v1.Pod)
if !ok {
Fail("Expect event Object to be a pod")
}
@@ -556,8 +560,8 @@ func testRolloverDeployment(f *framework.Framework) {
framework.Logf("Creating deployment %s", deploymentName)
newDeployment := newDeployment(deploymentName, deploymentReplicas, deploymentPodLabels, deploymentImageName, deploymentImage, deploymentStrategyType, nil)
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
- MaxUnavailable: intstr.FromInt(1),
- MaxSurge: intstr.FromInt(1),
+ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
+ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
}
_, err = c.Extensions().Deployments(ns).Create(newDeployment)
Expect(err).NotTo(HaveOccurred())
@@ -571,7 +575,7 @@ func testRolloverDeployment(f *framework.Framework) {
_, newRS := checkDeploymentRevision(c, ns, deploymentName, "1", deploymentImageName, deploymentImage)
// Before the deployment finishes, update the deployment to rollover the above 2 ReplicaSets and bring up redis pods.
- Expect(newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
+ Expect(*newRS.Spec.Replicas).Should(BeNumerically("<", deploymentReplicas))
updatedDeploymentImageName, updatedDeploymentImage := redisImageName, redisImage
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, newDeployment.Name, func(update *extensions.Deployment) {
update.Spec.Template.Spec.Containers[0].Name = updatedDeploymentImageName
@@ -629,7 +633,7 @@ func testPausedDeployment(f *framework.Framework) {
if err != nil {
Expect(err).NotTo(HaveOccurred())
}
- opts := api.ListOptions{LabelSelector: selector}
+ opts := v1.ListOptions{LabelSelector: selector.String()}
w, err := c.Extensions().ReplicaSets(ns).Watch(opts)
Expect(err).NotTo(HaveOccurred())
@@ -973,7 +977,7 @@ func testDeploymentLabelAdopted(f *framework.Framework) {
// All pods targeted by the deployment should contain pod-template-hash in their labels, and there should be only 3 pods
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := c.Core().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
err = framework.CheckPodHashLabel(pods)
@@ -1015,7 +1019,7 @@ func testScalePausedDeployment(f *framework.Framework) {
framework.Logf("Scaling up the paused deployment %q", deploymentName)
newReplicas := int32(5)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
- update.Spec.Replicas = newReplicas
+ update.Spec.Replicas = &newReplicas
})
Expect(err).NotTo(HaveOccurred())
@@ -1025,8 +1029,8 @@ func testScalePausedDeployment(f *framework.Framework) {
rs, err = deploymentutil.GetNewReplicaSet(deployment, c)
Expect(err).NotTo(HaveOccurred())
- if rs.Spec.Replicas != newReplicas {
- err = fmt.Errorf("Expected %d replicas for the new replica set, got %d", newReplicas, rs.Spec.Replicas)
+ if *(rs.Spec.Replicas) != newReplicas {
+ err = fmt.Errorf("Expected %d replicas for the new replica set, got %d", newReplicas, *(rs.Spec.Replicas))
Expect(err).NotTo(HaveOccurred())
}
}
@@ -1042,8 +1046,8 @@ func testScaledRolloutDeployment(f *framework.Framework) {
deploymentName := "nginx"
d := newDeployment(deploymentName, replicas, podLabels, nginxImageName, nginxImage, extensions.RollingUpdateDeploymentStrategyType, nil)
d.Spec.Strategy.RollingUpdate = new(extensions.RollingUpdateDeployment)
- d.Spec.Strategy.RollingUpdate.MaxSurge = intstr.FromInt(3)
- d.Spec.Strategy.RollingUpdate.MaxUnavailable = intstr.FromInt(2)
+ d.Spec.Strategy.RollingUpdate.MaxSurge = func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(3)
+ d.Spec.Strategy.RollingUpdate.MaxUnavailable = func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(2)
By(fmt.Sprintf("Creating deployment %q", deploymentName))
deployment, err := c.Extensions().Deployments(ns).Create(d)
@@ -1054,7 +1058,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
// Verify that the required pods have come up.
By("Waiting for all required pods to come up")
- err = framework.VerifyPods(f.ClientSet, ns, nginxImageName, false, deployment.Spec.Replicas)
+ err = framework.VerifyPods(f.ClientSet, ns, nginxImageName, false, *(deployment.Spec.Replicas))
if err != nil {
framework.Logf("error in waiting for pods to come up: %s", err)
Expect(err).NotTo(HaveOccurred())
@@ -1090,18 +1094,18 @@ func testScaledRolloutDeployment(f *framework.Framework) {
first, err = c.Extensions().ReplicaSets(first.Namespace).Get(first.Name)
Expect(err).NotTo(HaveOccurred())
- firstCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), first)
+ firstCond := replicaSetHasDesiredReplicas(c.Extensions(), first)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, firstCond)
Expect(err).NotTo(HaveOccurred())
- secondCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), second)
+ secondCond := replicaSetHasDesiredReplicas(c.Extensions(), second)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, secondCond)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Updating the size (up) and template at the same time for deployment %q", deploymentName))
newReplicas := int32(20)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
- update.Spec.Replicas = newReplicas
+ update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = nautilusImage
})
Expect(err).NotTo(HaveOccurred())
@@ -1118,7 +1122,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
for _, rs := range append(oldRSs, rs) {
By(fmt.Sprintf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name))
desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
- if !ok || desired == deployment.Spec.Replicas {
+ if !ok || desired == *(deployment.Spec.Replicas) {
continue
}
err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
@@ -1150,18 +1154,18 @@ func testScaledRolloutDeployment(f *framework.Framework) {
newRs, err := deploymentutil.GetNewReplicaSet(deployment, c)
Expect(err).NotTo(HaveOccurred())
- oldCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), oldRs)
+ oldCond := replicaSetHasDesiredReplicas(c.Extensions(), oldRs)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, oldCond)
Expect(err).NotTo(HaveOccurred())
- newCond := client.ReplicaSetHasDesiredReplicas(c.Extensions(), newRs)
+ newCond := replicaSetHasDesiredReplicas(c.Extensions(), newRs)
err = wait.PollImmediate(10*time.Millisecond, 1*time.Minute, newCond)
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Updating the size (down) and template at the same time for deployment %q", deploymentName))
newReplicas = int32(5)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
- update.Spec.Replicas = newReplicas
+ update.Spec.Replicas = &newReplicas
update.Spec.Template.Spec.Containers[0].Image = kittenImage
})
Expect(err).NotTo(HaveOccurred())
@@ -1178,7 +1182,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
for _, rs := range append(oldRSs, rs) {
By(fmt.Sprintf("Ensuring replica set %q has the correct desiredReplicas annotation", rs.Name))
desired, ok := deploymentutil.GetDesiredReplicasAnnotation(rs)
- if !ok || desired == deployment.Spec.Replicas {
+ if !ok || desired == *(deployment.Spec.Replicas) {
continue
}
err = fmt.Errorf("unexpected desiredReplicas annotation %d for replica set %q", desired, rs.Name)
@@ -1189,6 +1193,7 @@ func testScaledRolloutDeployment(f *framework.Framework) {
func testOverlappingDeployment(f *framework.Framework) {
ns := f.Namespace.Name
c := f.ClientSet
+ internalClient := f.InternalClientset
deploymentName := "first-deployment"
podLabels := map[string]string{"name": redisImageName}
@@ -1219,7 +1224,7 @@ func testOverlappingDeployment(f *framework.Framework) {
// Only the first deployment is synced
By("Checking only the first overlapping deployment is synced")
- options := api.ListOptions{}
+ options := v1.ListOptions{}
rsList, err := c.Extensions().ReplicaSets(ns).List(options)
Expect(err).NotTo(HaveOccurred(), "Failed listing all replica sets in namespace %s", ns)
Expect(rsList.Items).To(HaveLen(int(replicas)))
@@ -1227,7 +1232,7 @@ func testOverlappingDeployment(f *framework.Framework) {
Expect(rsList.Items[0].Spec.Template.Spec.Containers[0].Image).To(Equal(deploy.Spec.Template.Spec.Containers[0].Image))
By("Deleting the first deployment")
- stopDeploymentOverlap(c, ns, deploy.Name, deployOverlapping.Name)
+ stopDeploymentOverlap(c, internalClient, ns, deploy.Name, deployOverlapping.Name)
// Wait for overlapping annotation cleared
By("Waiting for the second deployment to clear overlapping annotation")
@@ -1335,11 +1340,11 @@ func randomScale(d *extensions.Deployment, i int) {
switch r := rand.Float32(); {
case r < 0.3:
framework.Logf("%02d: scaling up", i)
- d.Spec.Replicas++
+ *(d.Spec.Replicas)++
case r < 0.6:
- if d.Spec.Replicas > 1 {
+ if *(d.Spec.Replicas) > 1 {
framework.Logf("%02d: scaling down", i)
- d.Spec.Replicas--
+ *(d.Spec.Replicas)--
}
}
}
@@ -1375,7 +1380,7 @@ func testIterativeDeployments(f *framework.Framework) {
// trigger a new deployment
framework.Logf("%02d: triggering a new rollout for deployment %q", i, deployment.Name)
deployment, err = framework.UpdateDeploymentWithRetries(c, ns, deployment.Name, func(update *extensions.Deployment) {
- newEnv := api.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
+ newEnv := v1.EnvVar{Name: "A", Value: fmt.Sprintf("%d", i)}
update.Spec.Template.Spec.Containers[0].Env = append(update.Spec.Template.Spec.Containers[0].Env, newEnv)
randomScale(update, i)
})
@@ -1421,7 +1426,7 @@ func testIterativeDeployments(f *framework.Framework) {
framework.Logf("%02d: arbitrarily deleting one or more deployment pods for deployment %q", i, deployment.Name)
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
Expect(err).NotTo(HaveOccurred())
- opts := api.ListOptions{LabelSelector: selector}
+ opts := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(ns).List(opts)
Expect(err).NotTo(HaveOccurred())
if len(podList.Items) == 0 {
@@ -1460,3 +1465,14 @@ func testIterativeDeployments(f *framework.Framework) {
framework.Logf("Checking deployment %q for a complete condition", deploymentName)
Expect(framework.WaitForDeploymentWithCondition(c, ns, deploymentName, deploymentutil.NewRSAvailableReason, extensions.DeploymentProgressing)).NotTo(HaveOccurred())
}
+
+func replicaSetHasDesiredReplicas(rsClient extensionsclient.ReplicaSetsGetter, replicaSet *extensions.ReplicaSet) wait.ConditionFunc {
+ desiredGeneration := replicaSet.Generation
+ return func() (bool, error) {
+ rs, err := rsClient.ReplicaSets(replicaSet.Namespace).Get(replicaSet.Name)
+ if err != nil {
+ return false, err
+ }
+ return rs.Status.ObservedGeneration >= desiredGeneration && rs.Status.Replicas == *(rs.Spec.Replicas), nil
+ }
+}
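The deployment hunks above repeatedly take the address of a replica count and build *intstr.IntOrString values through inline closures (MaxSurge, MaxUnavailable). Below is a minimal sketch of the same conversions as named helpers; the package name and function names are illustrative assumptions, not part of this patch.

package e2ehelpers // hypothetical helper package, not part of this patch

import "k8s.io/kubernetes/pkg/util/intstr"

// int32Ptr returns a pointer to its argument, matching the *int32
// Replicas fields used by the versioned types in the hunks above.
func int32Ptr(i int32) *int32 { return &i }

// intOrStringPtr wraps intstr.FromInt so MaxSurge and MaxUnavailable
// can be assigned without an inline closure.
func intOrStringPtr(i int) *intstr.IntOrString {
	x := intstr.FromInt(i)
	return &x
}

With such helpers, d.Spec.Strategy.RollingUpdate.MaxSurge = intOrStringPtr(3) would read the same as the inline form used in testScaledRolloutDeployment.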
diff --git a/test/e2e/disruption.go b/test/e2e/disruption.go
index 68d643b0f63..84beed85546 100644
--- a/test/e2e/disruption.go
+++ b/test/e2e/disruption.go
@@ -24,7 +24,7 @@ import (
. "github.com/onsi/gomega"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/unversioned"
- api "k8s.io/client-go/pkg/api/v1"
+ "k8s.io/client-go/pkg/api/v1"
extensions "k8s.io/client-go/pkg/apis/extensions/v1beta1"
policy "k8s.io/client-go/pkg/apis/policy/v1beta1"
"k8s.io/client-go/pkg/util/intstr"
@@ -127,15 +127,15 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
}
// Locate a running pod.
- var pod api.Pod
+ var pod v1.Pod
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
- podList, err := cs.Pods(ns).List(api.ListOptions{})
+ podList, err := cs.Pods(ns).List(v1.ListOptions{})
if err != nil {
return false, err
}
for i := range podList.Items {
- if podList.Items[i].Status.Phase == api.PodRunning {
+ if podList.Items[i].Status.Phase == v1.PodRunning {
pod = podList.Items[i]
return true, nil
}
@@ -146,7 +146,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
Expect(err).NotTo(HaveOccurred())
e := &policy.Eviction{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: pod.Name,
Namespace: ns,
},
@@ -184,7 +184,7 @@ var _ = framework.KubeDescribe("DisruptionController", func() {
func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvailable intstr.IntOrString) {
pdb := policy.PodDisruptionBudget{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "foo",
Namespace: ns,
},
@@ -199,20 +199,20 @@ func createPodDisruptionBudgetOrDie(cs *kubernetes.Clientset, ns string, minAvai
func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
for i := 0; i < n; i++ {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod-%d", i),
Namespace: ns,
Labels: map[string]string{"foo": "bar"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.4",
},
},
- RestartPolicy: api.RestartPolicyAlways,
+ RestartPolicy: v1.RestartPolicyAlways,
},
}
@@ -224,7 +224,7 @@ func createPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
By("Waiting for all pods to be running")
err := wait.PollImmediate(framework.Poll, schedulingTimeout, func() (bool, error) {
- pods, err := cs.Core().Pods(ns).List(api.ListOptions{LabelSelector: "foo=bar"})
+ pods, err := cs.Core().Pods(ns).List(v1.ListOptions{LabelSelector: "foo=bar"})
if err != nil {
return false, err
}
@@ -237,7 +237,7 @@ func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
}
ready := 0
for i := 0; i < n; i++ {
- if pods.Items[i].Status.Phase == api.PodRunning {
+ if pods.Items[i].Status.Phase == v1.PodRunning {
ready++
}
}
@@ -251,18 +251,18 @@ func waitForPodsOrDie(cs *kubernetes.Clientset, ns string, n int) {
}
func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, exclusive bool) {
- container := api.Container{
+ container := v1.Container{
Name: "busybox",
Image: "gcr.io/google_containers/echoserver:1.4",
}
if exclusive {
- container.Ports = []api.ContainerPort{
+ container.Ports = []v1.ContainerPort{
{HostPort: 5555, ContainerPort: 5555},
}
}
rs := &extensions.ReplicaSet{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "rs",
Namespace: ns,
},
@@ -271,12 +271,12 @@ func createReplicaSetOrDie(cs *kubernetes.Clientset, ns string, size int32, excl
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"foo": "bar"},
},
- Spec: api.PodSpec{
- Containers: []api.Container{container},
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{container},
},
},
},
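The DisruptionController changes keep the original control flow while switching to client-go v1 types. Below is a hedged sketch of the running-pod poll extracted into a helper; the helper name is an assumption, and the import path for wait is assumed to follow the client-go layout this file already uses.

package e2ehelpers // hypothetical, not part of this patch

import (
	"time"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/pkg/util/wait" // assumed path, mirroring the file's other client-go imports
)

// firstRunningPod lists pods in ns until one reaches v1.PodRunning or the
// timeout expires, the same loop used by the eviction test above.
func firstRunningPod(cs *kubernetes.Clientset, ns string, timeout time.Duration) (v1.Pod, error) {
	var pod v1.Pod
	err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
		podList, err := cs.Core().Pods(ns).List(v1.ListOptions{})
		if err != nil {
			return false, err
		}
		for i := range podList.Items {
			if podList.Items[i].Status.Phase == v1.PodRunning {
				pod = podList.Items[i]
				return true, nil
			}
		}
		return false, nil
	})
	return pod, err
}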
diff --git a/test/e2e/dns.go b/test/e2e/dns.go
index 027ab34ccde..3bafd1f256d 100644
--- a/test/e2e/dns.go
+++ b/test/e2e/dns.go
@@ -24,10 +24,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
@@ -42,37 +43,37 @@ var dnsServiceLabelSelector = labels.Set{
"kubernetes.io/cluster-service": "true",
}.AsSelector()
-func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotation bool) *api.Pod {
- dnsPod := &api.Pod{
+func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotation bool) *v1.Pod {
+ dnsPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "dns-test-" + string(uuid.NewUUID()),
Namespace: namespace,
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: "results",
- VolumeSource: api.VolumeSource{
- EmptyDir: &api.EmptyDirVolumeSource{},
+ VolumeSource: v1.VolumeSource{
+ EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
// TODO: Consider scraping logs instead of running a webserver.
{
Name: "webserver",
Image: "gcr.io/google_containers/test-webserver:e2e",
- Ports: []api.ContainerPort{
+ Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: 80,
},
},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
@@ -83,7 +84,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotatio
Name: "querier",
Image: "gcr.io/google_containers/dnsutils:e2e",
Command: []string{"sh", "-c", wheezyProbeCmd},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
@@ -94,7 +95,7 @@ func createDNSPod(namespace, wheezyProbeCmd, jessieProbeCmd string, useAnnotatio
Name: "jessie-querier",
Image: "gcr.io/google_containers/jessie-dnsutils:e2e",
Command: []string{"sh", "-c", jessieProbeCmd},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "results",
MountPath: "/results",
@@ -171,11 +172,11 @@ func createTargetedProbeCommand(nameToResolve string, lookup string, fileNamePre
return probeCmd, fileName
}
-func assertFilesExist(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface) {
+func assertFilesExist(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface) {
assertFilesContain(fileNames, fileDir, pod, client, false, "")
}
-func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client clientset.Interface, check bool, expected string) {
+func assertFilesContain(fileNames []string, fileDir string, pod *v1.Pod, client clientset.Interface, check bool, expected string) {
var failed []string
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
@@ -220,14 +221,14 @@ func assertFilesContain(fileNames []string, fileDir string, pod *api.Pod, client
Expect(len(failed)).To(Equal(0))
}
-func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string) {
+func validateDNSResults(f *framework.Framework, pod *v1.Pod, fileNames []string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
- podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
@@ -249,14 +250,14 @@ func validateDNSResults(f *framework.Framework, pod *api.Pod, fileNames []string
framework.Logf("DNS probes using %s succeeded\n", pod.Name)
}
-func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames []string, value string) {
+func validateTargetedProbeOutput(f *framework.Framework, pod *v1.Pod, fileNames []string, value string) {
By("submitting the pod to kubernetes")
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
defer func() {
By("deleting the pod")
defer GinkgoRecover()
- podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
}()
if _, err := podClient.Create(pod); err != nil {
framework.Failf("Failed to create %s pod: %v", pod.Name, err)
@@ -279,7 +280,7 @@ func validateTargetedProbeOutput(f *framework.Framework, pod *api.Pod, fileNames
func verifyDNSPodIsRunning(f *framework.Framework) {
systemClient := f.ClientSet.Core().Pods(api.NamespaceSystem)
By("Waiting for DNS Service to be Running")
- options := api.ListOptions{LabelSelector: dnsServiceLabelSelector}
+ options := v1.ListOptions{LabelSelector: dnsServiceLabelSelector.String()}
dnsPods, err := systemClient.List(options)
if err != nil {
framework.Failf("Failed to list all dns service pods")
@@ -291,20 +292,20 @@ func verifyDNSPodIsRunning(f *framework.Framework) {
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, &pod))
}
-func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *api.Service {
- headlessService := &api.Service{
- ObjectMeta: api.ObjectMeta{
+func createServiceSpec(serviceName, externalName string, isHeadless bool, selector map[string]string) *v1.Service {
+ headlessService := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: selector,
},
}
if externalName != "" {
- headlessService.Spec.Type = api.ServiceTypeExternalName
+ headlessService.Spec.Type = v1.ServiceTypeExternalName
headlessService.Spec.ExternalName = externalName
} else {
- headlessService.Spec.Ports = []api.ServicePort{
+ headlessService.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
}
@@ -463,7 +464,7 @@ var _ = framework.KubeDescribe("DNS", func() {
// Test changing the externalName field
By("changing the externalName to bar.example.com")
- _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) {
+ _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
s.Spec.ExternalName = "bar.example.com"
})
Expect(err).NotTo(HaveOccurred())
@@ -480,10 +481,10 @@ var _ = framework.KubeDescribe("DNS", func() {
// Test changing type from ExternalName to ClusterIP
By("changing the service to type=ClusterIP")
- _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeClusterIP
+ _, err = updateService(f.ClientSet, f.Namespace.Name, serviceName, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.ClusterIP = "127.1.2.3"
- s.Spec.Ports = []api.ServicePort{
+ s.Spec.Ports = []v1.ServicePort{
{Port: 80, Name: "http", Protocol: "TCP"},
}
})
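A recurring edit in dns.go (and in most files below) is the selector conversion: v1.ListOptions carries LabelSelector and FieldSelector as strings, so selectors built from label sets are now serialized with String(). A minimal sketch of that conversion follows, with an illustrative package and function name that are not part of this patch.

package e2ehelpers // hypothetical, not part of this patch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/labels"
)

// listOptionsForLabels builds the string-based selector that the versioned
// ListOptions expects, mirroring the selector.String() calls in the hunks above.
func listOptionsForLabels(set map[string]string) v1.ListOptions {
	return v1.ListOptions{
		LabelSelector: labels.SelectorFromSet(labels.Set(set)).String(),
	}
}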
diff --git a/test/e2e/dns_autoscaling.go b/test/e2e/dns_autoscaling.go
index 0d4ca8ff5bf..badc6f1b2d9 100644
--- a/test/e2e/dns_autoscaling.go
+++ b/test/e2e/dns_autoscaling.go
@@ -23,8 +23,8 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -144,7 +144,7 @@ var _ = framework.KubeDescribe("DNS horizontal autoscaling", func() {
})
})
-func fetchDNSScalingConfigMap(c clientset.Interface) (*api.ConfigMap, error) {
+func fetchDNSScalingConfigMap(c clientset.Interface) (*v1.ConfigMap, error) {
cm, err := c.Core().ConfigMaps(DNSNamespace).Get(DNSAutoscalerLabelName)
if err != nil {
return nil, err
@@ -160,15 +160,15 @@ func deleteDNSScalingConfigMap(c clientset.Interface) error {
return nil
}
-func packDNSScalingConfigMap(params map[string]string) *api.ConfigMap {
- configMap := api.ConfigMap{}
+func packDNSScalingConfigMap(params map[string]string) *v1.ConfigMap {
+ configMap := v1.ConfigMap{}
configMap.ObjectMeta.Name = DNSAutoscalerLabelName
configMap.ObjectMeta.Namespace = DNSNamespace
configMap.Data = params
return &configMap
}
-func updateDNSScalingConfigMap(c clientset.Interface, configMap *api.ConfigMap) error {
+func updateDNSScalingConfigMap(c clientset.Interface, configMap *v1.ConfigMap) error {
_, err := c.Core().ConfigMaps(DNSNamespace).Update(configMap)
if err != nil {
return err
@@ -179,7 +179,7 @@ func updateDNSScalingConfigMap(c clientset.Interface, configMap *api.ConfigMap)
func getDNSReplicas(c clientset.Interface) (int, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: KubeDNSLabelName}))
- listOpts := api.ListOptions{LabelSelector: label}
+ listOpts := v1.ListOptions{LabelSelector: label.String()}
deployments, err := c.Extensions().Deployments(DNSNamespace).List(listOpts)
if err != nil {
return 0, err
@@ -187,12 +187,12 @@ func getDNSReplicas(c clientset.Interface) (int, error) {
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
- return int(deployment.Spec.Replicas), nil
+ return int(*(deployment.Spec.Replicas)), nil
}
func deleteDNSAutoscalerPod(c clientset.Interface) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{ClusterAddonLabelKey: DNSAutoscalerLabelName}))
- listOpts := api.ListOptions{LabelSelector: label}
+ listOpts := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(DNSNamespace).List(listOpts)
if err != nil {
return err
@@ -227,7 +227,7 @@ func waitForDNSReplicasSatisfied(c clientset.Interface, expected int, timeout ti
return nil
}
-func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *api.ConfigMap, err error) {
+func waitForDNSConfigMapCreated(c clientset.Interface, timeout time.Duration) (configMap *v1.ConfigMap, err error) {
framework.Logf("Waiting up to %v for DNS autoscaling ConfigMap got re-created", timeout)
condition := func() (bool, error) {
configMap, err = fetchDNSScalingConfigMap(c)
diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index ba0da19a614..22449966cb3 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -31,6 +31,7 @@ import (
"github.com/onsi/gomega"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/logs"
"k8s.io/kubernetes/pkg/util/runtime"
@@ -94,19 +95,15 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
framework.Failf("Failed to setup provider config: %v", err)
}
- c, err := framework.LoadInternalClientset()
+ c, err := framework.LoadClientset()
if err != nil {
glog.Fatal("Error loading client: ", err)
}
- clientset, err := framework.LoadClientset()
- if err != nil {
- glog.Fatal("Error loading clientset: ", err)
- }
// Delete any namespaces except default and kube-system. This ensures no
// lingering resources are left over from a previous test run.
if framework.TestContext.CleanStart {
- deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, api.NamespaceDefault})
+ deleted, err := framework.DeleteNamespaces(c, nil /* deleteFilter */, []string{api.NamespaceSystem, v1.NamespaceDefault})
if err != nil {
framework.Failf("Error deleting orphaned namespaces: %v", err)
}
@@ -127,9 +124,9 @@ var _ = ginkgo.SynchronizedBeforeSuite(func() []byte {
// ready will fail).
podStartupTimeout := framework.TestContext.SystemPodsStartupTimeout
if err := framework.WaitForPodsRunningReady(c, api.NamespaceSystem, int32(framework.TestContext.MinStartupPods), podStartupTimeout, framework.ImagePullerLabels); err != nil {
- framework.DumpAllNamespaceInfo(c, clientset, api.NamespaceSystem)
+ framework.DumpAllNamespaceInfo(c, api.NamespaceSystem)
framework.LogFailedContainers(c, api.NamespaceSystem, framework.Logf)
- framework.RunKubernetesServiceTestContainer(c, api.NamespaceDefault)
+ framework.RunKubernetesServiceTestContainer(c, v1.NamespaceDefault)
framework.Failf("Error waiting for all pods to be running and ready: %v", err)
}
diff --git a/test/e2e/empty_dir_wrapper.go b/test/e2e/empty_dir_wrapper.go
index 77d57e57794..28a1e0a0e20 100644
--- a/test/e2e/empty_dir_wrapper.go
+++ b/test/e2e/empty_dir_wrapper.go
@@ -17,8 +17,8 @@ limitations under the License.
package e2e
import (
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -57,8 +57,8 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
volumeName := "secret-volume"
volumeMountPath := "/etc/secret-volume"
- secret := &api.Secret{
- ObjectMeta: api.ObjectMeta{
+ secret := &v1.Secret{
+ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
@@ -77,35 +77,35 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
gitURL, gitRepo, gitCleanup := createGitServer(f)
defer gitCleanup()
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "pod-secrets-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
- Volumes: []api.Volume{
+ Spec: v1.PodSpec{
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- Secret: &api.SecretVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ Secret: &v1.SecretVolumeSource{
SecretName: name,
},
},
},
{
Name: gitVolumeName,
- VolumeSource: api.VolumeSource{
- GitRepo: &api.GitRepoVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "secret-test",
Image: "gcr.io/google_containers/test-webserver:e2e",
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumeMountPath,
@@ -128,7 +128,7 @@ var _ = framework.KubeDescribe("EmptyDir wrapper volumes", func() {
framework.Failf("unable to delete secret %v: %v", secret.Name, err)
}
By("Cleaning up the git vol pod")
- if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0)); err != nil {
+ if err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git vol pod %v: %v", pod.Name, err)
}
}()
@@ -177,18 +177,18 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
labels := map[string]string{"name": gitServerPodName}
- gitServerPod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ gitServerPod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: gitServerPodName,
Labels: labels,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "git-repo",
Image: "gcr.io/google_containers/fakegitserver:0.1",
ImagePullPolicy: "IfNotPresent",
- Ports: []api.ContainerPort{
+ Ports: []v1.ContainerPort{
{ContainerPort: int32(containerPort)},
},
},
@@ -200,13 +200,13 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
// Portal IP and port
httpPort := 2345
- gitServerSvc := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ gitServerSvc := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: "git-server-svc",
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: labels,
- Ports: []api.ServicePort{
+ Ports: []v1.ServicePort{
{
Name: "http-portal",
Port: int32(httpPort),
@@ -222,7 +222,7 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
By("Cleaning up the git server pod")
- if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, api.NewDeleteOptions(0)); err != nil {
+ if err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(gitServerPod.Name, v1.NewDeleteOptions(0)); err != nil {
framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
}
By("Cleaning up the git server svc")
@@ -232,19 +232,19 @@ func createGitServer(f *framework.Framework) (gitURL string, gitRepo string, cle
}
}
-func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []api.Volume, volumeMounts []api.VolumeMount) {
+func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
volumeName := fmt.Sprintf("racey-git-repo-%d", i)
- volumes = append(volumes, api.Volume{
+ volumes = append(volumes, v1.Volume{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- GitRepo: &api.GitRepoVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ GitRepo: &v1.GitRepoVolumeSource{
Repository: gitURL,
Directory: gitRepo,
},
},
})
- volumeMounts = append(volumeMounts, api.VolumeMount{
+ volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
})
@@ -257,8 +257,8 @@ func createConfigmapsForRace(f *framework.Framework) (configMapNames []string) {
for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
configMapName := fmt.Sprintf("racey-configmap-%d", i)
configMapNames = append(configMapNames, configMapName)
- configMap := &api.ConfigMap{
- ObjectMeta: api.ObjectMeta{
+ configMap := &v1.ConfigMap{
+ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: configMapName,
},
@@ -280,17 +280,17 @@ func deleteConfigMaps(f *framework.Framework, configMapNames []string) {
}
}
-func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volumeMounts []api.VolumeMount) {
+func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
for i, configMapName := range configMapNames {
volumeName := fmt.Sprintf("racey-configmap-%d", i)
- volumes = append(volumes, api.Volume{
+ volumes = append(volumes, v1.Volume{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{
Name: configMapName,
},
- Items: []api.KeyToPath{
+ Items: []v1.KeyToPath{
{
Key: "data-1",
Path: "data-1",
@@ -299,7 +299,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume
},
},
})
- volumeMounts = append(volumeMounts, api.VolumeMount{
+ volumeMounts = append(volumeMounts, v1.VolumeMount{
Name: volumeName,
MountPath: fmt.Sprintf("/etc/config-%d", i),
})
@@ -307,7 +307,7 @@ func makeConfigMapVolumes(configMapNames []string) (volumes []api.Volume, volume
return
}
-func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volumeMounts []api.VolumeMount, podCount int32) {
+func testNoWrappedVolumeRace(f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
nodeList := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodeList.Items)).To(BeNumerically(">", 0))
@@ -315,7 +315,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
By("Creating RC which spawns configmap-volume pods")
affinity := map[string]string{
- api.AffinityAnnotationKey: fmt.Sprintf(`
+ v1.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
@@ -327,35 +327,35 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
}}}`, targetNode.Name),
}
- rc := &api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+ rc := &v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Name: rcName,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: podCount,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: &podCount,
Selector: map[string]string{
"name": rcName,
},
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Annotations: affinity,
Labels: map[string]string{"name": rcName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sleep", "10000"},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceCPU: resource.MustParse("10m"),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("10m"),
},
},
VolumeMounts: volumeMounts,
},
},
- DNSPolicy: api.DNSDefault,
+ DNSPolicy: v1.DNSDefault,
Volumes: volumes,
},
},
@@ -365,7 +365,7 @@ func testNoWrappedVolumeRace(f *framework.Framework, volumes []api.Volume, volum
Expect(err).NotTo(HaveOccurred(), "error creating replication controller")
defer func() {
- err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName)
+ err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
framework.ExpectNoError(err)
}()
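The ReplicationController spec above now assigns Replicas through a pointer (Replicas: &podCount), which works because podCount is an addressable variable. Below is a short illustrative sketch of the same pattern for callers starting from a literal count; the names are assumptions, not part of this patch.

package e2ehelpers // hypothetical, not part of this patch

import "k8s.io/kubernetes/pkg/api/v1"

// newRCSpec stores the count in an addressable parameter before taking
// its address for the pointer-valued Replicas field.
func newRCSpec(name string, count int32) v1.ReplicationControllerSpec {
	return v1.ReplicationControllerSpec{
		Replicas: &count, // count is a function parameter, hence addressable
		Selector: map[string]string{"name": name},
	}
}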
diff --git a/test/e2e/etcd_failure.go b/test/e2e/etcd_failure.go
index f9eba185430..d18fd8ef86f 100644
--- a/test/e2e/etcd_failure.go
+++ b/test/e2e/etcd_failure.go
@@ -19,7 +19,7 @@ package e2e
import (
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -106,7 +106,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
By("deleting pods from existing replication controller")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
- options := api.ListOptions{LabelSelector: rcSelector}
+ options := v1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
if err != nil {
framework.Logf("apiserver returned error, as expected before recovery: %v", err)
@@ -116,7 +116,7 @@ func checkExistingRCRecovers(f *framework.Framework) {
return false, nil
}
for _, pod := range pods.Items {
- err = podClient.Delete(pod.Name, api.NewDeleteOptions(0))
+ err = podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
}
framework.Logf("apiserver has recovered")
@@ -125,11 +125,11 @@ func checkExistingRCRecovers(f *framework.Framework) {
By("waiting for replication controller to recover")
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*60, func() (bool, error) {
- options := api.ListOptions{LabelSelector: rcSelector}
+ options := v1.ListOptions{LabelSelector: rcSelector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
- if pod.DeletionTimestamp == nil && api.IsPodReady(&pod) {
+ if pod.DeletionTimestamp == nil && v1.IsPodReady(&pod) {
return true, nil
}
}
diff --git a/test/e2e/events.go b/test/e2e/events.go
index 0575799cd76..1049bc1fd53 100644
--- a/test/e2e/events.go
+++ b/test/e2e/events.go
@@ -21,7 +21,7 @@ import (
"strconv"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -42,20 +42,20 @@ var _ = framework.KubeDescribe("Events", func() {
By("creating the pod")
name := "send-events-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "p",
Image: "gcr.io/google_containers/serve_hostname:v1.4",
- Ports: []api.ContainerPort{{ContainerPort: 80}},
+ Ports: []v1.ContainerPort{{ContainerPort: 80}},
},
},
},
@@ -74,7 +74,7 @@ var _ = framework.KubeDescribe("Events", func() {
By("verifying the pod is in kubernetes")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(len(pods.Items)).To(Equal(1))
@@ -84,7 +84,7 @@ var _ = framework.KubeDescribe("Events", func() {
framework.Failf("Failed to get pod: %v", err)
}
fmt.Printf("%+v\n", podWithUid)
- var events *api.EventList
+ var events *v1.EventList
// Check for scheduler event about the pod.
By("checking for scheduler event about the pod")
framework.ExpectNoError(wait.Poll(time.Second*2, time.Second*60, func() (bool, error) {
@@ -92,9 +92,9 @@ var _ = framework.KubeDescribe("Events", func() {
"involvedObject.kind": "Pod",
"involvedObject.uid": string(podWithUid.UID),
"involvedObject.namespace": f.Namespace.Name,
- "source": api.DefaultSchedulerName,
- }.AsSelector()
- options := api.ListOptions{FieldSelector: selector}
+ "source": v1.DefaultSchedulerName,
+ }.AsSelector().String()
+ options := v1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
if err != nil {
return false, err
@@ -113,8 +113,8 @@ var _ = framework.KubeDescribe("Events", func() {
"involvedObject.kind": "Pod",
"involvedObject.namespace": f.Namespace.Name,
"source": "kubelet",
- }.AsSelector()
- options := api.ListOptions{FieldSelector: selector}
+ }.AsSelector().String()
+ options := v1.ListOptions{FieldSelector: selector}
events, err = f.ClientSet.Core().Events(f.Namespace.Name).List(options)
if err != nil {
return false, err
diff --git a/test/e2e/example_cluster_dns.go b/test/e2e/example_cluster_dns.go
index f3867b02603..f89717fd965 100644
--- a/test/e2e/example_cluster_dns.go
+++ b/test/e2e/example_cluster_dns.go
@@ -21,8 +21,8 @@ import (
"path/filepath"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
@@ -73,7 +73,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
// we need two namespaces anyway, so let's forget about
// the one created in BeforeEach and create two new ones.
- namespaces := []*api.Namespace{nil, nil}
+ namespaces := []*v1.Namespace{nil, nil}
for i := range namespaces {
var err error
namespaces[i], err = f.CreateNamespace(fmt.Sprintf("dnsexample%d", i), nil)
@@ -97,7 +97,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
// the application itself may have not been initialized. Just query the application.
for _, ns := range namespaces {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendRcName}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns.Name).List(options)
Expect(err).NotTo(HaveOccurred())
err = framework.PodsResponding(c, ns.Name, backendPodName, false, pods)
@@ -117,7 +117,7 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
// dns error or timeout.
// This code is probably unnecessary, but let's stay on the safe side.
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": backendPodName}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(namespaces[0].Name).List(options)
if err != nil || pods == nil || len(pods.Items) == 0 {
@@ -151,6 +151,6 @@ var _ = framework.KubeDescribe("ClusterDns [Feature:Example]", func() {
})
})
-func getNsCmdFlag(ns *api.Namespace) string {
+func getNsCmdFlag(ns *v1.Namespace) string {
return fmt.Sprintf("--namespace=%v", ns.Name)
}
diff --git a/test/e2e/example_k8petstore.go b/test/e2e/example_k8petstore.go
index b112529b462..6a03420ac29 100644
--- a/test/e2e/example_k8petstore.go
+++ b/test/e2e/example_k8petstore.go
@@ -25,7 +25,7 @@ import (
"syscall"
"time"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
diff --git a/test/e2e/examples.go b/test/e2e/examples.go
index 70343a246bb..06dbd5fcaf6 100644
--- a/test/e2e/examples.go
+++ b/test/e2e/examples.go
@@ -26,8 +26,8 @@ import (
"sync"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -49,11 +49,11 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
return f.NewClusterVerification(
framework.PodStateVerification{
Selectors: map[string]string{selectorKey: selectorValue},
- ValidPhases: []api.PodPhase{api.PodRunning},
+ ValidPhases: []v1.PodPhase{v1.PodRunning},
})
}
// Customized ForEach wrapper for this test.
- forEachPod := func(selectorKey string, selectorValue string, fn func(api.Pod)) {
+ forEachPod := func(selectorKey string, selectorValue string, fn func(v1.Pod)) {
clusterState(selectorKey, selectorValue).ForEach(fn)
}
var c clientset.Interface
@@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
- forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
+ forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "redis", expectedOnServer, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
@@ -123,7 +123,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label = labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
- forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
+ forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
if pod.Name != bootstrapPodName {
_, err := framework.LookForStringInLog(ns, pod.Name, "sentinel", expectedOnSentinel, serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
@@ -164,7 +164,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.Logf("Now polling for Master startup...")
// Only one master pod: But its a natural way to look up pod names.
- forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
+ forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
framework.Logf("Now waiting for master to startup in %v", pod.Name)
_, err := framework.LookForStringInLog(ns, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
@@ -173,7 +173,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
By("waiting for master endpoint")
err = framework.WaitForEndpoint(c, ns, "spark-master")
Expect(err).NotTo(HaveOccurred())
- forEachPod(selectorKey, selectorValue, func(pod api.Pod) {
+ forEachPod(selectorKey, selectorValue, func(pod v1.Pod) {
_, maErr := framework.LookForStringInLog(f.Namespace.Name, pod.Name, "spark-master", "Starting Spark master at", serverStartTimeout)
if maErr != nil {
framework.Failf("Didn't find target string. error:", maErr)
@@ -194,7 +194,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
framework.Logf("Now polling for worker startup...")
forEachPod(selectorKey, selectorValue,
- func(pod api.Pod) {
+ func(pod v1.Pod) {
_, slaveErr := framework.LookForStringInLog(ns, pod.Name, "spark-worker", "Successfully registered with master", serverStartTimeout)
Expect(slaveErr).NotTo(HaveOccurred())
})
@@ -226,7 +226,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
- forEachPod("app", "cassandra", func(pod api.Pod) {
+ forEachPod("app", "cassandra", func(pod v1.Pod) {
framework.Logf("Verifying pod %v ", pod.Name)
// TODO how do we do this better? Ready Probe?
_, err = framework.LookForStringInLog(ns, pod.Name, "cassandra", "Starting listening for CQL clients", serverStartTimeout)
@@ -234,7 +234,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
})
By("Finding each node in the nodetool status lines")
- forEachPod("app", "cassandra", func(pod api.Pod) {
+ forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if matched != true {
@@ -281,7 +281,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"app": "cassandra"}))
err = wait.PollImmediate(statefulsetPoll, statefulsetTimeout,
func() (bool, error) {
- podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: label})
+ podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: label.String()})
if err != nil {
return false, fmt.Errorf("Unable to get list of pods in statefulset %s", label)
}
@@ -294,9 +294,9 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
}
for _, p := range podList.Items {
- isReady := api.IsPodReady(&p)
- if p.Status.Phase != api.PodRunning || !isReady {
- framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, api.PodRunning, p.Status.Phase, isReady)
+ isReady := v1.IsPodReady(&p)
+ if p.Status.Phase != v1.PodRunning || !isReady {
+ framework.Logf("Waiting for pod %v to enter %v - Ready=True, currently %v - Ready=%v", p.Name, v1.PodRunning, p.Status.Phase, isReady)
return false, nil
}
}
@@ -305,7 +305,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())
By("Finding each node in the nodetool status lines")
- forEachPod("app", "cassandra", func(pod api.Pod) {
+ forEachPod("app", "cassandra", func(pod v1.Pod) {
output := framework.RunKubectlOrDie("exec", pod.Name, nsFlag, "--", "nodetool", "status")
matched, _ := regexp.MatchString("UN.*"+pod.Status.PodIP, output)
if matched != true {
@@ -357,7 +357,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "storm-worker"}))
err = testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
- forEachPod("name", "storm-worker", func(pod api.Pod) {
+ forEachPod("name", "storm-worker", func(pod v1.Pod) {
//do nothing, just wait for the pod to be running
})
// TODO: Add logging configuration to nimbus & workers images and then
@@ -398,7 +398,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
for t := time.Now(); time.Since(t) < timeout; time.Sleep(framework.Poll) {
pod, err := c.Core().Pods(ns).Get(podName)
framework.ExpectNoError(err, fmt.Sprintf("getting pod %s", podName))
- stat := api.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
+ stat := v1.GetExistingContainerStatus(pod.Status.ContainerStatuses, podName)
framework.Logf("Pod: %s, restart count:%d", stat.Name, stat.RestartCount)
if stat.RestartCount > 0 {
framework.Logf("Saw %v restart, succeeded...", podName)
@@ -494,7 +494,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
checkDbInstances := func() {
- forEachPod("db", "rethinkdb", func(pod api.Pod) {
+ forEachPod("db", "rethinkdb", func(pod v1.Pod) {
_, err = framework.LookForStringInLog(ns, pod.Name, "rethinkdb", "Server ready", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
@@ -504,7 +504,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())
By("scaling rethinkdb")
- framework.ScaleRC(f.ClientSet, ns, "rethinkdb-rc", 2, true)
+ framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "rethinkdb-rc", 2, true)
checkDbInstances()
By("starting admin")
@@ -536,7 +536,7 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": "hazelcast"}))
err := testutils.WaitForPodsWithLabelRunning(c, ns, label)
Expect(err).NotTo(HaveOccurred())
- forEachPod("name", "hazelcast", func(pod api.Pod) {
+ forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [1]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
_, err = framework.LookForStringInLog(ns, pod.Name, "hazelcast", "is STARTED", serverStartTimeout)
@@ -547,8 +547,8 @@ var _ = framework.KubeDescribe("[Feature:Example]", func() {
Expect(err).NotTo(HaveOccurred())
By("scaling hazelcast")
- framework.ScaleRC(f.ClientSet, ns, "hazelcast", 2, true)
- forEachPod("name", "hazelcast", func(pod api.Pod) {
+ framework.ScaleRC(f.ClientSet, f.InternalClientset, ns, "hazelcast", 2, true)
+ forEachPod("name", "hazelcast", func(pod v1.Pod) {
_, err := framework.LookForStringInLog(ns, pod.Name, "hazelcast", "Members [2]", serverStartTimeout)
Expect(err).NotTo(HaveOccurred())
})
diff --git a/test/e2e/federated-namespace.go b/test/e2e/federated-namespace.go
index 5b5be964d34..42818380643 100644
--- a/test/e2e/federated-namespace.go
+++ b/test/e2e/federated-namespace.go
@@ -23,8 +23,8 @@ import (
"time"
clientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5/typed/core/v1"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/v1"
api_v1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -111,7 +111,7 @@ var _ = framework.KubeDescribe("Federation namespace [Feature:Federation]", func
// Create resources in the namespace.
event := api_v1.Event{
ObjectMeta: api_v1.ObjectMeta{
- Name: api.SimpleNameGenerator.GenerateName(eventNamePrefix),
+ Name: v1.SimpleNameGenerator.GenerateName(eventNamePrefix),
Namespace: nsName,
},
InvolvedObject: api_v1.ObjectReference{
@@ -185,7 +185,7 @@ func verifyNsCascadingDeletion(nsClient clientset.NamespaceInterface, clusters m
func createNamespace(nsClient clientset.NamespaceInterface) string {
ns := api_v1.Namespace{
ObjectMeta: api_v1.ObjectMeta{
- Name: api.SimpleNameGenerator.GenerateName(namespacePrefix),
+ Name: v1.SimpleNameGenerator.GenerateName(namespacePrefix),
},
}
By(fmt.Sprintf("Creating namespace %s", ns.Name))
diff --git a/test/e2e/federated-secret.go b/test/e2e/federated-secret.go
index ae7a5dba2d0..ae765bad0d3 100644
--- a/test/e2e/federated-secret.go
+++ b/test/e2e/federated-secret.go
@@ -25,7 +25,6 @@ import (
. "github.com/onsi/gomega"
fedclientset "k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_5"
"k8s.io/kubernetes/federation/pkg/federation-controller/util"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
kubeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
@@ -156,7 +155,7 @@ func createSecretOrFail(clientset *fedclientset.Clientset, nsName string) *v1.Se
secret := &v1.Secret{
ObjectMeta: v1.ObjectMeta{
- Name: api.SimpleNameGenerator.GenerateName(secretNamePrefix),
+ Name: v1.SimpleNameGenerator.GenerateName(secretNamePrefix),
Namespace: nsName,
},
}
diff --git a/test/e2e/federation-util.go b/test/e2e/federation-util.go
index f51ff4c0e63..f01694cf6f9 100644
--- a/test/e2e/federation-util.go
+++ b/test/e2e/federation-util.go
@@ -347,7 +347,7 @@ func cleanupServiceShardLoadBalancer(clusterName string, service *v1.Service, ti
return fmt.Errorf("cloud provider undefined")
}
- internalSvc := &api.Service{}
+ internalSvc := &v1.Service{}
err := api.Scheme.Convert(service, internalSvc, nil)
if err != nil {
return fmt.Errorf("failed to convert versioned service object to internal type: %v", err)
@@ -415,19 +415,19 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s
command := []string{"sh", "-c", fmt.Sprintf("until nslookup '%s'; do sleep 10; done", name)}
By(fmt.Sprintf("Looking up %q", name))
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "federated-service-discovery-container",
Image: "gcr.io/google_containers/busybox:1.24",
Command: command,
},
},
- RestartPolicy: api.RestartPolicyOnFailure,
+ RestartPolicy: v1.RestartPolicyOnFailure,
},
}
@@ -438,7 +438,7 @@ func discoverService(f *framework.Framework, name string, exists bool, podName s
By(fmt.Sprintf("Successfully created pod %q in namespace %q", pod.Name, nsName))
defer func() {
By(fmt.Sprintf("Deleting pod %q from namespace %q", podName, nsName))
- err := f.ClientSet.Core().Pods(nsName).Delete(podName, api.NewDeleteOptions(0))
+ err := f.ClientSet.Core().Pods(nsName).Delete(podName, v1.NewDeleteOptions(0))
framework.ExpectNoError(err, "Deleting pod %q from namespace %q", podName, nsName)
By(fmt.Sprintf("Deleted pod %q from namespace %q", podName, nsName))
}()
diff --git a/test/e2e/framework/exec_util.go b/test/e2e/framework/exec_util.go
index 2a553bd112a..f35de2e78ca 100644
--- a/test/e2e/framework/exec_util.go
+++ b/test/e2e/framework/exec_util.go
@@ -23,6 +23,7 @@ import (
"strings"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/unversioned/remotecommand"
remotecommandserver "k8s.io/kubernetes/pkg/kubelet/server/remotecommand"
@@ -62,7 +63,7 @@ func (f *Framework) ExecWithOptions(options ExecOptions) (string, string, error)
Namespace(options.Namespace).
SubResource("exec").
Param("container", options.ContainerName)
- req.VersionedParams(&api.PodExecOptions{
+ req.VersionedParams(&v1.PodExecOptions{
Container: options.ContainerName,
Command: options.Command,
Stdin: options.Stdin != nil,
diff --git a/test/e2e/framework/framework.go b/test/e2e/framework/framework.go
index bbede98aab1..ae4dd7bd96f 100644
--- a/test/e2e/framework/framework.go
+++ b/test/e2e/framework/framework.go
@@ -36,7 +36,7 @@ import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
- "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/fields"
@@ -60,15 +60,15 @@ const (
type Framework struct {
BaseName string
- // ClientSet uses internal objects, you should use ClientSet_1_5 where possible.
- ClientSet internalclientset.Interface
+ // ClientSet uses internal objects, you should use ClientSet where possible.
+ ClientSet clientset.Interface
- ClientSet_1_5 *release_1_5.Clientset
- StagingClient *staging.Clientset
- ClientPool dynamic.ClientPool
+ InternalClientset *internalclientset.Clientset
+ StagingClient *staging.Clientset
+ ClientPool dynamic.ClientPool
- Namespace *api.Namespace // Every test has at least one namespace
- namespacesToDelete []*api.Namespace // Some tests have more than one.
+ Namespace *v1.Namespace // Every test has at least one namespace
+ namespacesToDelete []*v1.Namespace // Some tests have more than one.
NamespaceDeletionTimeout time.Duration
gatherer *containerResourceGatherer
@@ -130,7 +130,7 @@ func NewDefaultGroupVersionFramework(baseName string, groupVersion unversioned.G
return f
}
-func NewFramework(baseName string, options FrameworkOptions, client internalclientset.Interface) *Framework {
+func NewFramework(baseName string, options FrameworkOptions, client clientset.Interface) *Framework {
f := &Framework{
BaseName: baseName,
AddonResourceConstraints: make(map[string]ResourceConstraint),
@@ -193,9 +193,9 @@ func (f *Framework) BeforeEach() {
if TestContext.KubeAPIContentType != "" {
config.ContentType = TestContext.KubeAPIContentType
}
- f.ClientSet, err = internalclientset.NewForConfig(config)
+ f.ClientSet, err = clientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
- f.ClientSet_1_5, err = release_1_5.NewForConfig(config)
+ f.InternalClientset, err = internalclientset.NewForConfig(config)
Expect(err).NotTo(HaveOccurred())
clientRepoConfig := getClientRepoConfig(config)
f.StagingClient, err = staging.NewForConfig(clientRepoConfig)
@@ -369,7 +369,7 @@ func (f *Framework) AfterEach() {
// Print events if the test failed.
if CurrentGinkgoTestDescription().Failed && TestContext.DumpLogsOnFailure {
// Pass both unversioned client and and versioned clientset, till we have removed all uses of the unversioned client.
- DumpAllNamespaceInfo(f.ClientSet, f.ClientSet_1_5, f.Namespace.Name)
+ DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
By(fmt.Sprintf("Dumping a list of prepulled images on each node"))
LogContainersInPodsWithLabels(f.ClientSet, api.NamespaceSystem, ImagePullerLabels, "image-puller", Logf)
if f.federated {
@@ -439,7 +439,7 @@ func (f *Framework) AfterEach() {
}
}
-func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*api.Namespace, error) {
+func (f *Framework) CreateNamespace(baseName string, labels map[string]string) (*v1.Namespace, error) {
createTestingNS := TestContext.CreateTestingNS
if createTestingNS == nil {
createTestingNS = CreateTestingNS
@@ -507,14 +507,14 @@ func (f *Framework) WaitForPodNoLongerRunning(podName string) error {
// TestContainerOutput runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a substring matcher.
-func (f *Framework) TestContainerOutput(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) {
+func (f *Framework) TestContainerOutput(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, ContainSubstring)
}
// TestContainerOutputRegexp runs the given pod in the given namespace and waits
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using a regexp matcher.
-func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *api.Pod, containerIndex int, expectedOutput []string) {
+func (f *Framework) TestContainerOutputRegexp(scenarioName string, pod *v1.Pod, containerIndex int, expectedOutput []string) {
f.testContainerOutputMatcher(scenarioName, pod, containerIndex, expectedOutput, MatchRegexp)
}
@@ -524,13 +524,13 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
for {
// TODO: Endpoints client should take a field selector so we
// don't have to list everything.
- list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(api.ListOptions{})
+ list, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
return err
}
rv := list.ResourceVersion
- isOK := func(e *api.Endpoints) bool {
+ isOK := func(e *v1.Endpoints) bool {
return e.Name == serviceName && len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0
}
for i := range list.Items {
@@ -539,8 +539,8 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
}
}
- options := api.ListOptions{
- FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector(),
+ options := v1.ListOptions{
+ FieldSelector: fields.Set{"metadata.name": serviceName}.AsSelector().String(),
ResourceVersion: rv,
}
w, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
@@ -555,7 +555,7 @@ func (f *Framework) WaitForAnEndpoint(serviceName string) error {
// reget and re-watch
break
}
- if e, ok := val.Object.(*api.Endpoints); ok {
+ if e, ok := val.Object.(*v1.Endpoints); ok {
if isOK(e) {
return nil
}
@@ -604,7 +604,7 @@ func (f *Framework) CheckFileSizeViaContainer(podName, containerName, path strin
}
// CreateServiceForSimpleAppWithPods is a convenience wrapper to create a service and its matching pods all at once.
-func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n api.Node) api.PodSpec, count int, block bool) (error, *api.Service) {
+func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int, appName string, podSpec func(n v1.Node) v1.PodSpec, count int, block bool) (error, *v1.Service) {
var err error = nil
theService := f.CreateServiceForSimpleApp(contPort, svcPort, appName)
f.CreatePodsPerNodeForSimpleApp(appName, podSpec, count)
@@ -615,7 +615,7 @@ func (f *Framework) CreateServiceForSimpleAppWithPods(contPort int, svcPort int,
}
// CreateServiceForSimpleApp returns a service that selects/exposes pods (send -1 ports if no exposure needed) with an app label.
-func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *api.Service {
+func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName string) *v1.Service {
if appName == "" {
panic(fmt.Sprintf("no app name provided"))
}
@@ -625,11 +625,11 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
}
// For convenience, user-specified ports are optional.
- portsFunc := func() []api.ServicePort {
+ portsFunc := func() []v1.ServicePort {
if contPort < 1 || svcPort < 1 {
return nil
} else {
- return []api.ServicePort{{
+ return []v1.ServicePort{{
Protocol: "TCP",
Port: int32(svcPort),
TargetPort: intstr.FromInt(contPort),
@@ -637,14 +637,14 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
}
}
Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
- service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{
- ObjectMeta: api.ObjectMeta{
+ service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: "service-for-" + appName,
Labels: map[string]string{
"app": appName + "-service",
},
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Ports: portsFunc(),
Selector: serviceSelector,
},
@@ -654,7 +654,7 @@ func (f *Framework) CreateServiceForSimpleApp(contPort, svcPort int, appName str
}
// CreatePodsPerNodeForSimpleApp Creates pods w/ labels. Useful for tests which make a bunch of pods w/o any networking.
-func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n api.Node) api.PodSpec, maxCount int) map[string]string {
+func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n v1.Node) v1.PodSpec, maxCount int) map[string]string {
nodes := GetReadySchedulableNodesOrDie(f.ClientSet)
labels := map[string]string{
"app": appName + "-pod",
@@ -663,8 +663,8 @@ func (f *Framework) CreatePodsPerNodeForSimpleApp(appName string, podSpec func(n
// one per node, but no more than maxCount.
if i <= maxCount {
Logf("%v/%v : Creating container with label app=%v-pod", i, maxCount, appName)
- _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ _, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf(appName+"-pod-%v", i),
Labels: labels,
},
@@ -834,22 +834,22 @@ type PodStateVerification struct {
Selectors map[string]string
// Required: The phases which are valid for your pod.
- ValidPhases []api.PodPhase
+ ValidPhases []v1.PodPhase
// Optional: only pods passing this function will pass the filter
// Verify a pod.
// As an optimization, in addition to specifying filter (boolean),
// this function allows specifying an error as well.
// The error indicates that the polling of the pod spectrum should stop.
- Verify func(api.Pod) (bool, error)
+ Verify func(v1.Pod) (bool, error)
// Optional: only pods with this name will pass the filter.
PodName string
}
type ClusterVerification struct {
- client internalclientset.Interface
- namespace *api.Namespace // pointer rather than string, since ns isn't created until before each.
+ client clientset.Interface
+ namespace *v1.Namespace // pointer rather than string, since ns isn't created until before each.
podState PodStateVerification
}
@@ -861,11 +861,11 @@ func (f *Framework) NewClusterVerification(filter PodStateVerification) *Cluster
}
}
-func passesPodNameFilter(pod api.Pod, name string) bool {
+func passesPodNameFilter(pod v1.Pod, name string) bool {
return name == "" || strings.Contains(pod.Name, name)
}
-func passesVerifyFilter(pod api.Pod, verify func(p api.Pod) (bool, error)) (bool, error) {
+func passesVerifyFilter(pod v1.Pod, verify func(p v1.Pod) (bool, error)) (bool, error) {
if verify == nil {
return true, nil
} else {
@@ -879,7 +879,7 @@ func passesVerifyFilter(pod api.Pod, verify func(p api.Pod) (bool, error)) (bool
}
}
-func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
+func passesPhasesFilter(pod v1.Pod, validPhases []v1.PodPhase) bool {
passesPhaseFilter := false
for _, phase := range validPhases {
if pod.Status.Phase == phase {
@@ -890,18 +890,18 @@ func passesPhasesFilter(pod api.Pod, validPhases []api.PodPhase) bool {
}
// filterLabels returns a list of pods which have labels.
-func filterLabels(selectors map[string]string, cli internalclientset.Interface, ns string) (*api.PodList, error) {
+func filterLabels(selectors map[string]string, cli clientset.Interface, ns string) (*v1.PodList, error) {
var err error
var selector labels.Selector
- var pl *api.PodList
+ var pl *v1.PodList
// List pods based on selectors. This might be a tiny optimization rather than filtering
// everything manually.
if len(selectors) > 0 {
selector = labels.SelectorFromSet(labels.Set(selectors))
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pl, err = cli.Core().Pods(ns).List(options)
} else {
- pl, err = cli.Core().Pods(ns).List(api.ListOptions{})
+ pl, err = cli.Core().Pods(ns).List(v1.ListOptions{})
}
return pl, err
}
@@ -909,20 +909,20 @@ func filterLabels(selectors map[string]string, cli internalclientset.Interface,
// filter filters pods which pass a filter. It can be used to compose
// the more useful abstractions like ForEach, WaitFor, and so on, which
// can be used directly by tests.
-func (p *PodStateVerification) filter(c internalclientset.Interface, namespace *api.Namespace) ([]api.Pod, error) {
+func (p *PodStateVerification) filter(c clientset.Interface, namespace *v1.Namespace) ([]v1.Pod, error) {
if len(p.ValidPhases) == 0 || namespace == nil {
panic(fmt.Errorf("Need to specify a valid pod phases (%v) and namespace (%v). ", p.ValidPhases, namespace))
}
ns := namespace.Name
- pl, err := filterLabels(p.Selectors, c, ns) // Build an api.PodList to operate against.
+ pl, err := filterLabels(p.Selectors, c, ns) // Build a v1.PodList to operate against.
Logf("Selector matched %v pods for %v", len(pl.Items), p.Selectors)
if len(pl.Items) == 0 || err != nil {
return pl.Items, err
}
unfilteredPods := pl.Items
- filteredPods := []api.Pod{}
+ filteredPods := []v1.Pod{}
ReturnPodsSoFar:
// Next: Pod must match at least one of the states that the user specified
for _, pod := range unfilteredPods {
@@ -943,8 +943,8 @@ ReturnPodsSoFar:
// WaitFor waits for some minimum number of pods to be verified, according to the PodStateVerification
// definition.
-func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]api.Pod, error) {
- pods := []api.Pod{}
+func (cl *ClusterVerification) WaitFor(atLeast int, timeout time.Duration) ([]v1.Pod, error) {
+ pods := []v1.Pod{}
var returnedErr error
err := wait.Poll(1*time.Second, timeout, func() (bool, error) {
@@ -983,7 +983,7 @@ func (cl *ClusterVerification) WaitForOrFail(atLeast int, timeout time.Duration)
//
// For example, if you require at least 5 pods to be running before your test will pass,
// it's smart to first call "clusterVerification.WaitFor(5)" before you call clusterVerification.ForEach.
-func (cl *ClusterVerification) ForEach(podFunc func(api.Pod)) error {
+func (cl *ClusterVerification) ForEach(podFunc func(v1.Pod)) error {
pods, err := cl.podState.filter(cl.client, cl.namespace)
if err == nil {
if len(pods) == 0 {
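
The framework.go hunks above carry the core of the migration: Framework.ClientSet becomes the versioned release_1_5 clientset, the internal clientset moves to Framework.InternalClientset, and v1.ListOptions now carries label and field selectors as plain strings. A minimal sketch of what a caller looks like after this change (the listAppPods helper and the "app" label are illustrative, not part of the patch):

// --- sketch (not part of the patch): listing pods with the versioned clientset ---
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/test/e2e/framework"
)

// listAppPods lists pods in the test namespace by an "app" label, rendering
// the selector to the string form that v1.ListOptions expects.
func listAppPods(f *framework.Framework, app string) (*v1.PodList, error) {
	selector := labels.SelectorFromSet(labels.Set{"app": app})
	return f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{
		LabelSelector: selector.String(),
	})
}
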
diff --git a/test/e2e/framework/kubelet_stats.go b/test/e2e/framework/kubelet_stats.go
index 8856643e355..d5be4aff6c9 100644
--- a/test/e2e/framework/kubelet_stats.go
+++ b/test/e2e/framework/kubelet_stats.go
@@ -29,8 +29,8 @@ import (
cadvisorapi "github.com/google/cadvisor/info/v1"
"github.com/prometheus/common/model"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
kubeletmetrics "k8s.io/kubernetes/pkg/kubelet/metrics"
kubeletstats "k8s.io/kubernetes/pkg/kubelet/server/stats"
@@ -157,7 +157,7 @@ func NewRuntimeOperationMonitor(c clientset.Interface) *RuntimeOperationMonitor
client: c,
nodesRuntimeOps: make(map[string]NodeRuntimeOperationErrorRate),
}
- nodes, err := m.client.Core().Nodes().List(api.ListOptions{})
+ nodes, err := m.client.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Failf("RuntimeOperationMonitor: unable to get list of nodes: %v", err)
}
@@ -695,7 +695,7 @@ func NewResourceMonitor(c clientset.Interface, containerNames []string, pollingI
func (r *ResourceMonitor) Start() {
// It should be OK to monitor unschedulable Nodes
- nodes, err := r.client.Core().Nodes().List(api.ListOptions{})
+ nodes, err := r.client.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Failf("ResourceMonitor: unable to get list of nodes: %v", err)
}
diff --git a/test/e2e/framework/log_size_monitoring.go b/test/e2e/framework/log_size_monitoring.go
index 4a9da5750c8..16d607c8b0a 100644
--- a/test/e2e/framework/log_size_monitoring.go
+++ b/test/e2e/framework/log_size_monitoring.go
@@ -25,7 +25,7 @@ import (
"text/tabwriter"
"time"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)
const (
diff --git a/test/e2e/framework/metrics_util.go b/test/e2e/framework/metrics_util.go
index dc832bda1b1..8dcd6360dfa 100644
--- a/test/e2e/framework/metrics_util.go
+++ b/test/e2e/framework/metrics_util.go
@@ -28,7 +28,8 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/pkg/util/sets"
@@ -323,7 +324,7 @@ func getSchedulingLatency(c clientset.Interface) (SchedulingLatency, error) {
result := SchedulingLatency{}
// Check if master Node is registered
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
ExpectNoError(err)
var data string
diff --git a/test/e2e/framework/networking_utils.go b/test/e2e/framework/networking_utils.go
index 27142807493..14b9f3d24cb 100644
--- a/test/e2e/framework/networking_utils.go
+++ b/test/e2e/framework/networking_utils.go
@@ -24,10 +24,10 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- api "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+ coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/rand"
@@ -87,23 +87,23 @@ func getServiceSelector() map[string]string {
type NetworkingTestConfig struct {
// TestContainerPod is a test pod running the netexec image. It is capable
// of executing tcp/udp requests against ip:port.
- TestContainerPod *api.Pod
+ TestContainerPod *v1.Pod
// HostTestContainerPod is a pod running with hostNetworking=true, and the
// hostexec image.
- HostTestContainerPod *api.Pod
+ HostTestContainerPod *v1.Pod
// EndpointPods are the pods belonging to the Service created by this
// test config. Each invocation of `setup` creates a service with
// 1 pod per node running the netexecImage.
- EndpointPods []*api.Pod
+ EndpointPods []*v1.Pod
f *Framework
podClient *PodClient
// NodePortService is a Service with Type=NodePort spanning over all
// endpointPods.
- NodePortService *api.Service
+ NodePortService *v1.Service
// ExternalAddrs is a list of external IPs of nodes in the cluster.
ExternalAddrs []string
// Nodes is a list of nodes in the cluster.
- Nodes []api.Node
+ Nodes []v1.Node
// MaxTries is the number of retries tolerated for tests run against
// endpoints and services created by this config.
MaxTries int
@@ -298,41 +298,41 @@ func (config *NetworkingTestConfig) GetSelfURL(path string, expected string) {
}
}
-func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node string) *api.Pod {
- probe := &api.Probe{
+func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node string) *v1.Pod {
+ probe := &v1.Probe{
InitialDelaySeconds: 10,
TimeoutSeconds: 30,
PeriodSeconds: 10,
SuccessThreshold: 1,
FailureThreshold: 3,
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
Port: intstr.IntOrString{IntVal: EndpointHttpPort},
},
},
}
- pod := &api.Pod{
+ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: config.Namespace,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "webserver",
Image: NetexecImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
},
- Ports: []api.ContainerPort{
+ Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: EndpointHttpPort,
@@ -340,7 +340,7 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node s
{
Name: "udp",
ContainerPort: EndpointUdpPort,
- Protocol: api.ProtocolUDP,
+ Protocol: v1.ProtocolUDP,
},
},
LivenessProbe: probe,
@@ -355,28 +355,28 @@ func (config *NetworkingTestConfig) createNetShellPodSpec(podName string, node s
return pod
}
-func (config *NetworkingTestConfig) createTestPodSpec() *api.Pod {
- pod := &api.Pod{
+func (config *NetworkingTestConfig) createTestPodSpec() *v1.Pod {
+ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: testPodName,
Namespace: config.Namespace,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "webserver",
Image: NetexecImageName,
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
Command: []string{
"/netexec",
fmt.Sprintf("--http-port=%d", EndpointHttpPort),
fmt.Sprintf("--udp-port=%d", EndpointUdpPort),
},
- Ports: []api.ContainerPort{
+ Ports: []v1.ContainerPort{
{
Name: "http",
ContainerPort: TestContainerHttpPort,
@@ -390,15 +390,15 @@ func (config *NetworkingTestConfig) createTestPodSpec() *api.Pod {
}
func (config *NetworkingTestConfig) createNodePortService(selector map[string]string) {
- serviceSpec := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ serviceSpec := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: nodePortServiceName,
},
- Spec: api.ServiceSpec{
- Type: api.ServiceTypeNodePort,
- Ports: []api.ServicePort{
- {Port: ClusterHttpPort, Name: "http", Protocol: api.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)},
- {Port: ClusterUdpPort, Name: "udp", Protocol: api.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)},
+ Spec: v1.ServiceSpec{
+ Type: v1.ServiceTypeNodePort,
+ Ports: []v1.ServicePort{
+ {Port: ClusterHttpPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHttpPort)},
+ {Port: ClusterUdpPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUdpPort)},
},
Selector: selector,
},
@@ -434,7 +434,7 @@ func (config *NetworkingTestConfig) createTestPods() {
}
}
-func (config *NetworkingTestConfig) createService(serviceSpec *api.Service) *api.Service {
+func (config *NetworkingTestConfig) createService(serviceSpec *v1.Service) *v1.Service {
_, err := config.getServiceClient().Create(serviceSpec)
Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to create %s service: %v", serviceSpec.Name, err))
@@ -468,10 +468,10 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
By("Getting node addresses")
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
- config.ExternalAddrs = NodeAddresses(nodeList, api.NodeExternalIP)
+ config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeExternalIP)
if len(config.ExternalAddrs) < 2 {
// fall back to legacy IPs
- config.ExternalAddrs = NodeAddresses(nodeList, api.NodeLegacyHostIP)
+ config.ExternalAddrs = NodeAddresses(nodeList, v1.NodeLegacyHostIP)
}
Expect(len(config.ExternalAddrs)).To(BeNumerically(">=", 2), fmt.Sprintf("At least two nodes necessary with an external or LegacyHostIP"))
config.Nodes = nodeList.Items
@@ -481,9 +481,9 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
for _, p := range config.NodePortService.Spec.Ports {
switch p.Protocol {
- case api.ProtocolUDP:
+ case v1.ProtocolUDP:
config.NodeUdpPort = int(p.NodePort)
- case api.ProtocolTCP:
+ case v1.ProtocolTCP:
config.NodeHttpPort = int(p.NodePort)
default:
continue
@@ -495,7 +495,7 @@ func (config *NetworkingTestConfig) setup(selector map[string]string) {
func (config *NetworkingTestConfig) cleanup() {
nsClient := config.getNamespacesClient()
- nsList, err := nsClient.List(api.ListOptions{})
+ nsList, err := nsClient.List(v1.ListOptions{})
if err == nil {
for _, ns := range nsList.Items {
if strings.Contains(ns.Name, config.f.BaseName) && ns.Name != config.Namespace {
@@ -507,8 +507,8 @@ func (config *NetworkingTestConfig) cleanup() {
// shuffleNodes copies nodes from the specified slice into a copy in random
// order. It returns a new slice.
-func shuffleNodes(nodes []api.Node) []api.Node {
- shuffled := make([]api.Node, len(nodes))
+func shuffleNodes(nodes []v1.Node) []v1.Node {
+ shuffled := make([]v1.Node, len(nodes))
perm := rand.Perm(len(nodes))
for i, j := range perm {
shuffled[j] = nodes[i]
@@ -516,7 +516,7 @@ func shuffleNodes(nodes []api.Node) []api.Node {
return shuffled
}
-func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*api.Pod {
+func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector map[string]string) []*v1.Pod {
ExpectNoError(WaitForAllNodesSchedulable(config.f.ClientSet, 10*time.Minute))
nodeList := GetReadySchedulableNodesOrDie(config.f.ClientSet)
@@ -529,7 +529,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
}
// create pods, one for each node
- createdPods := make([]*api.Pod, 0, len(nodes))
+ createdPods := make([]*v1.Pod, 0, len(nodes))
for i, n := range nodes {
podName := fmt.Sprintf("%s-%d", podName, i)
pod := config.createNetShellPodSpec(podName, n.Name)
@@ -539,7 +539,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
}
// wait until all of them are up
- runningPods := make([]*api.Pod, 0, len(nodes))
+ runningPods := make([]*v1.Pod, 0, len(nodes))
for _, p := range createdPods {
ExpectNoError(config.f.WaitForPodReady(p.Name))
rp, err := config.getPodClient().Get(p.Name)
@@ -552,7 +552,7 @@ func (config *NetworkingTestConfig) createNetProxyPods(podName string, selector
func (config *NetworkingTestConfig) DeleteNetProxyPod() {
pod := config.EndpointPods[0]
- config.getPodClient().Delete(pod.Name, api.NewDeleteOptions(0))
+ config.getPodClient().Delete(pod.Name, v1.NewDeleteOptions(0))
config.EndpointPods = config.EndpointPods[1:]
// wait for pod being deleted.
err := WaitForPodToDisappear(config.f.ClientSet, config.Namespace, pod.Name, labels.Everything(), time.Second, wait.ForeverTestTimeout)
@@ -568,7 +568,7 @@ func (config *NetworkingTestConfig) DeleteNetProxyPod() {
time.Sleep(5 * time.Second)
}
-func (config *NetworkingTestConfig) createPod(pod *api.Pod) *api.Pod {
+func (config *NetworkingTestConfig) createPod(pod *v1.Pod) *v1.Pod {
return config.getPodClient().Create(pod)
}
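
networking_utils.go also shows where helper constructors now live: DeleteOptions comes from pkg/api/v1, as in the DeleteNetProxyPod hunk. A small sketch of the immediate-delete call as it reads after the migration (deletePodNow and its name argument are illustrative):

// --- sketch (not part of the patch): zero-grace-period delete via the framework PodClient ---
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// deletePodNow deletes a pod immediately; v1.NewDeleteOptions(0) requests a
// zero grace period, matching the DeleteNetProxyPod change above.
func deletePodNow(f *framework.Framework, name string) error {
	return f.PodClient().Delete(name, v1.NewDeleteOptions(0))
}
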
diff --git a/test/e2e/framework/nodes_util.go b/test/e2e/framework/nodes_util.go
index 7ff5e96ef06..c8eae299aeb 100644
--- a/test/e2e/framework/nodes_util.go
+++ b/test/e2e/framework/nodes_util.go
@@ -22,8 +22,8 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait"
)
@@ -141,15 +141,15 @@ func nodeUpgradeGKE(v string, img string) error {
// nodes it finds.
func CheckNodesReady(c clientset.Interface, nt time.Duration, expect int) ([]string, error) {
// First, keep getting all of the nodes until we get the number we expect.
- var nodeList *api.NodeList
+ var nodeList *v1.NodeList
var errLast error
start := time.Now()
found := wait.Poll(Poll, nt, func() (bool, error) {
// A rolling-update (GCE/GKE implementation of restart) can complete before the apiserver
// knows about all of the nodes. Thus, we retry the list nodes call
// until we get the expected number of nodes.
- nodeList, errLast = c.Core().Nodes().List(api.ListOptions{
- FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector()})
+ nodeList, errLast = c.Core().Nodes().List(v1.ListOptions{
+ FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String()})
if errLast != nil {
return false, nil
}
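
nodes_util.go applies the same shift to field selectors: fields.Set{...}.AsSelector() must now be rendered with .String() before it goes into v1.ListOptions. A compact sketch of that pattern, assuming c is the versioned clientset used throughout these files (listSchedulableNodes is an illustrative name):

// --- sketch (not part of the patch): field selectors as strings ---
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
)

// listSchedulableNodes lists nodes whose spec.unschedulable field is "false",
// passing the field selector in the string form v1.ListOptions expects.
func listSchedulableNodes(c clientset.Interface) (*v1.NodeList, error) {
	return c.Core().Nodes().List(v1.ListOptions{
		FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
	})
}
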
diff --git a/test/e2e/framework/pods.go b/test/e2e/framework/pods.go
index 654c012a539..7822d9b022e 100644
--- a/test/e2e/framework/pods.go
+++ b/test/e2e/framework/pods.go
@@ -22,9 +22,9 @@ import (
"sync"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
- unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+ "k8s.io/kubernetes/pkg/api/v1"
+ v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@@ -50,11 +50,11 @@ func (f *Framework) PodClient() *PodClient {
type PodClient struct {
f *Framework
- unversionedcore.PodInterface
+ v1core.PodInterface
}
// Create creates a new pod according to the framework specifications (don't wait for it to start).
-func (c *PodClient) Create(pod *api.Pod) *api.Pod {
+func (c *PodClient) Create(pod *v1.Pod) *v1.Pod {
c.mungeSpec(pod)
p, err := c.PodInterface.Create(pod)
ExpectNoError(err, "Error creating Pod")
@@ -62,7 +62,7 @@ func (c *PodClient) Create(pod *api.Pod) *api.Pod {
}
// CreateSync creates a new pod according to the framework specifications, and waits for it to start.
-func (c *PodClient) CreateSync(pod *api.Pod) *api.Pod {
+func (c *PodClient) CreateSync(pod *v1.Pod) *v1.Pod {
p := c.Create(pod)
ExpectNoError(c.f.WaitForPodRunning(p.Name))
// Get the newest pod after it becomes running, some status may change after pod created, such as pod ip.
@@ -72,12 +72,12 @@ func (c *PodClient) CreateSync(pod *api.Pod) *api.Pod {
}
// CreateBatch creates a batch of pods. All pods are created before waiting.
-func (c *PodClient) CreateBatch(pods []*api.Pod) []*api.Pod {
- ps := make([]*api.Pod, len(pods))
+func (c *PodClient) CreateBatch(pods []*v1.Pod) []*v1.Pod {
+ ps := make([]*v1.Pod, len(pods))
var wg sync.WaitGroup
for i, pod := range pods {
wg.Add(1)
- go func(i int, pod *api.Pod) {
+ go func(i int, pod *v1.Pod) {
defer wg.Done()
defer GinkgoRecover()
ps[i] = c.CreateSync(pod)
@@ -90,7 +90,7 @@ func (c *PodClient) CreateBatch(pods []*api.Pod) []*api.Pod {
// Update updates the pod object. It retries if there is a conflict, and returns an error if
// any other error occurs. name is the pod name, updateFn is the function updating the
// pod object.
-func (c *PodClient) Update(name string, updateFn func(pod *api.Pod)) {
+func (c *PodClient) Update(name string, updateFn func(pod *v1.Pod)) {
ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*30, func() (bool, error) {
pod, err := c.PodInterface.Get(name)
if err != nil {
@@ -112,7 +112,7 @@ func (c *PodClient) Update(name string, updateFn func(pod *api.Pod)) {
// DeleteSync deletes the pod and waits for the pod to disappear for `timeout`. If the pod doesn't
// disappear before the timeout, it will fail the test.
-func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout time.Duration) {
+func (c *PodClient) DeleteSync(name string, options *v1.DeleteOptions, timeout time.Duration) {
err := c.Delete(name, options)
if err != nil && !errors.IsNotFound(err) {
Failf("Failed to delete pod %q: %v", name, err)
@@ -122,7 +122,7 @@ func (c *PodClient) DeleteSync(name string, options *api.DeleteOptions, timeout
}
// mungeSpec applies test-suite specific transformations to the pod spec.
-func (c *PodClient) mungeSpec(pod *api.Pod) {
+func (c *PodClient) mungeSpec(pod *v1.Pod) {
if !TestContext.NodeE2E {
return
}
@@ -131,7 +131,7 @@ func (c *PodClient) mungeSpec(pod *api.Pod) {
pod.Spec.NodeName = TestContext.NodeName
// Node e2e does not support the default DNSClusterFirst policy. Set
// the policy to DNSDefault, which is configured per node.
- pod.Spec.DNSPolicy = api.DNSDefault
+ pod.Spec.DNSPolicy = v1.DNSDefault
// PrepullImages only works for node e2e now. For cluster e2e, image prepull is not enforced,
// so we should not munge ImagePullPolicy for cluster e2e pods.
@@ -142,7 +142,7 @@ func (c *PodClient) mungeSpec(pod *api.Pod) {
// during the test.
for i := range pod.Spec.Containers {
c := &pod.Spec.Containers[i]
- if c.ImagePullPolicy == api.PullAlways {
+ if c.ImagePullPolicy == v1.PullAlways {
// If the image pull policy is PullAlways, the image doesn't need to be in
// the white list or pre-pulled, because the image is expected to be pulled
// in the test anyway.
@@ -153,7 +153,7 @@ func (c *PodClient) mungeSpec(pod *api.Pod) {
Expect(ImageWhiteList.Has(c.Image)).To(BeTrue(), "Image %q is not in the white list, consider adding it to CommonImageWhiteList in test/e2e/common/util.go or NodeImageWhiteList in test/e2e_node/image_list.go", c.Image)
// Do not pull images during the tests because the images in the white list should have
// been prepulled.
- c.ImagePullPolicy = api.PullNever
+ c.ImagePullPolicy = v1.PullNever
}
}
@@ -162,11 +162,11 @@ func (c *PodClient) mungeSpec(pod *api.Pod) {
func (c *PodClient) WaitForSuccess(name string, timeout time.Duration) {
f := c.f
Expect(waitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
- func(pod *api.Pod) (bool, error) {
+ func(pod *v1.Pod) (bool, error) {
switch pod.Status.Phase {
- case api.PodFailed:
+ case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with reason: %q, message: %q", name, pod.Status.Reason, pod.Status.Message)
- case api.PodSucceeded:
+ case v1.PodSucceeded:
return true, nil
default:
return false, nil
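
pods.go moves PodClient onto the versioned typed client, so test pods are built from pkg/api/v1 types end to end. A sketch of the resulting call pattern (pod name, image, and command are placeholders, not taken from the patch):

// --- sketch (not part of the patch): creating a pod through the migrated PodClient ---
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/test/e2e/framework"
)

// runSleeperPod builds a v1.Pod and uses the framework's PodClient to create
// it and wait for it to reach the Running phase.
func runSleeperPod(f *framework.Framework) *v1.Pod {
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "example-sleeper"},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{{
				Name:    "sleeper",
				Image:   "gcr.io/google_containers/busybox", // placeholder image
				Command: []string{"sleep", "3600"},
			}},
		},
	}
	// CreateSync creates the pod and blocks until it is running.
	return f.PodClient().CreateSync(pod)
}
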
diff --git a/test/e2e/framework/resource_usage_gatherer.go b/test/e2e/framework/resource_usage_gatherer.go
index 16d65443977..70925e4dfc2 100644
--- a/test/e2e/framework/resource_usage_gatherer.go
+++ b/test/e2e/framework/resource_usage_gatherer.go
@@ -29,8 +29,8 @@ import (
"time"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
"k8s.io/kubernetes/pkg/util/system"
)
@@ -250,7 +250,7 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
finished: false,
})
} else {
- pods, err := c.Core().Pods("kube-system").List(api.ListOptions{})
+ pods, err := c.Core().Pods("kube-system").List(v1.ListOptions{})
if err != nil {
Logf("Error while listing Pods: %v", err)
return nil, err
@@ -262,14 +262,14 @@ func NewResourceUsageGatherer(c clientset.Interface, options ResourceGathererOpt
g.containerIDs = append(g.containerIDs, containerID)
}
}
- nodeList, err := c.Core().Nodes().List(api.ListOptions{})
+ nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Logf("Error while listing Nodes: %v", err)
return nil, err
}
for _, node := range nodeList.Items {
- if !options.masterOnly || system.IsMasterNode(&node) {
+ if !options.masterOnly || system.IsMasterNode(node.Name) {
g.workerWg.Add(1)
g.workers = append(g.workers, resourceGatherWorker{
c: c,
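
Besides the clientset swap, resource_usage_gatherer.go picks up a signature change: system.IsMasterNode is now called with the node name rather than a node pointer. A sketch of filtering a node list with the new form (workerNodes is an illustrative helper):

// --- sketch (not part of the patch): name-based IsMasterNode ---
package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/util/system"
)

// workerNodes keeps only the nodes that are not the master, using the
// name-based check the hunk above switches to.
func workerNodes(nodeList *v1.NodeList) []v1.Node {
	workers := []v1.Node{}
	for _, node := range nodeList.Items {
		if !system.IsMasterNode(node.Name) {
			workers = append(workers, node)
		}
	}
	return workers
}
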
diff --git a/test/e2e/framework/util.go b/test/e2e/framework/util.go
index 1864cb9cd98..e059965bcff 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -48,15 +48,16 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- "k8s.io/kubernetes/pkg/apis/apps"
- "k8s.io/kubernetes/pkg/apis/batch"
- "k8s.io/kubernetes/pkg/apis/extensions"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
- "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+ apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+ batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+ extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+ "k8s.io/kubernetes/pkg/client/conditions"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/typed/discovery"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
- client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/client/unversioned/clientcmd"
clientcmdapi "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
@@ -250,10 +251,10 @@ func GetServicesProxyRequest(c clientset.Interface, request *restclient.Request)
// unique identifier of the e2e run
var RunId = uuid.NewUUID()
-type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*api.Namespace, error)
+type CreateTestingNSFn func(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error)
type ContainerFailures struct {
- status *api.ContainerStateTerminated
+ status *v1.ContainerStateTerminated
Restarts int
}
@@ -377,10 +378,10 @@ var ProvidersWithSSH = []string{"gce", "gke", "aws"}
// providersWithMasterSSH are those providers where master node is accessible with SSH
var providersWithMasterSSH = []string{"gce", "gke", "kubemark", "aws"}
-type podCondition func(pod *api.Pod) (bool, error)
+type podCondition func(pod *v1.Pod) (bool, error)
// logPodStates logs basic info of provided pods for debugging.
-func logPodStates(pods []api.Pod) {
+func logPodStates(pods []v1.Pod) {
// Find maximum widths for pod, node, and phase strings for column printing.
maxPodW, maxNodeW, maxPhaseW, maxGraceW := len("POD"), len("NODE"), len("PHASE"), len("GRACE")
for i := range pods {
@@ -416,7 +417,7 @@ func logPodStates(pods []api.Pod) {
}
// errorBadPodsStates creates an error message with basic info about bad pods for debugging.
-func errorBadPodsStates(badPods []api.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
+func errorBadPodsStates(badPods []v1.Pod, desiredPods int, ns, desiredState string, timeout time.Duration) string {
errStr := fmt.Sprintf("%d / %d pods in namespace %q are NOT in %s state in %v\n", len(badPods), desiredPods, ns, desiredState, timeout)
// Print bad pods info only if there are fewer than 10 bad pods
if len(badPods) > 10 {
@@ -444,10 +445,10 @@ func errorBadPodsStates(badPods []api.Pod, desiredPods int, ns, desiredState str
// pods have been created.
func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[string]string, timeout time.Duration) error {
successPodSelector := labels.SelectorFromSet(successPodLabels)
- start, badPods, desiredPods := time.Now(), []api.Pod{}, 0
+ start, badPods, desiredPods := time.Now(), []v1.Pod{}, 0
if wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
- podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: successPodSelector})
+ podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: successPodSelector.String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return false, nil
@@ -456,10 +457,10 @@ func WaitForPodsSuccess(c clientset.Interface, ns string, successPodLabels map[s
Logf("Waiting for pods to enter Success, but no pods in %q match label %v", ns, successPodLabels)
return true, nil
}
- badPods = []api.Pod{}
+ badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
- if pod.Status.Phase != api.PodSucceeded {
+ if pod.Status.Phase != v1.PodSucceeded {
badPods = append(badPods, pod)
}
}
@@ -510,7 +511,7 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
wg := sync.WaitGroup{}
wg.Add(1)
var waitForSuccessError error
- badPods := []api.Pod{}
+ badPods := []v1.Pod{}
desiredPods := 0
go func() {
waitForSuccessError = WaitForPodsSuccess(c, ns, ignoreLabels, timeout)
@@ -525,34 +526,34 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
replicas, replicaOk := int32(0), int32(0)
if hasReadyReplicas {
- rcList, err := c.Core().ReplicationControllers(ns).List(api.ListOptions{})
+ rcList, err := c.Core().ReplicationControllers(ns).List(v1.ListOptions{})
if err != nil {
Logf("Error getting replication controllers in namespace '%s': %v", ns, err)
return false, nil
}
for _, rc := range rcList.Items {
- replicas += rc.Spec.Replicas
+ replicas += *rc.Spec.Replicas
replicaOk += rc.Status.ReadyReplicas
}
- rsList, err := c.Extensions().ReplicaSets(ns).List(api.ListOptions{})
+ rsList, err := c.Extensions().ReplicaSets(ns).List(v1.ListOptions{})
if err != nil {
Logf("Error getting replication sets in namespace %q: %v", ns, err)
return false, nil
}
for _, rs := range rsList.Items {
- replicas += rs.Spec.Replicas
+ replicas += *rs.Spec.Replicas
replicaOk += rs.Status.ReadyReplicas
}
}
- podList, err := c.Core().Pods(ns).List(api.ListOptions{})
+ podList, err := c.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
Logf("Error getting pods in namespace '%s': %v", ns, err)
return false, nil
}
nOk := int32(0)
- badPods = []api.Pod{}
+ badPods = []v1.Pod{}
desiredPods = len(podList.Items)
for _, pod := range podList.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {
@@ -562,10 +563,10 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
if res, err := testutils.PodRunningReady(&pod); res && err == nil {
nOk++
} else {
- if pod.Status.Phase != api.PodFailed {
+ if pod.Status.Phase != v1.PodFailed {
Logf("The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed", pod.ObjectMeta.Name, pod.Status.Phase)
badPods = append(badPods, pod)
- } else if _, ok := pod.Annotations[api.CreatedByAnnotation]; !ok {
+ } else if _, ok := pod.Annotations[v1.CreatedByAnnotation]; !ok {
Logf("Pod %s is Failed, but it's not controlled by a controller", pod.ObjectMeta.Name)
badPods = append(badPods, pod)
}
@@ -594,8 +595,8 @@ func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods int32, ti
return nil
}
-func podFromManifest(filename string) (*api.Pod, error) {
- var pod api.Pod
+func podFromManifest(filename string) (*v1.Pod, error) {
+ var pod v1.Pod
Logf("Parsing pod from %v", filename)
data := ReadOrDie(filename)
json, err := utilyaml.ToJSON(data)
@@ -640,7 +641,7 @@ func RunKubernetesServiceTestContainer(c clientset.Interface, ns string) {
}
}
-func kubectlLogPod(c clientset.Interface, pod api.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
+func kubectlLogPod(c clientset.Interface, pod v1.Pod, containerNameSubstr string, logFunc func(ftm string, args ...interface{})) {
for _, container := range pod.Spec.Containers {
if strings.Contains(container.Name, containerNameSubstr) {
// Contains() matches all strings if substr is empty
@@ -658,7 +659,7 @@ func kubectlLogPod(c clientset.Interface, pod api.Pod, containerNameSubstr strin
}
func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm string, args ...interface{})) {
- podList, err := c.Core().Pods(ns).List(api.ListOptions{})
+ podList, err := c.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
logFunc("Error getting pods in namespace '%s': %v", ns, err)
return
@@ -672,7 +673,7 @@ func LogFailedContainers(c clientset.Interface, ns string, logFunc func(ftm stri
}
func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string, logFunc func(ftm string, args ...interface{})) {
- podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)})
+ podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
logFunc("Error getting pods in namespace %q: %v", ns, err)
return
@@ -684,7 +685,7 @@ func LogPodsWithLabels(c clientset.Interface, ns string, match map[string]string
}
func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[string]string, containerSubstr string, logFunc func(ftm string, args ...interface{})) {
- podList, err := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.SelectorFromSet(match)})
+ podList, err := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.SelectorFromSet(match).String()})
if err != nil {
Logf("Error getting pods in namespace %q: %v", ns, err)
return
@@ -699,7 +700,7 @@ func LogContainersInPodsWithLabels(c clientset.Interface, ns string, match map[s
// Returns the list of deleted namespaces or an error.
func DeleteNamespaces(c clientset.Interface, deleteFilter, skipFilter []string) ([]string, error) {
By("Deleting namespaces")
- nsList, err := c.Core().Namespaces().List(api.ListOptions{})
+ nsList, err := c.Core().Namespaces().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
var deleted []string
var wg sync.WaitGroup
@@ -746,7 +747,7 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
//Now POLL until all namespaces have been eradicated.
return wait.Poll(2*time.Second, timeout,
func() (bool, error) {
- nsList, err := c.Core().Namespaces().List(api.ListOptions{})
+ nsList, err := c.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
return false, err
}
@@ -760,11 +761,11 @@ func WaitForNamespacesDeleted(c clientset.Interface, namespaces []string, timeou
}
func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
- w, err := c.Core().ServiceAccounts(ns).Watch(api.SingleObject(api.ObjectMeta{Name: serviceAccountName}))
+ w, err := c.Core().ServiceAccounts(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: serviceAccountName}))
if err != nil {
return err
}
- _, err = watch.Until(timeout, w, client.ServiceAccountHasSecrets)
+ _, err = watch.Until(timeout, w, conditions.ServiceAccountHasSecrets)
return err
}
@@ -795,10 +796,10 @@ func waitForPodCondition(c clientset.Interface, ns, podName, desc string, timeou
// WaitForMatchPodsCondition finds matching pods based on the input ListOptions, then
// waits and checks that all matching pods are in the given podCondition.
-func WaitForMatchPodsCondition(c clientset.Interface, opts api.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
+func WaitForMatchPodsCondition(c clientset.Interface, opts v1.ListOptions, desc string, timeout time.Duration, condition podCondition) error {
Logf("Waiting up to %v for matching pods' status to be %s", timeout, desc)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
- pods, err := c.Core().Pods(api.NamespaceAll).List(opts)
+ pods, err := c.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil {
return err
}
@@ -840,7 +841,7 @@ func WaitForFederationApiserverReady(c *federation_release_1_5.Clientset) error
}
// WaitForPersistentVolumePhase waits for a PersistentVolume to be in a specific phase or until timeout occurs, whichever comes first.
-func WaitForPersistentVolumePhase(phase api.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
+func WaitForPersistentVolumePhase(phase v1.PersistentVolumePhase, c clientset.Interface, pvName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolume %s to have phase %s", timeout, pvName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pv, err := c.Core().PersistentVolumes().Get(pvName)
@@ -880,7 +881,7 @@ func WaitForPersistentVolumeDeleted(c clientset.Interface, pvName string, Poll,
}
// WaitForPersistentVolumeClaimPhase waits for a PersistentVolumeClaim to be in a specific phase or until timeout occurs, whichever comes first.
-func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
+func WaitForPersistentVolumeClaimPhase(phase v1.PersistentVolumeClaimPhase, c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error {
Logf("Waiting up to %v for PersistentVolumeClaim %s to have phase %s", timeout, pvcName, phase)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
pvc, err := c.Core().PersistentVolumeClaims(ns).Get(pvcName)
@@ -901,22 +902,22 @@ func WaitForPersistentVolumeClaimPhase(phase api.PersistentVolumeClaimPhase, c c
// CreateTestingNS should be used by every test, note that we append a common prefix to the provided test name.
// Please see NewFramework instead of using this directly.
-func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*api.Namespace, error) {
+func CreateTestingNS(baseName string, c clientset.Interface, labels map[string]string) (*v1.Namespace, error) {
if labels == nil {
labels = map[string]string{}
}
labels["e2e-run"] = string(RunId)
- namespaceObj := &api.Namespace{
- ObjectMeta: api.ObjectMeta{
+ namespaceObj := &v1.Namespace{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: fmt.Sprintf("e2e-tests-%v-", baseName),
Namespace: "",
Labels: labels,
},
- Status: api.NamespaceStatus{},
+ Status: v1.NamespaceStatus{},
}
// Be robust about making the namespace creation call.
- var got *api.Namespace
+ var got *v1.Namespace
if err := wait.PollImmediate(Poll, 30*time.Second, func() (bool, error) {
var err error
got, err = c.Core().Namespaces().Create(namespaceObj)
@@ -958,7 +959,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
Logf("Waiting for terminating namespaces to be deleted...")
for start := time.Now(); time.Since(start) < timeout; time.Sleep(15 * time.Second) {
- namespaces, err := c.Core().Namespaces().List(api.ListOptions{})
+ namespaces, err := c.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
Logf("Listing namespaces failed: %v", err)
continue
@@ -966,7 +967,7 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error {
terminating := 0
for _, ns := range namespaces.Items {
if strings.HasPrefix(ns.ObjectMeta.Name, "e2e-tests-") && ns.ObjectMeta.Name != skip {
- if ns.Status.Phase == api.NamespaceActive {
+ if ns.Status.Phase == v1.NamespaceActive {
return fmt.Errorf("Namespace %s is active", ns.ObjectMeta.Name)
}
terminating++
@@ -1041,7 +1042,7 @@ func deleteNS(c clientset.Interface, clientPool dynamic.ClientPool, namespace st
// logNamespaces logs the number of namespaces by phase.
// namespace is the namespace the test was operating against (and which failed to delete), so it can be grepped in logs.
func logNamespaces(c clientset.Interface, namespace string) {
- namespaceList, err := c.Core().Namespaces().List(api.ListOptions{})
+ namespaceList, err := c.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
Logf("namespace: %v, unable to list namespaces: %v", namespace, err)
return
@@ -1050,7 +1051,7 @@ func logNamespaces(c clientset.Interface, namespace string) {
numActive := 0
numTerminating := 0
for _, namespace := range namespaceList.Items {
- if namespace.Status.Phase == api.NamespaceActive {
+ if namespace.Status.Phase == v1.NamespaceActive {
numActive++
} else {
numTerminating++
@@ -1076,7 +1077,7 @@ func logNamespace(c clientset.Interface, namespace string) {
// countRemainingPods queries the server to count the number of remaining pods and the number of pods that had a missing deletion timestamp.
func countRemainingPods(c clientset.Interface, namespace string) (int, int, error) {
// check for remaining pods
- pods, err := c.Core().Pods(namespace).List(api.ListOptions{})
+ pods, err := c.Core().Pods(namespace).List(v1.ListOptions{})
if err != nil {
return 0, 0, err
}
@@ -1156,8 +1157,8 @@ func hasRemainingContent(c clientset.Interface, clientPool dynamic.ClientPool, n
}
func ContainerInitInvariant(older, newer runtime.Object) error {
- oldPod := older.(*api.Pod)
- newPod := newer.(*api.Pod)
+ oldPod := older.(*v1.Pod)
+ newPod := newer.(*v1.Pod)
if len(oldPod.Spec.InitContainers) == 0 {
return nil
}
@@ -1183,7 +1184,7 @@ func ContainerInitInvariant(older, newer runtime.Object) error {
return nil
}
-func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) {
+func podInitialized(pod *v1.Pod) (ok bool, failed bool, err error) {
allInit := true
initFailed := false
for _, s := range pod.Status.InitContainerStatuses {
@@ -1204,7 +1205,7 @@ func podInitialized(pod *api.Pod) (ok bool, failed bool, err error) {
return allInit, initFailed, nil
}
-func initContainersInvariants(pod *api.Pod) error {
+func initContainersInvariants(pod *v1.Pod) error {
allInit, initFailed, err := podInitialized(pod)
if err != nil {
return err
@@ -1219,7 +1220,7 @@ func initContainersInvariants(pod *api.Pod) error {
}
}
}
- _, c := api.GetPodCondition(&pod.Status, api.PodInitialized)
+ _, c := v1.GetPodCondition(&pod.Status, v1.PodInitialized)
if c == nil {
return fmt.Errorf("pod does not have initialized condition")
}
@@ -1227,11 +1228,11 @@ func initContainersInvariants(pod *api.Pod) error {
return fmt.Errorf("PodInitialized condition should always have a transition time")
}
switch {
- case c.Status == api.ConditionUnknown:
+ case c.Status == v1.ConditionUnknown:
return fmt.Errorf("PodInitialized condition should never be Unknown")
- case c.Status == api.ConditionTrue && (initFailed || !allInit):
+ case c.Status == v1.ConditionTrue && (initFailed || !allInit):
return fmt.Errorf("PodInitialized condition was True but all not all containers initialized")
- case c.Status == api.ConditionFalse && (!initFailed && allInit):
+ case c.Status == v1.ConditionFalse && (!initFailed && allInit):
return fmt.Errorf("PodInitialized condition was False but all containers initialized")
}
return nil
@@ -1260,11 +1261,11 @@ func CheckInvariants(events []watch.Event, fns ...InvariantFunc) error {
// Waits default amount of time (PodStartTimeout) for the specified pod to become running.
// Returns an error if timeout occurs first, or pod goes in to failed state.
-func WaitForPodRunningInNamespace(c clientset.Interface, pod *api.Pod) error {
+func WaitForPodRunningInNamespace(c clientset.Interface, pod *v1.Pod) error {
// this short-circuit is needed for cases when we pass a list of pods instead
// of a newly created pod (e.g. VerifyPods), which means we are getting an already
// running pod for which waiting does not make sense and will always fail
- if pod.Status.Phase == api.PodRunning {
+ if pod.Status.Phase == v1.PodRunning {
return nil
}
return waitTimeoutForPodRunningInNamespace(c, pod.Name, pod.Namespace, pod.ResourceVersion, PodStartTimeout)
@@ -1284,11 +1285,11 @@ func waitForPodRunningInNamespaceSlow(c clientset.Interface, podName, namespace,
}
func waitTimeoutForPodRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
- w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
+ w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
- _, err = watch.Until(timeout, w, client.PodRunning)
+ _, err = watch.Until(timeout, w, conditions.PodRunning)
return err
}
@@ -1299,20 +1300,20 @@ func WaitForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namesp
}
func WaitTimeoutForPodNoLongerRunningInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
- w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
+ w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
- _, err = watch.Until(timeout, w, client.PodCompleted)
+ _, err = watch.Until(timeout, w, conditions.PodCompleted)
return err
}
func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace, resourceVersion string, timeout time.Duration) error {
- w, err := c.Core().Pods(namespace).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
+ w, err := c.Core().Pods(namespace).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
- _, err = watch.Until(timeout, w, client.PodRunningAndReady)
+ _, err = watch.Until(timeout, w, conditions.PodRunningAndReady)
return err
}
@@ -1320,19 +1321,19 @@ func waitTimeoutForPodReadyInNamespace(c clientset.Interface, podName, namespace
// The resourceVersion is used when Watching object changes, it tells since when we care
// about changes to the pod.
func WaitForPodNotPending(c clientset.Interface, ns, podName, resourceVersion string) error {
- w, err := c.Core().Pods(ns).Watch(api.SingleObject(api.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
+ w, err := c.Core().Pods(ns).Watch(v1.SingleObject(v1.ObjectMeta{Name: podName, ResourceVersion: resourceVersion}))
if err != nil {
return err
}
- _, err = watch.Until(PodStartTimeout, w, client.PodNotPending)
+ _, err = watch.Until(PodStartTimeout, w, conditions.PodNotPending)
return err
}
// waitForPodTerminatedInNamespace returns an error if it took too long for the pod
// to terminate or if the pod terminated with an unexpected reason.
func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, namespace string) error {
- return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *api.Pod) (bool, error) {
- if pod.Status.Phase == api.PodFailed {
+ return waitForPodCondition(c, namespace, podName, "terminated due to deadline exceeded", PodStartTimeout, func(pod *v1.Pod) (bool, error) {
+ if pod.Status.Phase == v1.PodFailed {
if pod.Status.Reason == reason {
return true, nil
} else {
@@ -1346,15 +1347,15 @@ func waitForPodTerminatedInNamespace(c clientset.Interface, podName, reason, nam
// waitForPodSuccessInNamespaceTimeout returns nil if the pod reached state success, or an error if it reached failure or ran too long.
func waitForPodSuccessInNamespaceTimeout(c clientset.Interface, podName string, namespace string, timeout time.Duration) error {
- return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *api.Pod) (bool, error) {
- if pod.Spec.RestartPolicy == api.RestartPolicyAlways {
+ return waitForPodCondition(c, namespace, podName, "success or failure", timeout, func(pod *v1.Pod) (bool, error) {
+ if pod.Spec.RestartPolicy == v1.RestartPolicyAlways {
return true, fmt.Errorf("pod %q will never terminate with a succeeded state since its restart policy is Always", podName)
}
switch pod.Status.Phase {
- case api.PodSucceeded:
+ case v1.PodSucceeded:
By("Saw pod success")
return true, nil
- case api.PodFailed:
+ case v1.PodFailed:
return true, fmt.Errorf("pod %q failed with status: %+v", podName, pod.Status)
default:
return false, nil
@@ -1374,12 +1375,12 @@ func WaitForPodSuccessInNamespaceSlow(c clientset.Interface, podName string, nam
// waitForRCPodOnNode returns the pod from the given replication controller (described by rcName) which is scheduled on the given node.
// In case of failure or too long waiting time, an error is returned.
-func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*api.Pod, error) {
+func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*v1.Pod, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
- var p *api.Pod = nil
+ var p *v1.Pod = nil
err := wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
Logf("Waiting for pod %s to appear on node %s", rcName, node)
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
@@ -1398,10 +1399,10 @@ func waitForRCPodOnNode(c clientset.Interface, ns, rcName, node string) (*api.Po
// WaitForRCToStabilize waits till the RC has a matching generation/replica count between spec and status.
func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.Duration) error {
- options := api.ListOptions{FieldSelector: fields.Set{
+ options := v1.ListOptions{FieldSelector: fields.Set{
"metadata.name": name,
"metadata.namespace": ns,
- }.AsSelector()}
+ }.AsSelector().String()}
w, err := c.Core().ReplicationControllers(ns).Watch(options)
if err != nil {
return err
@@ -1412,14 +1413,14 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
return false, apierrs.NewNotFound(unversioned.GroupResource{Resource: "replicationcontrollers"}, "")
}
switch rc := event.Object.(type) {
- case *api.ReplicationController:
+ case *v1.ReplicationController:
if rc.Name == name && rc.Namespace == ns &&
rc.Generation <= rc.Status.ObservedGeneration &&
- rc.Spec.Replicas == rc.Status.Replicas {
+ *(rc.Spec.Replicas) == rc.Status.Replicas {
return true, nil
}
Logf("Waiting for rc %s to stabilize, generation %v observed generation %v spec.replicas %d status.replicas %d",
- name, rc.Generation, rc.Status.ObservedGeneration, rc.Spec.Replicas, rc.Status.Replicas)
+ name, rc.Generation, rc.Status.ObservedGeneration, *(rc.Spec.Replicas), rc.Status.Replicas)
}
return false, nil
})
@@ -1429,7 +1430,7 @@ func WaitForRCToStabilize(c clientset.Interface, ns, name string, timeout time.D
func WaitForPodToDisappear(c clientset.Interface, ns, podName string, label labels.Selector, interval, timeout time.Duration) error {
return wait.PollImmediate(interval, timeout, func() (bool, error) {
Logf("Waiting for pod %s to disappear", podName)
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
@@ -1493,7 +1494,7 @@ func WaitForService(c clientset.Interface, namespace, name string, exist bool, i
func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName string, expectNum int, interval, timeout time.Duration) error {
return wait.Poll(interval, timeout, func() (bool, error) {
Logf("Waiting for amount of service:%s endpoints to be %d", serviceName, expectNum)
- list, err := c.Core().Endpoints(namespace).List(api.ListOptions{})
+ list, err := c.Core().Endpoints(namespace).List(v1.ListOptions{})
if err != nil {
return false, err
}
@@ -1507,7 +1508,7 @@ func WaitForServiceEndpointsNum(c clientset.Interface, namespace, serviceName st
})
}
-func countEndpointsNum(e *api.Endpoints) int {
+func countEndpointsNum(e *v1.Endpoints) int {
num := 0
for _, sub := range e.Subsets {
num += len(sub.Addresses)
@@ -1556,10 +1557,10 @@ type podProxyResponseChecker struct {
label labels.Selector
controllerName string
respondName bool // Whether the pod should respond with its own name.
- pods *api.PodList
+ pods *v1.PodList
}
-func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *api.PodList) podProxyResponseChecker {
+func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Selector, controllerName string, respondName bool, pods *v1.PodList) podProxyResponseChecker {
return podProxyResponseChecker{c, ns, label, controllerName, respondName, pods}
}
@@ -1567,7 +1568,7 @@ func PodProxyResponseChecker(c clientset.Interface, ns string, label labels.Sele
// reply with their own pod name.
func (r podProxyResponseChecker) CheckAllResponses() (done bool, err error) {
successes := 0
- options := api.ListOptions{LabelSelector: r.label}
+ options := v1.ListOptions{LabelSelector: r.label.String()}
currentPods, err := r.c.Core().Pods(r.ns).List(options)
Expect(err).NotTo(HaveOccurred())
for i, pod := range r.pods.Items {
@@ -1680,21 +1681,21 @@ func KubectlVersion() (semver.Version, error) {
return version.Parse(matches[1])
}
-func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *api.PodList) error {
+func PodsResponding(c clientset.Interface, ns, name string, wantName bool, pods *v1.PodList) error {
By("trying to dial each unique pod")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return wait.PollImmediate(Poll, podRespondingTimeout, PodProxyResponseChecker(c, ns, label, name, wantName, pods).CheckAllResponses)
}
-func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*api.PodList, error) {
+func PodsCreated(c clientset.Interface, ns, name string, replicas int32) (*v1.PodList, error) {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
return PodsCreatedByLabel(c, ns, name, replicas, label)
}
-func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*api.PodList, error) {
+func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32, label labels.Selector) (*v1.PodList, error) {
timeout := 2 * time.Minute
for start := time.Now(); time.Since(start) < timeout; time.Sleep(5 * time.Second) {
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
// List the pods, making sure we observe all the replicas.
pods, err := c.Core().Pods(ns).List(options)
@@ -1702,7 +1703,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
return nil, err
}
- created := []api.Pod{}
+ created := []v1.Pod{}
for _, pod := range pods.Items {
if pod.DeletionTimestamp != nil {
continue
@@ -1719,7 +1720,7 @@ func PodsCreatedByLabel(c clientset.Interface, ns, name string, replicas int32,
return nil, fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", name, timeout, replicas)
}
-func podsRunning(c clientset.Interface, pods *api.PodList) []error {
+func podsRunning(c clientset.Interface, pods *v1.PodList) []error {
// Wait for the pods to enter the running state. Waiting loops until the pods
// are running so non-running pods cause a timeout for this test.
By("ensuring each pod is running")
@@ -1727,7 +1728,7 @@ func podsRunning(c clientset.Interface, pods *api.PodList) []error {
error_chan := make(chan error)
for _, pod := range pods.Items {
- go func(p api.Pod) {
+ go func(p v1.Pod) {
error_chan <- WaitForPodRunningInNamespace(c, &p)
}(pod)
}
@@ -1845,7 +1846,15 @@ func LoadFederationClientset_1_5() (*federation_release_1_5.Clientset, error) {
return c, nil
}
-func LoadInternalClientset() (*clientset.Clientset, error) {
+func LoadInternalClientset() (*internalclientset.Clientset, error) {
+ config, err := LoadConfig()
+ if err != nil {
+ return nil, fmt.Errorf("error creating client: %v", err.Error())
+ }
+ return internalclientset.NewForConfig(config)
+}
+
+func LoadClientset() (*clientset.Clientset, error) {
config, err := LoadConfig()
if err != nil {
return nil, fmt.Errorf("error creating client: %v", err.Error())
@@ -1853,14 +1862,6 @@ func LoadInternalClientset() (*clientset.Clientset, error) {
return clientset.NewForConfig(config)
}
-func LoadClientset() (*release_1_5.Clientset, error) {
- config, err := LoadConfig()
- if err != nil {
- return nil, fmt.Errorf("error creating client: %v", err.Error())
- }
- return release_1_5.NewForConfig(config)
-}
-
// randomSuffix provides a random string to append to pods, services, rcs.
// TODO: Allow service names to have the same form as names
// for pods and replication controllers so we don't
@@ -2156,7 +2157,7 @@ func TryKill(cmd *exec.Cmd) {
// for all of the containers in the podSpec to move into the 'Success' status, and tests
// the specified container log against the given expected output using the given matcher.
func (f *Framework) testContainerOutputMatcher(scenarioName string,
- pod *api.Pod,
+ pod *v1.Pod,
containerIndex int,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) {
@@ -2170,7 +2171,7 @@ func (f *Framework) testContainerOutputMatcher(scenarioName string,
// MatchContainerOutput creates a pod and waits for all of its containers to exit with success.
// It then checks, using the given matcher, that the specified container's output matches each expectedOutput.
func (f *Framework) MatchContainerOutput(
- pod *api.Pod,
+ pod *v1.Pod,
containerName string,
expectedOutput []string,
matcher func(string, ...interface{}) gomegatypes.GomegaMatcher) error {
@@ -2180,7 +2181,7 @@ func (f *Framework) MatchContainerOutput(
createdPod := podClient.Create(pod)
defer func() {
By("delete the pod")
- podClient.DeleteSync(createdPod.Name, &api.DeleteOptions{}, podNoLongerRunningTimeout)
+ podClient.DeleteSync(createdPod.Name, &v1.DeleteOptions{}, podNoLongerRunningTimeout)
}()
// Wait for client pod to complete.
@@ -2260,9 +2261,9 @@ func DumpEventsInNamespace(eventsLister EventsLister, namespace string) {
// you may or may not see the killing/deletion/Cleanup events.
}
-func DumpAllNamespaceInfo(c clientset.Interface, cs *release_1_5.Clientset, namespace string) {
+func DumpAllNamespaceInfo(c clientset.Interface, namespace string) {
DumpEventsInNamespace(func(opts v1.ListOptions, ns string) (*v1.EventList, error) {
- return cs.Core().Events(ns).List(opts)
+ return c.Core().Events(ns).List(opts)
}, namespace)
// If the cluster is large, then the following logs are basically useless, because:
@@ -2270,7 +2271,7 @@ func DumpAllNamespaceInfo(c clientset.Interface, cs *release_1_5.Clientset, name
// 2. there are so many of them that working with them is mostly impossible
// So we dump them only if the cluster is relatively small.
maxNodesForDump := 20
- if nodes, err := c.Core().Nodes().List(api.ListOptions{}); err == nil {
+ if nodes, err := c.Core().Nodes().List(v1.ListOptions{}); err == nil {
if len(nodes.Items) <= maxNodesForDump {
dumpAllPodInfo(c)
dumpAllNodeInfo(c)
@@ -2296,7 +2297,7 @@ func (o byFirstTimestamp) Less(i, j int) bool {
}
func dumpAllPodInfo(c clientset.Interface) {
- pods, err := c.Core().Pods("").List(api.ListOptions{})
+ pods, err := c.Core().Pods("").List(v1.ListOptions{})
if err != nil {
Logf("unable to fetch pod debug info: %v", err)
}
@@ -2305,7 +2306,7 @@ func dumpAllPodInfo(c clientset.Interface) {
func dumpAllNodeInfo(c clientset.Interface) {
// It should be OK to list unschedulable Nodes here.
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Logf("unable to fetch node list: %v", err)
return
@@ -2356,30 +2357,30 @@ func DumpNodeDebugInfo(c clientset.Interface, nodeNames []string, logFunc func(f
// getNodeEvents returns the kubelet events from the given node. This includes kubelet
// restart and node unhealthy events. Note that listing events like this will mess
// with latency metrics; beware of calling it during a test.
-func getNodeEvents(c clientset.Interface, nodeName string) []api.Event {
+func getNodeEvents(c clientset.Interface, nodeName string) []v1.Event {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": nodeName,
- "involvedObject.namespace": api.NamespaceAll,
+ "involvedObject.namespace": v1.NamespaceAll,
"source": "kubelet",
- }.AsSelector()
- options := api.ListOptions{FieldSelector: selector}
+ }.AsSelector().String()
+ options := v1.ListOptions{FieldSelector: selector}
events, err := c.Core().Events(api.NamespaceSystem).List(options)
if err != nil {
Logf("Unexpected error retrieving node events %v", err)
- return []api.Event{}
+ return []v1.Event{}
}
return events.Items
}
// waitListSchedulableNodesOrDie is a wrapper around listing nodes supporting retries.
-func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList {
- var nodes *api.NodeList
+func waitListSchedulableNodesOrDie(c clientset.Interface) *v1.NodeList {
+ var nodes *v1.NodeList
var err error
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
- nodes, err = c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ nodes, err = c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
- }.AsSelector()})
+ }.AsSelector().String()})
return err == nil, nil
}) != nil {
ExpectNoError(err, "Timed out while listing nodes for e2e cluster.")
@@ -2391,26 +2392,26 @@ func waitListSchedulableNodesOrDie(c clientset.Interface) *api.NodeList {
// 1) doesn't have "unschedulable" field set
// 2) its Ready condition is set to true
// 3) doesn't have NetworkUnavailable condition set to true
-func isNodeSchedulable(node *api.Node) bool {
- nodeReady := IsNodeConditionSetAsExpected(node, api.NodeReady, true)
- networkReady := IsNodeConditionUnset(node, api.NodeNetworkUnavailable) ||
- IsNodeConditionSetAsExpectedSilent(node, api.NodeNetworkUnavailable, false)
+func isNodeSchedulable(node *v1.Node) bool {
+ nodeReady := IsNodeConditionSetAsExpected(node, v1.NodeReady, true)
+ networkReady := IsNodeConditionUnset(node, v1.NodeNetworkUnavailable) ||
+ IsNodeConditionSetAsExpectedSilent(node, v1.NodeNetworkUnavailable, false)
return !node.Spec.Unschedulable && nodeReady && networkReady
}
// Test whether a fake pod can be scheduled on "node", given its current taints.
-func isNodeUntainted(node *api.Node) bool {
- fakePod := &api.Pod{
+func isNodeUntainted(node *v1.Node) bool {
+ fakePod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "fake-not-scheduled",
Namespace: "fake-not-scheduled",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "fake-not-scheduled",
Image: "fake-not-scheduled",
@@ -2432,11 +2433,11 @@ func isNodeUntainted(node *api.Node) bool {
// 1) Needs to be schedulable.
// 2) Needs to be ready.
// If EITHER 1 or 2 is not true, most tests will want to ignore the node entirely.
-func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList) {
+func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *v1.NodeList) {
nodes = waitListSchedulableNodesOrDie(c)
// previous tests may have caused failures of some nodes. Let's skip
// 'Not Ready' nodes, just in case (there is no need to fail the test).
- FilterNodes(nodes, func(node api.Node) bool {
+ FilterNodes(nodes, func(node v1.Node) bool {
return isNodeSchedulable(&node) && isNodeUntainted(&node)
})
return nodes
@@ -2445,12 +2446,12 @@ func GetReadySchedulableNodesOrDie(c clientset.Interface) (nodes *api.NodeList)
func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be schedulable", timeout, TestContext.AllowedNotReadyNodes)
- var notSchedulable []*api.Node
+ var notSchedulable []*v1.Node
return wait.PollImmediate(30*time.Second, timeout, func() (bool, error) {
notSchedulable = nil
- opts := api.ListOptions{
+ opts := v1.ListOptions{
ResourceVersion: "0",
- FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector(),
+ FieldSelector: fields.Set{"spec.unschedulable": "false"}.AsSelector().String(),
}
nodes, err := c.Core().Nodes().List(opts)
if err != nil {
@@ -2476,8 +2477,8 @@ func WaitForAllNodesSchedulable(c clientset.Interface, timeout time.Duration) er
for i := range notSchedulable {
Logf("-> %s Ready=%t Network=%t",
notSchedulable[i].Name,
- IsNodeConditionSetAsExpected(notSchedulable[i], api.NodeReady, true),
- IsNodeConditionSetAsExpected(notSchedulable[i], api.NodeNetworkUnavailable, false))
+ IsNodeConditionSetAsExpected(notSchedulable[i], v1.NodeReady, true),
+ IsNodeConditionSetAsExpected(notSchedulable[i], v1.NodeNetworkUnavailable, false))
}
}
if len(notSchedulable) > TestContext.AllowedNotReadyNodes {
@@ -2508,15 +2509,15 @@ func RemoveLabelOffNode(c clientset.Interface, nodeName string, labelKey string)
ExpectNoError(testutils.VerifyLabelsRemoved(c, nodeName, []string{labelKey}))
}
-func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint api.Taint) {
+func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint v1.Taint) {
for attempt := 0; attempt < UpdateRetries; attempt++ {
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
- nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
+ nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
ExpectNoError(err)
- var newTaints []api.Taint
+ var newTaints []v1.Taint
updated := false
for _, existingTaint := range nodeTaints {
if taint.MatchTaint(existingTaint) {
@@ -2538,7 +2539,7 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint api.Ta
if node.Annotations == nil {
node.Annotations = make(map[string]string)
}
- node.Annotations[api.TaintsAnnotationKey] = string(taintsData)
+ node.Annotations[v1.TaintsAnnotationKey] = string(taintsData)
_, err = c.Core().Nodes().Update(node)
if err != nil {
if !apierrs.IsConflict(err) {
@@ -2553,7 +2554,7 @@ func AddOrUpdateTaintOnNode(c clientset.Interface, nodeName string, taint api.Ta
}
}
-func taintExists(taints []api.Taint, taintToFind api.Taint) bool {
+func taintExists(taints []v1.Taint, taintToFind v1.Taint) bool {
for _, taint := range taints {
if taint.MatchTaint(taintToFind) {
return true
@@ -2562,12 +2563,12 @@ func taintExists(taints []api.Taint, taintToFind api.Taint) bool {
return false
}
-func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint api.Taint) {
+func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint v1.Taint) {
By("verifying the node has the taint " + taint.ToString())
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
- nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
+ nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
ExpectNoError(err)
if len(nodeTaints) == 0 || !taintExists(nodeTaints, taint) {
@@ -2575,8 +2576,8 @@ func ExpectNodeHasTaint(c clientset.Interface, nodeName string, taint api.Taint)
}
}
-func deleteTaint(oldTaints []api.Taint, taintToDelete api.Taint) ([]api.Taint, error) {
- newTaints := []api.Taint{}
+func deleteTaint(oldTaints []v1.Taint, taintToDelete v1.Taint) ([]v1.Taint, error) {
+ newTaints := []v1.Taint{}
found := false
for _, oldTaint := range oldTaints {
if oldTaint.MatchTaint(taintToDelete) {
@@ -2594,13 +2595,13 @@ func deleteTaint(oldTaints []api.Taint, taintToDelete api.Taint) ([]api.Taint, e
// RemoveTaintOffNode is for cleaning up taints temporarily added to a node;
// it won't fail if the target taint doesn't exist or has already been removed.
-func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint api.Taint) {
+func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint v1.Taint) {
By("removing the taint " + taint.ToString() + " off the node " + nodeName)
for attempt := 0; attempt < UpdateRetries; attempt++ {
node, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
- nodeTaints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
+ nodeTaints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
ExpectNoError(err)
if len(nodeTaints) == 0 {
return
@@ -2613,11 +2614,11 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint api.Taint)
newTaints, err := deleteTaint(nodeTaints, taint)
ExpectNoError(err)
if len(newTaints) == 0 {
- delete(node.Annotations, api.TaintsAnnotationKey)
+ delete(node.Annotations, v1.TaintsAnnotationKey)
} else {
taintsData, err := json.Marshal(newTaints)
ExpectNoError(err)
- node.Annotations[api.TaintsAnnotationKey] = string(taintsData)
+ node.Annotations[v1.TaintsAnnotationKey] = string(taintsData)
}
_, err = c.Core().Nodes().Update(node)
@@ -2636,16 +2637,16 @@ func RemoveTaintOffNode(c clientset.Interface, nodeName string, taint api.Taint)
nodeUpdated, err := c.Core().Nodes().Get(nodeName)
ExpectNoError(err)
By("verifying the node doesn't have the taint " + taint.ToString())
- taintsGot, err := api.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations)
+ taintsGot, err := v1.GetTaintsFromNodeAnnotations(nodeUpdated.Annotations)
ExpectNoError(err)
if taintExists(taintsGot, taint) {
Failf("Failed removing taint " + taint.ToString() + " of the node " + nodeName)
}
}
-func ScaleRC(clientset clientset.Interface, ns, name string, size uint, wait bool) error {
+func ScaleRC(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("Scaling replication controller %s in namespace %s to %d", name, ns, size))
- scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), clientset)
+ scaler, err := kubectl.ScalerFor(api.Kind("ReplicationController"), internalClientset)
if err != nil {
return err
}
@@ -2674,9 +2675,9 @@ func WaitForRCPodsRunning(c clientset.Interface, ns, rcName string) error {
return nil
}
-func ScaleDeployment(clientset clientset.Interface, ns, name string, size uint, wait bool) error {
+func ScaleDeployment(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string, size uint, wait bool) error {
By(fmt.Sprintf("Scaling Deployment %s in namespace %s to %d", name, ns, size))
- scaler, err := kubectl.ScalerFor(extensions.Kind("Deployment"), clientset)
+ scaler, err := kubectl.ScalerFor(extensionsinternal.Kind("Deployment"), internalClientset)
if err != nil {
return err
}
@@ -2722,7 +2723,7 @@ func podsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selec
// Wait for all matching pods to become scheduled and for at least one
// matching pod to exist. Return the list of matching pods.
-func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *api.PodList, err error) {
+func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
err = wait.PollImmediate(Poll, podScheduledBeforeTimeout,
func() (bool, error) {
pods, err = WaitForPodsWithLabel(c, ns, label)
@@ -2740,9 +2741,9 @@ func WaitForPodsWithLabelScheduled(c clientset.Interface, ns string, label label
}
// Wait up to PodListTimeout for getting pods with certain label
-func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *api.PodList, err error) {
+func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selector) (pods *v1.PodList, err error) {
for t := time.Now(); time.Since(t) < PodListTimeout; time.Sleep(Poll) {
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err = c.Core().Pods(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(pods.Items) > 0 {
@@ -2756,7 +2757,7 @@ func WaitForPodsWithLabel(c clientset.Interface, ns string, label labels.Selecto
}
// DeleteRCAndPods deletes a Replication Controller and all pods it spawned
-func DeleteRCAndPods(clientset clientset.Interface, ns, name string) error {
+func DeleteRCAndPods(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
By(fmt.Sprintf("deleting replication controller %s in namespace %s", name, ns))
rc, err := clientset.Core().ReplicationControllers(ns).Get(name)
if err != nil {
@@ -2766,7 +2767,7 @@ func DeleteRCAndPods(clientset clientset.Interface, ns, name string) error {
}
return err
}
- reaper, err := kubectl.ReaperForReplicationController(clientset.Core(), 10*time.Minute)
+ reaper, err := kubectl.ReaperForReplicationController(internalClientset.Core(), 10*time.Minute)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
@@ -2823,7 +2824,7 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
defer ps.Stop()
startTime := time.Now()
falseVar := false
- deleteOption := &api.DeleteOptions{OrphanDependents: &falseVar}
+ deleteOption := &v1.DeleteOptions{OrphanDependents: &falseVar}
err = c.Core().ReplicationControllers(ns).Delete(name, deleteOption)
if err != nil && apierrs.IsNotFound(err) {
Logf("RC %s was already deleted: %v", name, err)
@@ -2836,17 +2837,17 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
Logf("Deleting RC %s took: %v", name, deleteRCTime)
var interval, timeout time.Duration
switch {
- case rc.Spec.Replicas < 100:
+ case *(rc.Spec.Replicas) < 100:
interval = 100 * time.Millisecond
- case rc.Spec.Replicas < 1000:
+ case *(rc.Spec.Replicas) < 1000:
interval = 1 * time.Second
default:
interval = 10 * time.Second
}
- if rc.Spec.Replicas < 5000 {
+ if *(rc.Spec.Replicas) < 5000 {
timeout = 10 * time.Minute
} else {
- timeout = time.Duration(rc.Spec.Replicas/gcThroughput) * time.Second
+ timeout = time.Duration(*(rc.Spec.Replicas)/gcThroughput) * time.Second
// gcThroughput is pretty strict now, add a bit more to it
timeout = timeout + 3*time.Minute
}
@@ -2865,7 +2866,7 @@ func DeleteRCAndWaitForGC(c clientset.Interface, ns, name string) error {
// podStoreForRC creates a PodStore that monitors pods belonging to the rc. It
// waits until the reflector does a List() before returning.
-func podStoreForRC(c clientset.Interface, rc *api.ReplicationController) (*testutils.PodStore, error) {
+func podStoreForRC(c clientset.Interface, rc *v1.ReplicationController) (*testutils.PodStore, error) {
labels := labels.SelectorFromSet(rc.Spec.Selector)
ps := testutils.NewPodStore(c, rc.Namespace, labels, fields.Everything())
err := wait.Poll(1*time.Second, 2*time.Minute, func() (bool, error) {
@@ -2904,7 +2905,7 @@ func waitForPodsGone(ps *testutils.PodStore, interval, timeout time.Duration) er
}
// Delete a ReplicaSet and all pods it spawned
-func DeleteReplicaSet(clientset clientset.Interface, ns, name string) error {
+func DeleteReplicaSet(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
By(fmt.Sprintf("deleting ReplicaSet %s in namespace %s", name, ns))
rc, err := clientset.Extensions().ReplicaSets(ns).Get(name)
if err != nil {
@@ -2914,7 +2915,7 @@ func DeleteReplicaSet(clientset clientset.Interface, ns, name string) error {
}
return err
}
- reaper, err := kubectl.ReaperFor(extensions.Kind("ReplicaSet"), clientset)
+ reaper, err := kubectl.ReaperFor(extensionsinternal.Kind("ReplicaSet"), internalClientset)
if err != nil {
if apierrs.IsNotFound(err) {
Logf("ReplicaSet %s was already deleted: %v", name, err)
@@ -2944,7 +2945,7 @@ func waitForReplicaSetPodsGone(c clientset.Interface, rs *extensions.ReplicaSet)
return wait.PollImmediate(Poll, 2*time.Minute, func() (bool, error) {
selector, err := unversioned.LabelSelectorAsSelector(rs.Spec.Selector)
ExpectNoError(err)
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
if pods, err := c.Core().Pods(rs.Namespace).List(options); err == nil && len(pods.Items) == 0 {
return true, nil
}
@@ -2989,7 +2990,7 @@ func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deploymen
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
- maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
+ maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
reason = fmt.Sprintf("total pods created: %d, more than the max allowed: %d", totalCreated, maxCreated)
Logf(reason)
@@ -3003,9 +3004,9 @@ func WaitForDeploymentStatusValid(c clientset.Interface, d *extensions.Deploymen
}
// When the deployment status and its underlying resources reach the desired state, we're done
- if deployment.Status.Replicas == deployment.Spec.Replicas &&
- deployment.Status.UpdatedReplicas == deployment.Spec.Replicas &&
- deployment.Status.AvailableReplicas == deployment.Spec.Replicas {
+ if deployment.Status.Replicas == *(deployment.Spec.Replicas) &&
+ deployment.Status.UpdatedReplicas == *(deployment.Spec.Replicas) &&
+ deployment.Status.AvailableReplicas == *(deployment.Spec.Replicas) {
return true, nil
}
@@ -3057,7 +3058,7 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) er
}
}
totalCreated := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
- maxCreated := deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
+ maxCreated := *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
if totalCreated > maxCreated {
logReplicaSetsOfDeployment(deployment, allOldRSs, newRS)
logPodsOfDeployment(c, deployment)
@@ -3071,8 +3072,8 @@ func WaitForDeploymentStatus(c clientset.Interface, d *extensions.Deployment) er
}
// When the deployment status and its underlying resources reach the desired state, we're done
- if deployment.Status.Replicas == deployment.Spec.Replicas &&
- deployment.Status.UpdatedReplicas == deployment.Spec.Replicas {
+ if deployment.Status.Replicas == *(deployment.Spec.Replicas) &&
+ deployment.Status.UpdatedReplicas == *(deployment.Spec.Replicas) {
return true, nil
}
return false, nil
@@ -3196,7 +3197,7 @@ func CheckNewRSAnnotations(c clientset.Interface, ns, deploymentName string, exp
func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds int) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
return wait.Poll(Poll, 5*time.Minute, func() (bool, error) {
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
@@ -3262,7 +3263,7 @@ func WaitForDeploymentWithCondition(c clientset.Interface, ns, deploymentName, r
func logPodsOfDeployment(c clientset.Interface, deployment *extensions.Deployment) {
minReadySeconds := deployment.Spec.MinReadySeconds
podList, err := deploymentutil.ListPods(deployment,
- func(namespace string, options api.ListOptions) (*api.PodList, error) {
+ func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
})
if err != nil {
@@ -3363,10 +3364,10 @@ func UpdateReplicaSetWithRetries(c clientset.Interface, namespace, name string,
return rs, pollErr
}
-type updateRcFunc func(d *api.ReplicationController)
+type updateRcFunc func(d *v1.ReplicationController)
-func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*api.ReplicationController, error) {
- var rc *api.ReplicationController
+func UpdateReplicationControllerWithRetries(c clientset.Interface, namespace, name string, applyUpdate updateRcFunc) (*v1.ReplicationController, error) {
+ var rc *v1.ReplicationController
var updateErr error
pollErr := wait.PollImmediate(10*time.Millisecond, 1*time.Minute, func() (bool, error) {
var err error
@@ -3437,7 +3438,7 @@ func UpdateJobWithRetries(c clientset.Interface, namespace, name string, applyUp
}
// NodeAddresses returns the first address of the given type of each node.
-func NodeAddresses(nodelist *api.NodeList, addrType api.NodeAddressType) []string {
+func NodeAddresses(nodelist *v1.NodeList, addrType v1.NodeAddressType) []string {
hosts := []string{}
for _, n := range nodelist.Items {
for _, addr := range n.Status.Addresses {
@@ -3461,7 +3462,7 @@ func NodeSSHHosts(c clientset.Interface) ([]string, error) {
nodelist := waitListSchedulableNodesOrDie(c)
// TODO(roberthbailey): Use the "preferred" address for the node, once such a thing is defined (#2462).
- hosts := NodeAddresses(nodelist, api.NodeExternalIP)
+ hosts := NodeAddresses(nodelist, v1.NodeExternalIP)
// Error if any node didn't have an external IP.
if len(hosts) != len(nodelist.Items) {
@@ -3521,11 +3522,11 @@ func LogSSHResult(result SSHResult) {
Logf("ssh %s: exit code: %d", remote, result.Code)
}
-func IssueSSHCommandWithResult(cmd, provider string, node *api.Node) (*SSHResult, error) {
+func IssueSSHCommandWithResult(cmd, provider string, node *v1.Node) (*SSHResult, error) {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
- if a.Type == api.NodeExternalIP {
+ if a.Type == v1.NodeExternalIP {
host = a.Address + ":22"
break
}
@@ -3547,7 +3548,7 @@ func IssueSSHCommandWithResult(cmd, provider string, node *api.Node) (*SSHResult
return &result, nil
}
-func IssueSSHCommand(cmd, provider string, node *api.Node) error {
+func IssueSSHCommand(cmd, provider string, node *v1.Node) error {
result, err := IssueSSHCommandWithResult(cmd, provider, node)
if result != nil {
LogSSHResult(*result)
@@ -3562,23 +3563,22 @@ func IssueSSHCommand(cmd, provider string, node *api.Node) error {
}
// NewHostExecPodSpec returns the pod spec of a hostexec pod
-func NewHostExecPodSpec(ns, name string) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func NewHostExecPodSpec(ns, name string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: ns,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "hostexec",
Image: "gcr.io/google_containers/hostexec:1.2",
- ImagePullPolicy: api.PullIfNotPresent,
+ ImagePullPolicy: v1.PullIfNotPresent,
},
},
- SecurityContext: &api.PodSecurityContext{
- HostNetwork: true,
- },
+ HostNetwork: true,
+ SecurityContext: &v1.PodSecurityContext{},
},
}
return pod
@@ -3600,7 +3600,7 @@ func RunHostCmdOrDie(ns, name, cmd string) string {
// LaunchHostExecPod launches a hostexec pod in the given namespace and waits
// until it's Running
-func LaunchHostExecPod(client clientset.Interface, ns, name string) *api.Pod {
+func LaunchHostExecPod(client clientset.Interface, ns, name string) *v1.Pod {
hostExecPod := NewHostExecPodSpec(ns, name)
pod, err := client.Core().Pods(ns).Create(hostExecPod)
ExpectNoError(err)
@@ -3687,27 +3687,27 @@ func CheckPodsCondition(c clientset.Interface, ns string, podNames []string, tim
// WaitForNodeToBeReady returns whether node name is ready within timeout.
func WaitForNodeToBeReady(c clientset.Interface, name string, timeout time.Duration) bool {
- return WaitForNodeToBe(c, name, api.NodeReady, true, timeout)
+ return WaitForNodeToBe(c, name, v1.NodeReady, true, timeout)
}
// WaitForNodeToBeNotReady returns whether node name is not ready (i.e. the
// readiness condition is anything but ready, e.g false or unknown) within
// timeout.
func WaitForNodeToBeNotReady(c clientset.Interface, name string, timeout time.Duration) bool {
- return WaitForNodeToBe(c, name, api.NodeReady, false, timeout)
+ return WaitForNodeToBe(c, name, v1.NodeReady, false, timeout)
}
-func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue, silent bool) bool {
+func isNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue, silent bool) bool {
// Check the node readiness condition (logging all).
for _, cond := range node.Status.Conditions {
// Ensure that the condition type and the status matches as desired.
if cond.Type == conditionType {
- if (cond.Status == api.ConditionTrue) == wantTrue {
+ if (cond.Status == v1.ConditionTrue) == wantTrue {
return true
} else {
if !silent {
Logf("Condition %s of node %s is %v instead of %t. Reason: %v, message: %v",
- conditionType, node.Name, cond.Status == api.ConditionTrue, wantTrue, cond.Reason, cond.Message)
+ conditionType, node.Name, cond.Status == v1.ConditionTrue, wantTrue, cond.Reason, cond.Message)
}
return false
}
@@ -3719,15 +3719,15 @@ func isNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditio
return false
}
-func IsNodeConditionSetAsExpected(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
+func IsNodeConditionSetAsExpected(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, false)
}
-func IsNodeConditionSetAsExpectedSilent(node *api.Node, conditionType api.NodeConditionType, wantTrue bool) bool {
+func IsNodeConditionSetAsExpectedSilent(node *v1.Node, conditionType v1.NodeConditionType, wantTrue bool) bool {
return isNodeConditionSetAsExpected(node, conditionType, wantTrue, true)
}
-func IsNodeConditionUnset(node *api.Node, conditionType api.NodeConditionType) bool {
+func IsNodeConditionUnset(node *v1.Node, conditionType v1.NodeConditionType) bool {
for _, cond := range node.Status.Conditions {
if cond.Type == conditionType {
return false
@@ -3740,7 +3740,7 @@ func IsNodeConditionUnset(node *api.Node, conditionType api.NodeConditionType) b
// within timeout. If wantTrue is true, it will ensure the node condition status
// is ConditionTrue; if it's false, it ensures the node condition is in any state
// other than ConditionTrue (e.g. not true or unknown).
-func WaitForNodeToBe(c clientset.Interface, name string, conditionType api.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
+func WaitForNodeToBe(c clientset.Interface, name string, conditionType v1.NodeConditionType, wantTrue bool, timeout time.Duration) bool {
Logf("Waiting up to %v for node %s condition %s to be %t", timeout, name, conditionType, wantTrue)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(Poll) {
node, err := c.Core().Nodes().Get(name)
@@ -3763,9 +3763,9 @@ func WaitForNodeToBe(c clientset.Interface, name string, conditionType api.NodeC
// Currently we allow only for:
// - CNI plugins not present on the node
// TODO: we should extend it for other reasons.
-func allowedNotReadyReasons(nodes []*api.Node) bool {
+func allowedNotReadyReasons(nodes []*v1.Node) bool {
for _, node := range nodes {
- index, condition := api.GetNodeCondition(&node.Status, api.NodeReady)
+ index, condition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if index == -1 ||
!strings.Contains(condition.Message, "could not locate kubenet required CNI plugins") {
return false
@@ -3781,17 +3781,17 @@ func allowedNotReadyReasons(nodes []*api.Node) bool {
func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all (but %d) nodes to be ready", timeout, TestContext.AllowedNotReadyNodes)
- var notReady []*api.Node
+ var notReady []*v1.Node
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
return false, err
}
for i := range nodes.Items {
node := &nodes.Items[i]
- if !IsNodeConditionSetAsExpected(node, api.NodeReady, true) {
+ if !IsNodeConditionSetAsExpected(node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
@@ -3822,28 +3822,28 @@ func AllNodesReady(c clientset.Interface, timeout time.Duration) error {
func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error {
Logf("Waiting up to %v for all nodes to be ready", timeout)
- var notReady []api.Node
+ var notReady []v1.Node
var missingPodsPerNode map[string][]string
err := wait.PollImmediate(Poll, timeout, func() (bool, error) {
notReady = nil
// It should be OK to list unschedulable Nodes here.
- nodes, err := c.Core().Nodes().List(api.ListOptions{ResourceVersion: "0"})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
for _, node := range nodes.Items {
- if !IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
+ if !IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
notReady = append(notReady, node)
}
}
- pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{ResourceVersion: "0"})
+ pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{ResourceVersion: "0"})
if err != nil {
return false, err
}
systemPodsPerNode := make(map[string][]string)
for _, pod := range pods.Items {
- if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == api.PodRunning {
+ if pod.Namespace == api.NamespaceSystem && pod.Status.Phase == v1.PodRunning {
if pod.Spec.NodeName != "" {
systemPodsPerNode[pod.Spec.NodeName] = append(systemPodsPerNode[pod.Spec.NodeName], pod.Name)
}
@@ -3851,7 +3851,7 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error
}
missingPodsPerNode = make(map[string][]string)
for _, node := range nodes.Items {
- if !system.IsMasterNode(&node) {
+ if !system.IsMasterNode(node.Name) {
for _, requiredPod := range requiredPerNodePods {
foundRequired := false
for _, presentPod := range systemPodsPerNode[node.Name] {
@@ -3886,8 +3886,8 @@ func WaitForAllNodesHealthy(c clientset.Interface, timeout time.Duration) error
// Filters nodes in NodeList in place, removing nodes that do not
// satisfy the given condition
// TODO: consider merging with pkg/client/cache.NodeLister
-func FilterNodes(nodeList *api.NodeList, fn func(node api.Node) bool) {
- var l []api.Node
+func FilterNodes(nodeList *v1.NodeList, fn func(node v1.Node) bool) {
+ var l []v1.Node
for _, node := range nodeList.Items {
if fn(node) {
@@ -4001,9 +4001,9 @@ func WaitForApiserverUp(c clientset.Interface) error {
// By cluster size we mean the number of Nodes excluding the Master Node.
func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
- nodes, err := c.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
- }.AsSelector()})
+ }.AsSelector().String()})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
@@ -4011,8 +4011,8 @@ func WaitForClusterSize(c clientset.Interface, size int, timeout time.Duration)
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
- FilterNodes(nodes, func(node api.Node) bool {
- return IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+ FilterNodes(nodes, func(node v1.Node) bool {
+ return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
@@ -4032,14 +4032,14 @@ func GenerateMasterRegexp(prefix string) string {
// WaitForMasters waits until the cluster has the desired number of ready masters in it.
func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeout time.Duration) error {
for start := time.Now(); time.Since(start) < timeout; time.Sleep(20 * time.Second) {
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
Logf("Failed to list nodes: %v", err)
continue
}
// Filter out nodes that are not master replicas
- FilterNodes(nodes, func(node api.Node) bool {
+ FilterNodes(nodes, func(node v1.Node) bool {
res, err := regexp.Match(GenerateMasterRegexp(masterPrefix), ([]byte)(node.Name))
if err != nil {
Logf("Failed to match regexp to node name: %v", err)
@@ -4051,8 +4051,8 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
numNodes := len(nodes.Items)
// Filter out not-ready nodes.
- FilterNodes(nodes, func(node api.Node) bool {
- return IsNodeConditionSetAsExpected(&node, api.NodeReady, true)
+ FilterNodes(nodes, func(node v1.Node) bool {
+ return IsNodeConditionSetAsExpected(&node, v1.NodeReady, true)
})
numReady := len(nodes.Items)
@@ -4069,13 +4069,13 @@ func WaitForMasters(masterPrefix string, c clientset.Interface, size int, timeou
// GetHostExternalAddress gets the node for a pod and returns the first External
// address. Returns an error if the node the pod is on doesn't have an External
// address.
-func GetHostExternalAddress(client clientset.Interface, p *api.Pod) (externalAddress string, err error) {
+func GetHostExternalAddress(client clientset.Interface, p *v1.Pod) (externalAddress string, err error) {
node, err := client.Core().Nodes().Get(p.Spec.NodeName)
if err != nil {
return "", err
}
for _, address := range node.Status.Addresses {
- if address.Type == api.NodeExternalIP {
+ if address.Type == v1.NodeExternalIP {
if address.Address != "" {
externalAddress = address.Address
break
@@ -4241,11 +4241,11 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
// This list of nodes must not include the master, which is marked
// unschedulable, since the master doesn't run kube-proxy. Without
// kube-proxy NodePorts won't work.
- var nodes *api.NodeList
+ var nodes *v1.NodeList
if wait.PollImmediate(Poll, SingleCallTimeout, func() (bool, error) {
- nodes, err = client.Core().Nodes().List(api.ListOptions{FieldSelector: fields.Set{
+ nodes, err = client.Core().Nodes().List(v1.ListOptions{FieldSelector: fields.Set{
"spec.unschedulable": "false",
- }.AsSelector()})
+ }.AsSelector().String()})
return err == nil, nil
}) != nil {
return "", err
@@ -4255,7 +4255,7 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
}
for _, node := range nodes.Items {
for _, address := range node.Status.Addresses {
- if address.Type == api.NodeExternalIP {
+ if address.Type == v1.NodeExternalIP {
if address.Address != "" {
return fmt.Sprintf("http://%v:%v", address.Address, nodePort), nil
}
@@ -4267,8 +4267,8 @@ func GetNodePortURL(client clientset.Interface, ns, name string, svcPort int) (s
// ScaleRCByLabels scales an RC via ns/label lookup. If replicas == 0 it waits till
// none are running, otherwise it does what a synchronous scale operation would do.
-func ScaleRCByLabels(clientset clientset.Interface, ns string, l map[string]string, replicas uint) error {
- listOpts := api.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l))}
+func ScaleRCByLabels(clientset clientset.Interface, internalClientset internalclientset.Interface, ns string, l map[string]string, replicas uint) error {
+ listOpts := v1.ListOptions{LabelSelector: labels.SelectorFromSet(labels.Set(l)).String()}
rcs, err := clientset.Core().ReplicationControllers(ns).List(listOpts)
if err != nil {
return err
@@ -4279,7 +4279,7 @@ func ScaleRCByLabels(clientset clientset.Interface, ns string, l map[string]stri
Logf("Scaling %v RCs with labels %v in ns %v to %v replicas.", len(rcs.Items), l, ns, replicas)
for _, labelRC := range rcs.Items {
name := labelRC.Name
- if err := ScaleRC(clientset, ns, name, replicas, false); err != nil {
+ if err := ScaleRC(clientset, internalClientset, ns, name, replicas, false); err != nil {
return err
}
rc, err := clientset.Core().ReplicationControllers(ns).Get(name)
@@ -4424,7 +4424,7 @@ func UnblockNetwork(from string, to string) {
}
}
-func isElementOf(podUID types.UID, pods *api.PodList) bool {
+func isElementOf(podUID types.UID, pods *v1.PodList) bool {
for _, pod := range pods.Items {
if pod.UID == podUID {
return true
@@ -4442,7 +4442,7 @@ func CheckRSHashLabel(rs *extensions.ReplicaSet) error {
return nil
}
-func CheckPodHashLabel(pods *api.PodList) error {
+func CheckPodHashLabel(pods *v1.PodList) error {
invalidPod := ""
for _, pod := range pods.Items {
if len(pod.Labels[extensions.DefaultDeploymentUniqueLabelKey]) == 0 {
@@ -4499,25 +4499,25 @@ func NodeProxyRequest(c clientset.Interface, node, endpoint string) (restclient.
}
// GetKubeletPods retrieves the list of pods on the kubelet
-func GetKubeletPods(c clientset.Interface, node string) (*api.PodList, error) {
+func GetKubeletPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "pods")
}
// GetKubeletRunningPods retrieves the list of running pods on the kubelet. The pods
// include necessary information (e.g., UID, name, namespace for
// pods/containers), but do not contain the full spec.
-func GetKubeletRunningPods(c clientset.Interface, node string) (*api.PodList, error) {
+func GetKubeletRunningPods(c clientset.Interface, node string) (*v1.PodList, error) {
return getKubeletPods(c, node, "runningpods")
}
-func getKubeletPods(c clientset.Interface, node, resource string) (*api.PodList, error) {
- result := &api.PodList{}
+func getKubeletPods(c clientset.Interface, node, resource string) (*v1.PodList, error) {
+ result := &v1.PodList{}
client, err := NodeProxyRequest(c, node, resource)
if err != nil {
- return &api.PodList{}, err
+ return &v1.PodList{}, err
}
if err = client.Into(result); err != nil {
- return &api.PodList{}, err
+ return &v1.PodList{}, err
}
return result, nil
}
@@ -4529,21 +4529,21 @@ func getKubeletPods(c clientset.Interface, node, resource string) (*api.PodList,
func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
containerName := fmt.Sprintf("%s-container", podName)
port := 8080
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName,
Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
- Env: []api.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
- Ports: []api.ContainerPort{{ContainerPort: int32(port)}},
+ Env: []v1.EnvVar{{Name: fmt.Sprintf("SERVE_PORT_%d", port), Value: "foo"}},
+ Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
NodeName: nodeName,
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
@@ -4562,12 +4562,12 @@ func LaunchWebserverPod(f *Framework, podName, nodeName string) (ip string) {
// error will be returned if the host is not reachable from the pod.
func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeout int) error {
contName := fmt.Sprintf("%s-container", podName)
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: contName,
Image: "gcr.io/google_containers/busybox:1.24",
@@ -4575,7 +4575,7 @@ func CheckConnectivityToHost(f *Framework, nodeName, podName, host string, timeo
},
},
NodeName: nodeName,
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
@@ -4608,7 +4608,7 @@ func CoreDump(dir string) {
}
}
-func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*api.Pod)) (*api.Pod, error) {
+func UpdatePodWithRetries(client clientset.Interface, ns, name string, update func(*v1.Pod)) (*v1.Pod, error) {
for i := 0; i < 3; i++ {
pod, err := client.Core().Pods(ns).Get(name)
if err != nil {
@@ -4626,13 +4626,13 @@ func UpdatePodWithRetries(client clientset.Interface, ns, name string, update fu
return nil, fmt.Errorf("Too many retries updating Pod %q", name)
}
-func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*api.Pod, error) {
- pods, err := c.Core().Pods(ns).List(api.ListOptions{})
+func GetPodsInNamespace(c clientset.Interface, ns string, ignoreLabels map[string]string) ([]*v1.Pod, error) {
+ pods, err := c.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
- return []*api.Pod{}, err
+ return []*v1.Pod{}, err
}
ignoreSelector := labels.SelectorFromSet(ignoreLabels)
- filtered := []*api.Pod{}
+ filtered := []*v1.Pod{}
for _, p := range pods.Items {
if len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(p.Labels)) {
continue
@@ -4681,18 +4681,18 @@ func retryCmd(command string, args ...string) (string, string, error) {
}
// GetPodsScheduled returns the currently scheduled and not scheduled Pods.
-func GetPodsScheduled(masterNodes sets.String, pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
+func GetPodsScheduled(masterNodes sets.String, pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
- _, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
+ _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
- Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue))
+ Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
- _, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
+ _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
- Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse))
+ Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
@@ -4708,12 +4708,12 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
timeout := 10 * time.Minute
startTime := time.Now()
- allPods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
+ allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
ExpectNoError(err)
// The API server also returns Pods that succeeded. We need to filter them out.
- currentPods := make([]api.Pod, 0, len(allPods.Items))
+ currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
- if pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
+ if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
@@ -4723,7 +4723,7 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)
- allPods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
+ allPods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(masterNodes, allPods)
@@ -4736,12 +4736,12 @@ func WaitForStableCluster(c clientset.Interface, masterNodes sets.String) int {
}
// GetMasterAndWorkerNodesOrDie will return a list of masters and schedulable worker nodes
-func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *api.NodeList) {
- nodes := &api.NodeList{}
+func GetMasterAndWorkerNodesOrDie(c clientset.Interface) (sets.String, *v1.NodeList) {
+ nodes := &v1.NodeList{}
masters := sets.NewString()
- all, _ := c.Core().Nodes().List(api.ListOptions{})
+ all, _ := c.Core().Nodes().List(v1.ListOptions{})
for _, n := range all.Items {
- if system.IsMasterNode(&n) {
+ if system.IsMasterNode(n.Name) {
masters.Insert(n.Name)
} else if isNodeSchedulable(&n) && isNodeUntainted(&n) {
nodes.Items = append(nodes.Items, n)
@@ -4768,7 +4768,7 @@ func CreateFileForGoBinData(gobindataPath, outputFilename string) error {
}
func ListNamespaceEvents(c clientset.Interface, ns string) error {
- ls, err := c.Core().Events(ns).List(api.ListOptions{})
+ ls, err := c.Core().Events(ns).List(v1.ListOptions{})
if err != nil {
return err
}
@@ -4857,7 +4857,7 @@ func getMaster(c clientset.Interface) Address {
master := Address{}
// Populate the internal IP.
- eps, err := c.Core().Endpoints(api.NamespaceDefault).Get("kubernetes")
+ eps, err := c.Core().Endpoints(v1.NamespaceDefault).Get("kubernetes")
if err != nil {
Failf("Failed to get kubernetes endpoints: %v", err)
}
@@ -4898,11 +4898,11 @@ func GetMasterAddress(c clientset.Interface) string {
// GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
// e.g. 1.2.3.4:22
-func GetNodeExternalIP(node *api.Node) string {
+func GetNodeExternalIP(node *v1.Node) string {
Logf("Getting external IP address for %s", node.Name)
host := ""
for _, a := range node.Status.Addresses {
- if a.Type == api.NodeExternalIP {
+ if a.Type == v1.NodeExternalIP {
host = a.Address + ":22"
break
}
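Most of the framework/util.go churn above is one mechanical migration: list and watch options move from api.ListOptions (which carried selector objects) to v1.ListOptions (which carries selectors serialized as strings), and RC/Deployment replica counts become pointers (*int32) in the versioned API. The sketch below is editorial illustration only, not part of the patch; it assumes the 1.5-era import paths used by this PR (pkg/api/v1, pkg/labels, and the release_1_5 generated clientset).

```go
// Illustrative sketch, not part of this patch: the recurring v1 migration pattern.
package sketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/labels" // assumed 1.5-era path for the labels package
)

// listRCPods lists an RC's pods by label; with v1.ListOptions the selector is
// passed as a string rather than as a labels.Selector value.
func listRCPods(c clientset.Interface, ns, rcName string) (*v1.PodList, error) {
	label := labels.SelectorFromSet(labels.Set(map[string]string{"name": rcName}))
	options := v1.ListOptions{LabelSelector: label.String()}
	return c.Core().Pods(ns).List(options)
}

// rcStable reports whether the RC's desired and observed replica counts match;
// Spec.Replicas is *int32 in the v1 types, hence the dereference.
func rcStable(rc *v1.ReplicationController) bool {
	return rc.Generation <= rc.Status.ObservedGeneration &&
		*(rc.Spec.Replicas) == rc.Status.Replicas
}
```

Helpers that previously passed a labels.Selector or fields.Selector straight through now serialize it once at the client boundary; the surrounding polling logic is otherwise unchanged.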
diff --git a/test/e2e/garbage_collector.go b/test/e2e/garbage_collector.go
index 6952b0d28ea..2fc849d83a2 100644
--- a/test/e2e/garbage_collector.go
+++ b/test/e2e/garbage_collector.go
@@ -117,7 +117,7 @@ func gatherMetrics(f *framework.Framework) {
var _ = framework.KubeDescribe("Garbage collector", func() {
f := framework.NewDefaultFramework("gc")
It("[Feature:GarbageCollector] should delete pods created by rc when not orphaning", func() {
- clientSet := f.ClientSet_1_5
+ clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
@@ -168,7 +168,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
})
It("[Feature:GarbageCollector] should orphan pods created by rc if delete options say so", func() {
- clientSet := f.ClientSet_1_5
+ clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
@@ -230,7 +230,7 @@ var _ = framework.KubeDescribe("Garbage collector", func() {
})
It("[Feature:GarbageCollector] should orphan pods created by rc if deleteOptions.OrphanDependents is nil", func() {
- clientSet := f.ClientSet_1_5
+ clientSet := f.ClientSet
rcClient := clientSet.Core().ReplicationControllers(f.Namespace.Name)
podClient := clientSet.Core().Pods(f.Namespace.Name)
rcName := "simpletest.rc"
diff --git a/test/e2e/generated_clientset.go b/test/e2e/generated_clientset.go
index 0e182eb88ee..65d6bd01e94 100644
--- a/test/e2e/generated_clientset.go
+++ b/test/e2e/generated_clientset.go
@@ -121,7 +121,7 @@ func observeObjectDeletion(w watch.Interface) (obj runtime.Object) {
var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create pods, delete pods, watch pods", func() {
- podClient := f.ClientSet_1_5.Core().Pods(f.Namespace.Name)
+ podClient := f.ClientSet.Core().Pods(f.Namespace.Name)
By("constructing the pod")
name := "pod" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
@@ -240,7 +240,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
f := framework.NewDefaultFramework("clientset")
It("should create v2alpha1 cronJobs, delete cronJobs, watch cronJobs", func() {
var enabled bool
- groupList, err := f.ClientSet_1_5.Discovery().ServerGroups()
+ groupList, err := f.ClientSet.Discovery().ServerGroups()
ExpectNoError(err)
for _, group := range groupList.Groups {
if group.Name == v2alpha1.GroupName {
@@ -256,7 +256,7 @@ var _ = framework.KubeDescribe("Generated release_1_5 clientset", func() {
framework.Logf("%s is not enabled, test skipped", v2alpha1.SchemeGroupVersion)
return
}
- cronJobClient := f.ClientSet_1_5.BatchV2alpha1().CronJobs(f.Namespace.Name)
+ cronJobClient := f.ClientSet.BatchV2alpha1().CronJobs(f.Namespace.Name)
By("constructing the cronJob")
name := "cronjob" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
diff --git a/test/e2e/gke_local_ssd.go b/test/e2e/gke_local_ssd.go
index 8256018bc3b..cfa4ef56b72 100644
--- a/test/e2e/gke_local_ssd.go
+++ b/test/e2e/gke_local_ssd.go
@@ -20,8 +20,8 @@ import (
"fmt"
"os/exec"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -65,28 +65,28 @@ func doTestWriteAndReadToLocalSsd(f *framework.Framework) {
f.TestContainerOutput(msg, pod, 0, out)
}
-func testPodWithSsd(command string) *api.Pod {
+func testPodWithSsd(command string) *v1.Pod {
containerName := "test-container"
volumeName := "test-ssd-volume"
path := "/mnt/disks/ssd0"
podName := "pod-" + string(uuid.NewUUID())
image := "ubuntu:14.04"
- return &api.Pod{
+ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: containerName,
Image: image,
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: path,
@@ -94,12 +94,12 @@ func testPodWithSsd(command string) *api.Pod {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{
Path: path,
},
},
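The gke_local_ssd.go change is a mechanical type swap; the builder still feeds the same create path. A hedged sketch of creating such a pod directly through the versioned clientset (the test itself goes through f.TestContainerOutput, as the hunk context shows, and the command string here is illustrative):

// Sketch: the converted builder plugs into the versioned client unchanged.
pod := testPodWithSsd("echo 'hello' > /mnt/disks/ssd0/out && cat /mnt/disks/ssd0/out")
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
framework.ExpectNoError(err)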
diff --git a/test/e2e/ha_master.go b/test/e2e/ha_master.go
index b351a8b4066..5c07a19dbee 100644
--- a/test/e2e/ha_master.go
+++ b/test/e2e/ha_master.go
@@ -26,7 +26,7 @@ import (
"time"
. "github.com/onsi/ginkgo"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
)
diff --git a/test/e2e/horizontal_pod_autoscaling.go b/test/e2e/horizontal_pod_autoscaling.go
index 90e57f830f5..4e721e1b7a0 100644
--- a/test/e2e/horizontal_pod_autoscaling.go
+++ b/test/e2e/horizontal_pod_autoscaling.go
@@ -19,8 +19,8 @@ package e2e
import (
"time"
- "k8s.io/kubernetes/pkg/api"
- "k8s.io/kubernetes/pkg/apis/autoscaling"
+ "k8s.io/kubernetes/pkg/api/v1"
+ autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -177,7 +177,7 @@ func scaleDown(name, kind string, checkStability bool, rc *ResourceConsumer, f *
func createCPUHorizontalPodAutoscaler(rc *ResourceConsumer, cpu, minReplicas, maxRepl int32) {
hpa := &autoscaling.HorizontalPodAutoscaler{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: rc.name,
Namespace: rc.framework.Namespace.Name,
},
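With the import switched to pkg/apis/autoscaling/v1, the rest of the HPA spec uses the versioned field names. A sketch of how the object plausibly gets filled in and submitted; only the ObjectMeta lines above are part of the hunk, the spec fields assume autoscaling/v1, and the target kind is illustrative:

hpa := &autoscaling.HorizontalPodAutoscaler{
	ObjectMeta: v1.ObjectMeta{Name: rc.name, Namespace: rc.framework.Namespace.Name},
	Spec: autoscaling.HorizontalPodAutoscalerSpec{
		ScaleTargetRef: autoscaling.CrossVersionObjectReference{
			Kind: "ReplicationController", // illustrative target kind
			Name: rc.name,
		},
		MinReplicas:                    &minReplicas,
		MaxReplicas:                    maxRepl,
		TargetCPUUtilizationPercentage: &cpu,
	},
}
_, err := rc.framework.ClientSet.Autoscaling().HorizontalPodAutoscalers(rc.framework.Namespace.Name).Create(hpa)
framework.ExpectNoError(err)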
diff --git a/test/e2e/ingress_utils.go b/test/e2e/ingress_utils.go
index bb9d2f0416c..70f707405a3 100644
--- a/test/e2e/ingress_utils.go
+++ b/test/e2e/ingress_utils.go
@@ -38,12 +38,13 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
compute "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi"
apierrs "k8s.io/kubernetes/pkg/api/errors"
- "k8s.io/kubernetes/pkg/apis/extensions"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@@ -280,16 +281,16 @@ func createSecret(kubeClient clientset.Interface, ing *extensions.Ingress) (host
}
cert := c.Bytes()
key := k.Bytes()
- secret := &api.Secret{
- ObjectMeta: api.ObjectMeta{
+ secret := &v1.Secret{
+ ObjectMeta: v1.ObjectMeta{
Name: tls.SecretName,
},
Data: map[string][]byte{
- api.TLSCertKey: cert,
- api.TLSPrivateKeyKey: key,
+ v1.TLSCertKey: cert,
+ v1.TLSPrivateKeyKey: key,
},
}
- var s *api.Secret
+ var s *v1.Secret
if s, err = kubeClient.Core().Secrets(ing.Namespace).Get(tls.SecretName); err == nil {
// TODO: Retry the update. We don't really expect anything to conflict though.
framework.Logf("Updating secret %v in ns %v with hosts %v for ingress %v", secret.Name, secret.Namespace, host, ing.Name)
@@ -841,8 +842,8 @@ type GCEIngressController struct {
rcPath string
UID string
staticIPName string
- rc *api.ReplicationController
- svc *api.Service
+ rc *v1.ReplicationController
+ svc *v1.Service
c clientset.Interface
cloud framework.CloudConfig
}
@@ -854,8 +855,8 @@ func newTestJig(c clientset.Interface) *testJig {
// NginxIngressController manages implementation details of Ingress on Nginx.
type NginxIngressController struct {
ns string
- rc *api.ReplicationController
- pod *api.Pod
+ rc *v1.ReplicationController
+ pod *v1.Pod
c clientset.Interface
externalIP string
}
@@ -874,7 +875,7 @@ func (cont *NginxIngressController) init() {
framework.Logf("waiting for pods with label %v", rc.Spec.Selector)
sel := labels.SelectorFromSet(labels.Set(rc.Spec.Selector))
ExpectNoError(testutils.WaitForPodsWithLabelRunning(cont.c, cont.ns, sel))
- pods, err := cont.c.Core().Pods(cont.ns).List(api.ListOptions{LabelSelector: sel})
+ pods, err := cont.c.Core().Pods(cont.ns).List(v1.ListOptions{LabelSelector: sel.String()})
ExpectNoError(err)
if len(pods.Items) == 0 {
framework.Failf("Failed to find nginx ingress controller pods with selector %v", sel)
diff --git a/test/e2e/initial_resources.go b/test/e2e/initial_resources.go
index 808e59c42c5..6d590bcdf29 100644
--- a/test/e2e/initial_resources.go
+++ b/test/e2e/initial_resources.go
@@ -22,7 +22,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -51,13 +51,13 @@ var _ = framework.KubeDescribe("Initial Resources [Feature:InitialResources] [Fl
})
})
-func runPod(f *framework.Framework, name, image string) *api.Pod {
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func runPod(f *framework.Framework, name, image string) *v1.Pod {
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
diff --git a/test/e2e/job.go b/test/e2e/job.go
index 327972de3a6..6bde390f701 100644
--- a/test/e2e/job.go
+++ b/test/e2e/job.go
@@ -21,8 +21,10 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
- "k8s.io/kubernetes/pkg/apis/batch"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ batchinternal "k8s.io/kubernetes/pkg/apis/batch"
+ batch "k8s.io/kubernetes/pkg/apis/batch/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubectl"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
@@ -49,7 +51,7 @@ var _ = framework.KubeDescribe("Job", func() {
// Simplest case: all pods succeed promptly
It("should run a job to completion when tasks succeed", func() {
By("Creating a job")
- job := newTestJob("succeed", "all-succeed", api.RestartPolicyNever, parallelism, completions)
+ job := newTestJob("succeed", "all-succeed", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -68,7 +70,7 @@ var _ = framework.KubeDescribe("Job", func() {
// up to 5 minutes between restarts, making test timeouts
// due to successive failures too likely with a reasonable
// test timeout.
- job := newTestJob("failOnce", "fail-once-local", api.RestartPolicyOnFailure, parallelism, completions)
+ job := newTestJob("failOnce", "fail-once-local", v1.RestartPolicyOnFailure, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -86,7 +88,7 @@ var _ = framework.KubeDescribe("Job", func() {
// Worst case analysis: 15 failures, each taking 1 minute to
// run due to some slowness, 1 in 2^15 chance of happening,
// causing test flake. Should be very rare.
- job := newTestJob("randomlySucceedOrFail", "rand-non-local", api.RestartPolicyNever, parallelism, completions)
+ job := newTestJob("randomlySucceedOrFail", "rand-non-local", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -97,7 +99,7 @@ var _ = framework.KubeDescribe("Job", func() {
It("should keep restarting failed pods", func() {
By("Creating a job")
- job := newTestJob("fail", "all-fail", api.RestartPolicyNever, parallelism, completions)
+ job := newTestJob("fail", "all-fail", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -115,7 +117,7 @@ var _ = framework.KubeDescribe("Job", func() {
startParallelism := int32(1)
endParallelism := int32(2)
By("Creating a job")
- job := newTestJob("notTerminate", "scale-up", api.RestartPolicyNever, startParallelism, completions)
+ job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -124,7 +126,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job up")
- scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+ scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -140,7 +142,7 @@ var _ = framework.KubeDescribe("Job", func() {
startParallelism := int32(2)
endParallelism := int32(1)
By("Creating a job")
- job := newTestJob("notTerminate", "scale-down", api.RestartPolicyNever, startParallelism, completions)
+ job := newTestJob("notTerminate", "scale-down", v1.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -149,7 +151,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("scale job down")
- scaler, err := kubectl.ScalerFor(batch.Kind("Job"), f.ClientSet)
+ scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
@@ -163,7 +165,7 @@ var _ = framework.KubeDescribe("Job", func() {
It("should delete a job", func() {
By("Creating a job")
- job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+ job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
@@ -172,7 +174,7 @@ var _ = framework.KubeDescribe("Job", func() {
Expect(err).NotTo(HaveOccurred())
By("delete a job")
- reaper, err := kubectl.ReaperFor(batch.Kind("Job"), f.ClientSet)
+ reaper, err := kubectl.ReaperFor(batchinternal.Kind("Job"), f.InternalClientset)
Expect(err).NotTo(HaveOccurred())
timeout := 1 * time.Minute
err = reaper.Stop(f.Namespace.Name, job.Name, timeout, api.NewDeleteOptions(0))
@@ -186,7 +188,7 @@ var _ = framework.KubeDescribe("Job", func() {
It("should fail a job", func() {
By("Creating a job")
- job := newTestJob("notTerminate", "foo", api.RestartPolicyNever, parallelism, completions)
+ job := newTestJob("notTerminate", "foo", v1.RestartPolicyNever, parallelism, completions)
activeDeadlineSeconds := int64(10)
job.Spec.ActiveDeadlineSeconds = &activeDeadlineSeconds
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
@@ -211,35 +213,35 @@ var _ = framework.KubeDescribe("Job", func() {
})
// newTestJob returns a job which does one of several testing behaviors.
-func newTestJob(behavior, name string, rPol api.RestartPolicy, parallelism, completions int32) *batch.Job {
+func newTestJob(behavior, name string, rPol v1.RestartPolicy, parallelism, completions int32) *batch.Job {
job := &batch.Job{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: batch.JobSpec{
Parallelism: &parallelism,
Completions: &completions,
ManualSelector: newBool(false),
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{jobSelectorKey: name},
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
RestartPolicy: rPol,
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
{
Name: "data",
- VolumeSource: api.VolumeSource{
- EmptyDir: &api.EmptyDirVolumeSource{},
+ VolumeSource: v1.VolumeSource{
+ EmptyDir: &v1.EmptyDirVolumeSource{},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "c",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
MountPath: "/data",
Name: "data",
@@ -293,14 +295,14 @@ func deleteJob(c clientset.Interface, ns, name string) error {
func waitForAllPodsRunning(c clientset.Interface, ns, jobName string, parallelism int32) error {
label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: jobName}))
return wait.Poll(framework.Poll, jobTimeout, func() (bool, error) {
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options)
if err != nil {
return false, err
}
count := int32(0)
for _, p := range pods.Items {
- if p.Status.Phase == api.PodRunning {
+ if p.Status.Phase == v1.PodRunning {
count++
}
}
@@ -327,7 +329,7 @@ func waitForJobFail(c clientset.Interface, ns, jobName string, timeout time.Dura
return false, err
}
for _, c := range curr.Status.Conditions {
- if c.Type == batch.JobFailed && c.Status == api.ConditionTrue {
+ if c.Type == batch.JobFailed && c.Status == v1.ConditionTrue {
return true, nil
}
}
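The split batch import in job.go is the one spot where both API surfaces coexist: job objects are built and created with the versioned batch/v1 group, while kubectl's scaler and reaper helpers still take the internal clientset, hence batchinternal. A sketch of that split; startParallelism, endParallelism, and completions stand for the surrounding test's values, createJob is assumed to wrap the versioned Batch() client as it does in this file, and the Scale call mirrors what the test does outside the hunks shown:

job := newTestJob("notTerminate", "scale-up", v1.RestartPolicyNever, startParallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job) // objects go through batch/v1 on the versioned client
framework.ExpectNoError(err)

scaler, err := kubectl.ScalerFor(batchinternal.Kind("Job"), f.InternalClientset) // kubectl helpers still need internal types
framework.ExpectNoError(err)
waitForScale := kubectl.NewRetryParams(5*time.Second, 1*time.Minute)
waitForReplicas := kubectl.NewRetryParams(5*time.Second, 5*time.Minute)
framework.ExpectNoError(scaler.Scale(f.Namespace.Name, job.Name, uint(endParallelism), nil, waitForScale, waitForReplicas))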
diff --git a/test/e2e/kibana_logging.go b/test/e2e/kibana_logging.go
index 6695d04d8e5..9bf466e1cb6 100644
--- a/test/e2e/kibana_logging.go
+++ b/test/e2e/kibana_logging.go
@@ -20,6 +20,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
@@ -69,7 +70,7 @@ func ClusterLevelLoggingWithKibana(f *framework.Framework) {
// Wait for the Kibana pod(s) to enter the running state.
By("Checking to make sure the Kibana pods are running")
label := labels.SelectorFromSet(labels.Set(map[string]string{kibanaKey: kibanaValue}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := f.ClientSet.Core().Pods(api.NamespaceSystem).List(options)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods.Items {
diff --git a/test/e2e/kube_proxy.go b/test/e2e/kube_proxy.go
index ec9aa64a650..e3070240e90 100644
--- a/test/e2e/kube_proxy.go
+++ b/test/e2e/kube_proxy.go
@@ -24,7 +24,7 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/images/net/nat"
@@ -47,7 +47,7 @@ var _ = framework.KubeDescribe("Network", func() {
It("should set TCP CLOSE_WAIT timeout", func() {
nodes := framework.GetReadySchedulableNodesOrDie(fr.ClientSet)
- ips := collectAddresses(nodes, api.NodeInternalIP)
+ ips := collectAddresses(nodes, v1.NodeInternalIP)
if len(nodes.Items) < 2 {
framework.Skipf(
@@ -56,7 +56,7 @@ var _ = framework.KubeDescribe("Network", func() {
}
type NodeInfo struct {
- node *api.Node
+ node *v1.Node
name string
nodeIp string
}
@@ -75,15 +75,15 @@ var _ = framework.KubeDescribe("Network", func() {
zero := int64(0)
- clientPodSpec := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ clientPodSpec := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "e2e-net-client",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-client"},
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
NodeName: clientNodeInfo.name,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "e2e-net-client",
Image: kubeProxyE2eImage,
@@ -97,15 +97,15 @@ var _ = framework.KubeDescribe("Network", func() {
},
}
- serverPodSpec := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ serverPodSpec := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "e2e-net-server",
Namespace: fr.Namespace.Name,
Labels: map[string]string{"app": "e2e-net-server"},
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
NodeName: serverNodeInfo.name,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "e2e-net-server",
Image: kubeProxyE2eImage,
@@ -118,7 +118,7 @@ var _ = framework.KubeDescribe("Network", func() {
testDaemonTcpPort,
postFinTimeoutSeconds),
},
- Ports: []api.ContainerPort{
+ Ports: []v1.ContainerPort{
{
Name: "tcp",
ContainerPort: testDaemonTcpPort,
diff --git a/test/e2e/kubectl.go b/test/e2e/kubectl.go
index 8579c870e06..f48aaae6c3a 100644
--- a/test/e2e/kubectl.go
+++ b/test/e2e/kubectl.go
@@ -41,12 +41,12 @@ import (
"github.com/elazarl/goproxy"
"github.com/ghodss/yaml"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/annotations"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/kubernetes/pkg/labels"
@@ -204,7 +204,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
framework.RunKubectlOrDie("run", sjName, "--restart=OnFailure", "--generator=scheduledjob/v2alpha1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
By("verifying the ScheduledJob " + sjName + " was created")
- sj, err := c.Batch().CronJobs(ns).Get(sjName)
+ sj, err := c.BatchV2alpha1().CronJobs(ns).Get(sjName)
if err != nil {
framework.Failf("Failed getting ScheduledJob %s: %v", sjName, err)
}
@@ -215,7 +215,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
framework.Failf("Failed creating ScheduledJob %s for 1 pod with expected image %s: %#v", sjName, busyboxImage, containers)
}
- if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+ if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a ScheduledJob with correct restart policy for --restart=OnFailure")
}
})
@@ -241,7 +241,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
framework.RunKubectlOrDie("run", cjName, "--restart=OnFailure", "--generator=cronjob/v2alpha1",
"--schedule="+schedule, "--image="+busyboxImage, nsFlag)
By("verifying the CronJob " + cjName + " was created")
- sj, err := c.Batch().CronJobs(ns).Get(cjName)
+ sj, err := c.BatchV2alpha1().CronJobs(ns).Get(cjName)
if err != nil {
framework.Failf("Failed getting CronJob %s: %v", cjName, err)
}
@@ -252,7 +252,7 @@ var _ = framework.KubeDescribe("Kubectl alpha client", func() {
if containers == nil || len(containers) != 1 || containers[0].Image != busyboxImage {
framework.Failf("Failed creating CronJob %s for 1 pod with expected image %s: %#v", cjName, busyboxImage, containers)
}
- if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+ if sj.Spec.JobTemplate.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a CronJob with correct restart policy for --restart=OnFailure")
}
})
@@ -268,10 +268,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
return f.NewClusterVerification(
framework.PodStateVerification{
Selectors: map[string]string{"app": "redis"},
- ValidPhases: []api.PodPhase{api.PodRunning /*api.PodPending*/},
+ ValidPhases: []v1.PodPhase{v1.PodRunning /*v1.PodPending*/},
})
}
- forEachPod := func(podFunc func(p api.Pod)) {
+ forEachPod := func(podFunc func(p v1.Pod)) {
clusterState().ForEach(podFunc)
}
var c clientset.Interface
@@ -289,7 +289,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
pods, err := clusterState().WaitFor(atLeast, framework.PodStartTimeout)
if err != nil || len(pods) < atLeast {
// TODO: Generalize integrating debug info into these tests so we always get debug info when we need it
- framework.DumpAllNamespaceInfo(c, f.ClientSet_1_5, ns)
+ framework.DumpAllNamespaceInfo(f.ClientSet, ns)
framework.Failf("Verified %v of %v pods , error : %v", len(pods), atLeast, err)
}
}
@@ -519,8 +519,8 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
WithStdinData("abcd1234\n").
ExecOrDie()
Expect(runOutput).ToNot(ContainSubstring("stdin closed"))
- g := func(pods []*api.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
- runTestPod, _, err := util.GetFirstPod(f.ClientSet.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
+ g := func(pods []*v1.Pod) sort.Interface { return sort.Reverse(controller.ActivePods(pods)) }
+ runTestPod, _, err := util.GetFirstPod(f.InternalClientset.Core(), ns, labels.SelectorFromSet(map[string]string{"run": "run-test-3"}), 1*time.Minute, g)
if err != nil {
os.Exit(1)
}
@@ -646,7 +646,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
waitForOrFailWithDebug(1)
// Pod
- forEachPod(func(pod api.Pod) {
+ forEachPod(func(pod v1.Pod) {
output := framework.RunKubectlOrDie("describe", "pod", pod.Name, nsFlag)
requiredStrings := [][]string{
{"Name:", "redis-master-"},
@@ -700,7 +700,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
// Node
// It should be OK to list unschedulable Nodes here.
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
node := nodes.Items[0]
output = framework.RunKubectlOrDie("describe", "node", node.Name)
@@ -748,7 +748,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
// It may take a while for the pods to get registered in some cases, wait to be sure.
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
- forEachPod(func(pod api.Pod) {
+ forEachPod(func(pod v1.Pod) {
framework.Logf("wait on redis-master startup in %v ", ns)
framework.LookForStringInLog(ns, pod.Name, "redis-master", "The server is now ready to accept connections", framework.PodStartTimeout)
})
@@ -873,7 +873,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
- forEachPod(func(pod api.Pod) {
+ forEachPod(func(pod v1.Pod) {
By("checking for a matching strings")
_, err := framework.LookForStringInLog(ns, pod.Name, containerName, "The server is now ready to accept connections", framework.PodStartTimeout)
Expect(err).NotTo(HaveOccurred())
@@ -923,12 +923,12 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
By("Waiting for Redis master to start.")
waitForOrFailWithDebug(1)
By("patching all pods")
- forEachPod(func(pod api.Pod) {
+ forEachPod(func(pod v1.Pod) {
framework.RunKubectlOrDie("patch", "pod", pod.Name, nsFlag, "-p", "{\"metadata\":{\"annotations\":{\"x\":\"y\"}}}")
})
By("checking annotations")
- forEachPod(func(pod api.Pod) {
+ forEachPod(func(pod v1.Pod) {
found := false
for key, val := range pod.Annotations {
if key == "x" && val == "y" {
@@ -1082,7 +1082,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
By("rolling-update to same image controller")
- runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(api.PullIfNotPresent), nsFlag)
+ runKubectlRetryOrDie("rolling-update", rcName, "--update-period=1s", "--image="+nginxImage, "--image-pull-policy="+string(v1.PullIfNotPresent), nsFlag)
framework.ValidateController(c, nginxImage, 1, rcName, "run="+rcName, noOpValidatorFn, ns)
})
})
@@ -1166,7 +1166,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
framework.Failf("Failed creating job %s for 1 pod with expected image %s: %#v", jobName, nginxImage, containers)
}
- if job.Spec.Template.Spec.RestartPolicy != api.RestartPolicyOnFailure {
+ if job.Spec.Template.Spec.RestartPolicy != v1.RestartPolicyOnFailure {
framework.Failf("Failed creating a job with correct restart policy for --restart=OnFailure")
}
})
@@ -1199,7 +1199,7 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
if containers == nil || len(containers) != 1 || containers[0].Image != nginxImage {
framework.Failf("Failed creating pod %s with expected image %s", podName, nginxImage)
}
- if pod.Spec.RestartPolicy != api.RestartPolicyNever {
+ if pod.Spec.RestartPolicy != v1.RestartPolicyNever {
framework.Failf("Failed creating a pod with correct restart policy for --restart=Never")
}
})
@@ -1333,10 +1333,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
framework.KubeDescribe("Kubectl taint", func() {
It("should update the taint on a node", func() {
- testTaint := api.Taint{
+ testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-001-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
- Effect: api.TaintEffectNoSchedule,
+ Effect: v1.TaintEffectNoSchedule,
}
nodeName := getNodeThatCanRunPod(f)
@@ -1364,10 +1364,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
})
It("should remove all the taints with the same key off a node", func() {
- testTaint := api.Taint{
+ testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-002-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
- Effect: api.TaintEffectNoSchedule,
+ Effect: v1.TaintEffectNoSchedule,
}
nodeName := getNodeThatCanRunPod(f)
@@ -1385,10 +1385,10 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
}
checkOutput(output, requiredStrings)
- newTestTaint := api.Taint{
+ newTestTaint := v1.Taint{
Key: testTaint.Key,
Value: "another-testing-taint-value",
- Effect: api.TaintEffectPreferNoSchedule,
+ Effect: v1.TaintEffectPreferNoSchedule,
}
By("adding another taint " + newTestTaint.ToString() + " to the node")
runKubectlRetryOrDie("taint", "nodes", nodeName, newTestTaint.ToString())
@@ -1434,11 +1434,11 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
if len(quota.Spec.Hard) != 2 {
framework.Failf("Expected two resources, got %v", quota.Spec.Hard)
}
- r, found := quota.Spec.Hard[api.ResourcePods]
+ r, found := quota.Spec.Hard[v1.ResourcePods]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected pods=1000000, got %v", r)
}
- r, found = quota.Spec.Hard[api.ResourceServices]
+ r, found = quota.Spec.Hard[v1.ResourceServices]
if expected := resource.MustParse("1000000"); !found || (&r).Cmp(expected) != 0 {
framework.Failf("Expected services=1000000, got %v", r)
}
@@ -1461,14 +1461,14 @@ var _ = framework.KubeDescribe("Kubectl client", func() {
if len(quota.Spec.Scopes) != 2 {
framework.Failf("Expected two scopes, got %v", quota.Spec.Scopes)
}
- scopes := make(map[api.ResourceQuotaScope]struct{})
+ scopes := make(map[v1.ResourceQuotaScope]struct{})
for _, scope := range quota.Spec.Scopes {
scopes[scope] = struct{}{}
}
- if _, found := scopes[api.ResourceQuotaScopeBestEffort]; !found {
+ if _, found := scopes[v1.ResourceQuotaScopeBestEffort]; !found {
framework.Failf("Expected BestEffort scope, got %v", quota.Spec.Scopes)
}
- if _, found := scopes[api.ResourceQuotaScopeNotTerminating]; !found {
+ if _, found := scopes[v1.ResourceQuotaScopeNotTerminating]; !found {
framework.Failf("Expected NotTerminating scope, got %v", quota.Spec.Scopes)
}
})
@@ -1640,8 +1640,8 @@ func readBytesFromFile(filename string) []byte {
return data
}
-func readReplicationControllerFromString(contents string) *api.ReplicationController {
- rc := api.ReplicationController{}
+func readReplicationControllerFromString(contents string) *v1.ReplicationController {
+ rc := v1.ReplicationController{}
if err := yaml.Unmarshal([]byte(contents), &rc); err != nil {
framework.Failf(err.Error())
}
@@ -1662,12 +1662,12 @@ func modifyReplicationControllerConfiguration(contents string) io.Reader {
return bytes.NewReader(data)
}
-func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(api.ReplicationController)) {
- var rcs *api.ReplicationControllerList
+func forEachReplicationController(c clientset.Interface, ns, selectorKey, selectorValue string, fn func(v1.ReplicationController)) {
+ var rcs *v1.ReplicationControllerList
var err error
for t := time.Now(); time.Since(t) < framework.PodListTimeout; time.Sleep(framework.Poll) {
label := labels.SelectorFromSet(labels.Set(map[string]string{selectorKey: selectorValue}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
rcs, err = c.Core().ReplicationControllers(ns).List(options)
Expect(err).NotTo(HaveOccurred())
if len(rcs.Items) > 0 {
@@ -1684,7 +1684,7 @@ func forEachReplicationController(c clientset.Interface, ns, selectorKey, select
}
}
-func validateReplicationControllerConfiguration(rc api.ReplicationController) {
+func validateReplicationControllerConfiguration(rc v1.ReplicationController) {
if rc.Name == "redis-master" {
if _, ok := rc.Annotations[annotations.LastAppliedConfigAnnotation]; !ok {
framework.Failf("Annotation not found in modified configuration:\n%v\n", rc)
diff --git a/test/e2e/kubelet.go b/test/e2e/kubelet.go
index 981770cbaa0..02b2610414c 100644
--- a/test/e2e/kubelet.go
+++ b/test/e2e/kubelet.go
@@ -21,8 +21,8 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
@@ -98,7 +98,7 @@ func waitTillNPodsRunningOnNodes(c clientset.Interface, nodeNames sets.String, p
func updateNodeLabels(c clientset.Interface, nodeNames sets.String, toAdd, toRemove map[string]string) {
const maxRetries = 5
for nodeName := range nodeNames {
- var node *api.Node
+ var node *v1.Node
var err error
for i := 0; i < maxRetries; i++ {
node, err = c.Core().Nodes().Get(nodeName)
@@ -189,12 +189,13 @@ var _ = framework.KubeDescribe("kubelet", func() {
rcName := fmt.Sprintf("cleanup%d-%s", totalPods, string(uuid.NewUUID()))
Expect(framework.RunRC(testutils.RCConfig{
- Client: f.ClientSet,
- Name: rcName,
- Namespace: f.Namespace.Name,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: totalPods,
- NodeSelector: nodeLabels,
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: rcName,
+ Namespace: f.Namespace.Name,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: totalPods,
+ NodeSelector: nodeLabels,
})).NotTo(HaveOccurred())
// Perform a sanity check so that we know all desired pods are
// running on the nodes according to kubelet. The timeout is set to
@@ -207,7 +208,7 @@ var _ = framework.KubeDescribe("kubelet", func() {
}
By("Deleting the RC")
- framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName)
+ framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
// Check that the pods really are gone by querying /runningpods on the
// node. The /runningpods handler checks the container runtime (or its
// cache) and returns a list of running pods. Some possible causes of
diff --git a/test/e2e/kubelet_perf.go b/test/e2e/kubelet_perf.go
index faace8a058d..99417ced1b5 100644
--- a/test/e2e/kubelet_perf.go
+++ b/test/e2e/kubelet_perf.go
@@ -22,7 +22,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/kubelet/api/v1alpha1/stats"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -70,11 +70,12 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
// TODO: Use a more realistic workload
Expect(framework.RunRC(testutils.RCConfig{
- Client: f.ClientSet,
- Name: rcName,
- Namespace: f.Namespace.Name,
- Image: framework.GetPauseImageName(f.ClientSet),
- Replicas: totalPods,
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Name: rcName,
+ Namespace: f.Namespace.Name,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Replicas: totalPods,
})).NotTo(HaveOccurred())
// Log once and flush the stats.
@@ -116,7 +117,7 @@ func runResourceTrackingTest(f *framework.Framework, podsPerNode int, nodeNames
verifyCPULimits(expectedCPU, cpuSummary)
By("Deleting the RC")
- framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, rcName)
+ framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)
}
func verifyMemoryLimits(c clientset.Interface, expected framework.ResourceUsagePerContainer, actual framework.ResourceUsagePerNode) {
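Both kubelet.go and kubelet_perf.go now thread two clients into RunRC and DeleteRCAndPods: the versioned clientset drives the API calls, while the internal clientset is only needed for the kubectl-backed deletion path. A condensed sketch of that plumbing, with rcName and totalPods standing in for the surrounding tests' values:

cfg := testutils.RCConfig{
	Client:         f.ClientSet,         // versioned release_1_5 client
	InternalClient: f.InternalClientset, // internal client for kubectl helpers
	Name:           rcName,
	Namespace:      f.Namespace.Name,
	Image:          framework.GetPauseImageName(f.ClientSet),
	Replicas:       totalPods,
}
framework.ExpectNoError(framework.RunRC(cfg))
defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, rcName)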
diff --git a/test/e2e/limit_range.go b/test/e2e/limit_range.go
index 9c7d1d8674b..7a52af3bc96 100644
--- a/test/e2e/limit_range.go
+++ b/test/e2e/limit_range.go
@@ -19,8 +19,8 @@ package e2e
import (
"fmt"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -37,8 +37,8 @@ var _ = framework.KubeDescribe("LimitRange", func() {
max := getResourceList("500m", "500Mi")
defaultLimit := getResourceList("500m", "500Mi")
defaultRequest := getResourceList("100m", "200Mi")
- maxLimitRequestRatio := api.ResourceList{}
- limitRange := newLimitRange("limit-range", api.LimitTypeContainer,
+ maxLimitRequestRatio := v1.ResourceList{}
+ limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
min, max,
defaultLimit, defaultRequest,
maxLimitRequestRatio)
@@ -47,13 +47,13 @@ var _ = framework.KubeDescribe("LimitRange", func() {
By("Fetching the LimitRange to ensure it has proper values")
limitRange, err = f.ClientSet.Core().LimitRanges(f.Namespace.Name).Get(limitRange.Name)
- expected := api.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
- actual := api.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
+ expected := v1.ResourceRequirements{Requests: defaultRequest, Limits: defaultLimit}
+ actual := v1.ResourceRequirements{Requests: limitRange.Spec.Limits[0].DefaultRequest, Limits: limitRange.Spec.Limits[0].Default}
err = equalResourceRequirement(expected, actual)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod with no resource requirements")
- pod := newTestPod(f, "pod-no-resources", api.ResourceList{}, api.ResourceList{})
+ pod := newTestPod(f, "pod-no-resources", v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
@@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("LimitRange", func() {
// This is an interesting case, so it's worth a comment
// If you specify a Limit, and no Request, the Limit will default to the Request
// This means that the LimitRange.DefaultRequest will ONLY take effect if a container.resources.limit is not supplied
- expected = api.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")}
+ expected = v1.ResourceRequirements{Requests: getResourceList("300m", "150Mi"), Limits: getResourceList("300m", "500Mi")}
for i := range pod.Spec.Containers {
err = equalResourceRequirement(expected, pod.Spec.Containers[i].Resources)
if err != nil {
@@ -91,19 +91,19 @@ var _ = framework.KubeDescribe("LimitRange", func() {
}
By("Failing to create a Pod with less than min resources")
- pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), api.ResourceList{})
+ pod = newTestPod(f, podName, getResourceList("10m", "50Mi"), v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Failing to create a Pod with more than max resources")
- pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), api.ResourceList{})
+ pod = newTestPod(f, podName, getResourceList("600m", "600Mi"), v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
})
})
-func equalResourceRequirement(expected api.ResourceRequirements, actual api.ResourceRequirements) error {
+func equalResourceRequirement(expected v1.ResourceRequirements, actual v1.ResourceRequirements) error {
framework.Logf("Verifying requests: expected %v with actual %v", expected.Requests, actual.Requests)
err := equalResourceList(expected.Requests, actual.Requests)
if err != nil {
@@ -117,7 +117,7 @@ func equalResourceRequirement(expected api.ResourceRequirements, actual api.Reso
return nil
}
-func equalResourceList(expected api.ResourceList, actual api.ResourceList) error {
+func equalResourceList(expected v1.ResourceList, actual v1.ResourceList) error {
for k, v := range expected {
if actualValue, found := actual[k]; !found || (v.Cmp(actualValue) != 0) {
return fmt.Errorf("resource %v expected %v actual %v", k, v.String(), actualValue.String())
@@ -131,28 +131,28 @@ func equalResourceList(expected api.ResourceList, actual api.ResourceList) error
return nil
}
-func getResourceList(cpu, memory string) api.ResourceList {
- res := api.ResourceList{}
+func getResourceList(cpu, memory string) v1.ResourceList {
+ res := v1.ResourceList{}
if cpu != "" {
- res[api.ResourceCPU] = resource.MustParse(cpu)
+ res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
- res[api.ResourceMemory] = resource.MustParse(memory)
+ res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}
// newLimitRange returns a limit range with specified data
-func newLimitRange(name string, limitType api.LimitType,
+func newLimitRange(name string, limitType v1.LimitType,
min, max,
defaultLimit, defaultRequest,
- maxLimitRequestRatio api.ResourceList) *api.LimitRange {
- return &api.LimitRange{
- ObjectMeta: api.ObjectMeta{
+ maxLimitRequestRatio v1.ResourceList) *v1.LimitRange {
+ return &v1.LimitRange{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.LimitRangeSpec{
- Limits: []api.LimitRangeItem{
+ Spec: v1.LimitRangeSpec{
+ Limits: []v1.LimitRangeItem{
{
Type: limitType,
Min: min,
@@ -167,17 +167,17 @@ func newLimitRange(name string, limitType api.LimitType,
}
// newTestPod returns a pod that has the specified requests and limits
-func newTestPod(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod {
- return &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func newTestPod(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "pause",
Image: framework.GetPauseImageName(f.ClientSet),
- Resources: api.ResourceRequirements{
+ Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
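Wiring the converted limit_range.go helpers together looks roughly like the sketch below; the min values and error handling are illustrative, the rest mirrors the spec values in the hunks above:

limitRange := newLimitRange("limit-range", v1.LimitTypeContainer,
	getResourceList("50m", "100Mi"),  // min (illustrative)
	getResourceList("500m", "500Mi"), // max
	getResourceList("500m", "500Mi"), // default limit
	getResourceList("100m", "200Mi"), // default request
	v1.ResourceList{},                // no max limit/request ratio
)
limitRange, err := f.ClientSet.Core().LimitRanges(f.Namespace.Name).Create(limitRange)
framework.ExpectNoError(err)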
diff --git a/test/e2e/load.go b/test/e2e/load.go
index 6c400054e5d..c6e57e6ac1e 100644
--- a/test/e2e/load.go
+++ b/test/e2e/load.go
@@ -27,8 +27,9 @@ import (
"sync"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/transport"
"k8s.io/kubernetes/pkg/labels"
@@ -62,7 +63,7 @@ const (
// To run this suite you must explicitly ask for it by setting the
// -t/--test flag or ginkgo.focus flag.
var _ = framework.KubeDescribe("Load capacity", func() {
- var clientset internalclientset.Interface
+ var clientset clientset.Interface
var nodeCount int
var ns string
var configs []*testutils.RCConfig
@@ -140,7 +141,7 @@ var _ = framework.KubeDescribe("Load capacity", func() {
totalPods := itArg.podsPerNode * nodeCount
configs = generateRCConfigs(totalPods, itArg.image, itArg.command, namespaces)
- var services []*api.Service
+ var services []*v1.Service
// Read the environment variable to see if we want to create services
createServices := os.Getenv("CREATE_SERVICES")
if createServices == "true" {
@@ -206,8 +207,9 @@ var _ = framework.KubeDescribe("Load capacity", func() {
}
})
-func createClients(numberOfClients int) ([]*internalclientset.Clientset, error) {
- clients := make([]*internalclientset.Clientset, numberOfClients)
+func createClients(numberOfClients int) ([]*clientset.Clientset, []*internalclientset.Clientset, error) {
+ clients := make([]*clientset.Clientset, numberOfClients)
+ internalClients := make([]*internalclientset.Clientset, numberOfClients)
for i := 0; i < numberOfClients; i++ {
config, err := framework.LoadConfig()
Expect(err).NotTo(HaveOccurred())
@@ -223,11 +225,11 @@ func createClients(numberOfClients int) ([]*internalclientset.Clientset, error)
// each client here.
transportConfig, err := config.TransportConfig()
if err != nil {
- return nil, err
+ return nil, nil, err
}
tlsConfig, err := transport.TLSConfigFor(transportConfig)
if err != nil {
- return nil, err
+ return nil, nil, err
}
config.Transport = utilnet.SetTransportDefaults(&http.Transport{
Proxy: http.ProxyFromEnvironment,
@@ -243,13 +245,18 @@ func createClients(numberOfClients int) ([]*internalclientset.Clientset, error)
// Transport field.
config.TLSClientConfig = restclient.TLSClientConfig{}
- c, err := internalclientset.NewForConfig(config)
+ c, err := clientset.NewForConfig(config)
if err != nil {
- return nil, err
+ return nil, nil, err
}
clients[i] = c
+ internalClient, err := internalclientset.NewForConfig(config)
+ if err != nil {
+ return nil, nil, err
+ }
+ internalClients[i] = internalClient
}
- return clients, nil
+ return clients, internalClients, nil
}
func computeRCCounts(total int) (int, int, int) {
@@ -266,7 +273,7 @@ func computeRCCounts(total int) (int, int, int) {
return smallRCCount, mediumRCCount, bigRCCount
}
-func generateRCConfigs(totalPods int, image string, command []string, nss []*api.Namespace) []*testutils.RCConfig {
+func generateRCConfigs(totalPods int, image string, command []string, nss []*v1.Namespace) []*testutils.RCConfig {
configs := make([]*testutils.RCConfig, 0)
smallRCCount, mediumRCCount, bigRCCount := computeRCCounts(totalPods)
@@ -277,49 +284,51 @@ func generateRCConfigs(totalPods int, image string, command []string, nss []*api
// Create a number of clients to better simulate the real use case
// where not everyone is using exactly the same client.
rcsPerClient := 20
- clients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
+ clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
framework.ExpectNoError(err)
for i := 0; i < len(configs); i++ {
configs[i].Client = clients[i%len(clients)]
+ configs[i].InternalClient = internalClients[i%len(internalClients)]
}
return configs
}
func generateRCConfigsForGroup(
- nss []*api.Namespace, groupName string, size, count int, image string, command []string) []*testutils.RCConfig {
+ nss []*v1.Namespace, groupName string, size, count int, image string, command []string) []*testutils.RCConfig {
configs := make([]*testutils.RCConfig, 0, count)
for i := 1; i <= count; i++ {
config := &testutils.RCConfig{
- Client: nil, // this will be overwritten later
- Name: groupName + "-" + strconv.Itoa(i),
- Namespace: nss[i%len(nss)].Name,
- Timeout: 10 * time.Minute,
- Image: image,
- Command: command,
- Replicas: size,
- CpuRequest: 10, // 0.01 core
- MemRequest: 26214400, // 25MB
+ Client: nil, // this will be overwritten later
+ InternalClient: nil, // this will be overwritten later
+ Name: groupName + "-" + strconv.Itoa(i),
+ Namespace: nss[i%len(nss)].Name,
+ Timeout: 10 * time.Minute,
+ Image: image,
+ Command: command,
+ Replicas: size,
+ CpuRequest: 10, // 0.01 core
+ MemRequest: 26214400, // 25MB
}
configs = append(configs, config)
}
return configs
}
-func generateServicesForConfigs(configs []*testutils.RCConfig) []*api.Service {
- services := make([]*api.Service, 0, len(configs))
+func generateServicesForConfigs(configs []*testutils.RCConfig) []*v1.Service {
+ services := make([]*v1.Service, 0, len(configs))
for _, config := range configs {
serviceName := config.Name + "-svc"
labels := map[string]string{"name": config.Name}
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
Namespace: config.Namespace,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: labels,
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
@@ -368,11 +377,11 @@ func scaleRC(wg *sync.WaitGroup, config *testutils.RCConfig, scalingTime time.Du
sleepUpTo(scalingTime)
newSize := uint(rand.Intn(config.Replicas) + config.Replicas/2)
- framework.ExpectNoError(framework.ScaleRC(config.Client, config.Namespace, config.Name, newSize, true),
+ framework.ExpectNoError(framework.ScaleRC(config.Client, config.InternalClient, config.Namespace, config.Name, newSize, true),
fmt.Sprintf("scaling rc %s for the first time", config.Name))
selector := labels.SelectorFromSet(labels.Set(map[string]string{"name": config.Name}))
- options := api.ListOptions{
- LabelSelector: selector,
+ options := v1.ListOptions{
+ LabelSelector: selector.String(),
ResourceVersion: "0",
}
_, err := config.Client.Core().Pods(config.Namespace).List(options)
@@ -396,16 +405,16 @@ func deleteRC(wg *sync.WaitGroup, config *testutils.RCConfig, deletingTime time.
if framework.TestContext.GarbageCollectorEnabled {
framework.ExpectNoError(framework.DeleteRCAndWaitForGC(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
} else {
- framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
+ framework.ExpectNoError(framework.DeleteRCAndPods(config.Client, config.InternalClient, config.Namespace, config.Name), fmt.Sprintf("deleting rc %s", config.Name))
}
}
-func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*api.Namespace, error) {
- namespaces := []*api.Namespace{}
+func CreateNamespaces(f *framework.Framework, namespaceCount int, namePrefix string) ([]*v1.Namespace, error) {
+ namespaces := []*v1.Namespace{}
for i := 1; i <= namespaceCount; i++ {
namespace, err := f.CreateNamespace(fmt.Sprintf("%v-%d", namePrefix, i), nil)
if err != nil {
- return []*api.Namespace{}, err
+ return []*v1.Namespace{}, err
}
namespaces = append(namespaces, namespace)
}
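The load test keeps the internal clientset only as a side channel for the kubectl-based scale and delete helpers; everything else moves to the versioned client. A sketch of how a caller consumes the new createClients pair; the service-creation loop sits outside the hunks above, so submitting the generated v1.Service objects through the first versioned client is an assumption here:

clients, internalClients, err := createClients((len(configs) + rcsPerClient - 1) / rcsPerClient)
framework.ExpectNoError(err)
for i := range configs {
	configs[i].Client = clients[i%len(clients)]
	configs[i].InternalClient = internalClients[i%len(internalClients)]
}
for _, svc := range generateServicesForConfigs(configs) {
	_, err := clients[0].Core().Services(svc.Namespace).Create(svc)
	framework.ExpectNoError(err)
}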
diff --git a/test/e2e/logging_soak.go b/test/e2e/logging_soak.go
index 87f1647011c..a95feea4e23 100644
--- a/test/e2e/logging_soak.go
+++ b/test/e2e/logging_soak.go
@@ -25,7 +25,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -95,9 +95,9 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
appName := "logging-soak" + podname
podlables := f.CreatePodsPerNodeForSimpleApp(
appName,
- func(n api.Node) api.PodSpec {
- return api.PodSpec{
- Containers: []api.Container{{
+ func(n v1.Node) v1.PodSpec {
+ return v1.PodSpec{
+ Containers: []v1.Container{{
Name: "logging-soak",
Image: "gcr.io/google_containers/busybox:1.24",
Args: []string{
@@ -107,7 +107,7 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
},
}},
NodeName: n.Name,
- RestartPolicy: api.RestartPolicyAlways,
+ RestartPolicy: v1.RestartPolicyAlways,
}
},
totalPods,
@@ -116,10 +116,10 @@ func RunLogPodsWithSleepOf(f *framework.Framework, sleep time.Duration, podname
logSoakVerification := f.NewClusterVerification(
framework.PodStateVerification{
Selectors: podlables,
- ValidPhases: []api.PodPhase{api.PodRunning, api.PodSucceeded},
+ ValidPhases: []v1.PodPhase{v1.PodRunning, v1.PodSucceeded},
// we don't validate total log data, since there is no guarantee that all logs will be stored forever.
// instead, we just validate that some logs are being created in stdout.
- Verify: func(p api.Pod) (bool, error) {
+ Verify: func(p v1.Pod) (bool, error) {
s, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "logging-soak", "logs-123", 1*time.Second)
return s != "", err
},
diff --git a/test/e2e/mesos.go b/test/e2e/mesos.go
index 1ffb07d0af2..bb65d1597d3 100644
--- a/test/e2e/mesos.go
+++ b/test/e2e/mesos.go
@@ -19,9 +19,9 @@ package e2e
import (
"fmt"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -45,7 +45,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
nodeClient := f.ClientSet.Core().Nodes()
rackA := labels.SelectorFromSet(map[string]string{"k8s.mesosphere.io/attribute-rack": "1"})
- options := api.ListOptions{LabelSelector: rackA}
+ options := v1.ListOptions{LabelSelector: rackA.String()}
nodes, err := nodeClient.List(options)
if err != nil {
framework.Failf("Failed to query for node: %v", err)
@@ -54,7 +54,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
var addr string
for _, a := range nodes.Items[0].Status.Addresses {
- if a.Type == api.NodeInternalIP {
+ if a.Type == v1.NodeInternalIP {
addr = a.Address
}
}
@@ -79,18 +79,18 @@ var _ = framework.KubeDescribe("Mesos", func() {
// scheduled onto it.
By("Trying to launch a pod with a label to get a node which can launch it.")
podName := "with-label"
- _, err := c.Core().Pods(ns).Create(&api.Pod{
+ _, err := c.Core().Pods(ns).Create(&v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Annotations: map[string]string{
"k8s.mesosphere.io/roles": "public",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: podName,
Image: framework.GetPauseImageName(f.ClientSet),
@@ -110,7 +110,7 @@ var _ = framework.KubeDescribe("Mesos", func() {
rack2 := labels.SelectorFromSet(map[string]string{
"k8s.mesosphere.io/attribute-rack": "2",
})
- options := api.ListOptions{LabelSelector: rack2}
+ options := v1.ListOptions{LabelSelector: rack2.String()}
nodes, err := nodeClient.List(options)
framework.ExpectNoError(err)
diff --git a/test/e2e/metrics_grabber_test.go b/test/e2e/metrics_grabber_test.go
index 4e0c39dbe00..92658802b9d 100644
--- a/test/e2e/metrics_grabber_test.go
+++ b/test/e2e/metrics_grabber_test.go
@@ -19,8 +19,8 @@ package e2e
import (
"strings"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/metrics"
"k8s.io/kubernetes/test/e2e/framework"
@@ -59,7 +59,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a Scheduler.", func() {
By("Proxying to Pod through the API server")
// Check if master Node is registered
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err)
var masterRegistered = false
@@ -80,7 +80,7 @@ var _ = framework.KubeDescribe("MetricsGrabber", func() {
It("should grab all metrics from a ControllerManager.", func() {
By("Proxying to Pod through the API server")
// Check if master Node is registered
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
framework.ExpectNoError(err)
var masterRegistered = false
diff --git a/test/e2e/monitoring.go b/test/e2e/monitoring.go
index d4115d58f1f..f09640a5259 100644
--- a/test/e2e/monitoring.go
+++ b/test/e2e/monitoring.go
@@ -24,7 +24,8 @@ import (
influxdb "github.com/influxdata/influxdb/client"
"k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
@@ -101,7 +102,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// is running (which would be an error except during a rolling update).
for _, rcLabel := range rcLabels {
selector := labels.Set{"k8s-app": rcLabel}.AsSelector()
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
deploymentList, err := c.Extensions().Deployments(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
@@ -121,7 +122,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// Check all the replication controllers.
for _, rc := range rcList.Items {
selector := labels.Set(rc.Spec.Selector).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
@@ -136,7 +137,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// Do the same for all deployments.
for _, rc := range deploymentList.Items {
selector := labels.Set(rc.Spec.Selector.MatchLabels).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
@@ -151,7 +152,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
// And for pet sets.
for _, ps := range psList.Items {
selector := labels.Set(ps.Spec.Selector.MatchLabels).AsSelector()
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
return nil, err
@@ -168,7 +169,7 @@ func verifyExpectedRcsExistAndGetExpectedPods(c clientset.Interface) ([]string,
}
func expectedServicesExist(c clientset.Interface) error {
- serviceList, err := c.Core().Services(api.NamespaceSystem).List(api.ListOptions{})
+ serviceList, err := c.Core().Services(api.NamespaceSystem).List(v1.ListOptions{})
if err != nil {
return err
}
@@ -187,7 +188,7 @@ func expectedServicesExist(c clientset.Interface) error {
func getAllNodesInCluster(c clientset.Interface) ([]string, error) {
// It should be OK to list unschedulable Nodes here.
- nodeList, err := c.Core().Nodes().List(api.ListOptions{})
+ nodeList, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
return nil, err
}
@@ -281,7 +282,7 @@ func testMonitoringUsingHeapsterInfluxdb(c clientset.Interface) {
func printDebugInfo(c clientset.Interface) {
set := labels.Set{"k8s-app": "heapster"}
- options := api.ListOptions{LabelSelector: set.AsSelector()}
+ options := v1.ListOptions{LabelSelector: set.AsSelector().String()}
podList, err := c.Core().Pods(api.NamespaceSystem).List(options)
if err != nil {
framework.Logf("Error while listing pods %v", err)
diff --git a/test/e2e/namespace.go b/test/e2e/namespace.go
index d2e65901712..97c62323f6b 100644
--- a/test/e2e/namespace.go
+++ b/test/e2e/namespace.go
@@ -22,8 +22,8 @@ import (
"sync"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -60,7 +60,7 @@ func extinguish(f *framework.Framework, totalNS int, maxAllowedAfterDel int, max
framework.ExpectNoError(wait.Poll(2*time.Second, time.Duration(maxSeconds)*time.Second,
func() (bool, error) {
var cnt = 0
- nsList, err := f.ClientSet.Core().Namespaces().List(api.ListOptions{})
+ nsList, err := f.ClientSet.Core().Namespaces().List(v1.ListOptions{})
if err != nil {
return false, err
}
@@ -89,12 +89,12 @@ func ensurePodsAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By("Creating a pod in the namespace")
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "test-pod",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: framework.GetPauseImageName(f.ClientSet),
@@ -145,13 +145,13 @@ func ensureServicesAreRemovedWhenNamespaceIsDeleted(f *framework.Framework) {
"foo": "bar",
"baz": "blah",
}
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: labels,
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
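
The namespace.go changes are purely type moves: object literals switch from the internal api package to api/v1 while the field layout stays the same. A minimal v1.Pod literal in that shape, with the image taken as a parameter (the test uses framework.GetPauseImageName) and a hypothetical constructor name:

package example

import "k8s.io/kubernetes/pkg/api/v1"

// minimalPod mirrors the converted literal above: v1.ObjectMeta, v1.PodSpec
// and v1.Container replace their internal api counterparts one for one.
func minimalPod(name, image string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: name,
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{Name: "nginx", Image: image},
			},
		},
	}
}
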
diff --git a/test/e2e/network_partition.go b/test/e2e/network_partition.go
index 53d3ac10d0c..a8b6c851001 100644
--- a/test/e2e/network_partition.go
+++ b/test/e2e/network_partition.go
@@ -22,8 +22,9 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@@ -40,7 +41,7 @@ import (
// At the end (even in case of errors), the network traffic is brought back to normal.
// This function executes commands on a node so it will work only for some
// environments.
-func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *api.Node, testFunc func()) {
+func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1.Node, testFunc func()) {
host := framework.GetNodeExternalIP(node)
master := framework.GetMasterAddress(c)
By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
@@ -54,13 +55,13 @@ func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *ap
}()
framework.Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
- if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, true, resizeNodeReadyTimeout) {
+ if !framework.WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
framework.Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
}
framework.BlockNetwork(host, master)
framework.Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
- if !framework.WaitForNodeToBe(c, node.Name, api.NodeReady, false, resizeNodeNotReadyTimeout) {
+ if !framework.WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {
framework.Failf("Node %s did not become not-ready within %v", node.Name, resizeNodeNotReadyTimeout)
}
@@ -68,14 +69,14 @@ func testUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *ap
// network traffic is unblocked in a deferred function
}
-func expectNodeReadiness(isReady bool, newNode chan *api.Node) {
+func expectNodeReadiness(isReady bool, newNode chan *v1.Node) {
timeout := false
expected := false
timer := time.After(nodeReadinessTimeout)
for !expected && !timeout {
select {
case n := <-newNode:
- if framework.IsNodeConditionSetAsExpected(n, api.NodeReady, isReady) {
+ if framework.IsNodeConditionSetAsExpected(n, v1.NodeReady, isReady) {
expected = true
} else {
framework.Logf("Observed node ready status is NOT %v as expected", isReady)
@@ -89,24 +90,24 @@ func expectNodeReadiness(isReady bool, newNode chan *api.Node) {
}
}
-func podOnNode(podName, nodeName string, image string) *api.Pod {
- return &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func podOnNode(podName, nodeName string, image string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{
"name": podName,
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: podName,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
NodeName: nodeName,
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
}
@@ -158,16 +159,16 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
It("All pods on the unreachable node should be marked as NotReady upon the node turn NotReady "+
"AND all pods should be mark back to Ready when the node get back to Ready before pod eviction timeout", func() {
By("choose a node - we will block all network traffic on this node")
- var podOpts api.ListOptions
- nodeOpts := api.ListOptions{}
+ var podOpts v1.ListOptions
+ nodeOpts := v1.ListOptions{}
nodes, err := c.Core().Nodes().List(nodeOpts)
Expect(err).NotTo(HaveOccurred())
- framework.FilterNodes(nodes, func(node api.Node) bool {
- if !framework.IsNodeConditionSetAsExpected(&node, api.NodeReady, true) {
+ framework.FilterNodes(nodes, func(node v1.Node) bool {
+ if !framework.IsNodeConditionSetAsExpected(&node, v1.NodeReady, true) {
return false
}
- podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)}
- pods, err := c.Core().Pods(api.NamespaceAll).List(podOpts)
+ podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
+ pods, err := c.Core().Pods(v1.NamespaceAll).List(podOpts)
if err != nil || len(pods.Items) <= 0 {
return false
}
@@ -177,7 +178,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
framework.Failf("No eligible node were found: %d", len(nodes.Items))
}
node := nodes.Items[0]
- podOpts = api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name)}
+ podOpts = v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, node.Name).String()}
if err = framework.WaitForMatchPodsCondition(c, podOpts, "Running and Ready", podReadyTimeout, testutils.PodRunningReady); err != nil {
framework.Failf("Pods on node %s are not ready and running within %v: %v", node.Name, podReadyTimeout, err)
}
@@ -185,25 +186,25 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
By("Set up watch on node status")
nodeSelector := fields.OneTermEqualSelector("metadata.name", node.Name)
stopCh := make(chan struct{})
- newNode := make(chan *api.Node)
+ newNode := make(chan *v1.Node)
var controller *cache.Controller
_, controller = cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
- options.FieldSelector = nodeSelector
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ options.FieldSelector = nodeSelector.String()
obj, err := f.ClientSet.Core().Nodes().List(options)
return runtime.Object(obj), err
},
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
- options.FieldSelector = nodeSelector
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ options.FieldSelector = nodeSelector.String()
return f.ClientSet.Core().Nodes().Watch(options)
},
},
- &api.Node{},
+ &v1.Node{},
0,
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
- n, ok := newObj.(*api.Node)
+ n, ok := newObj.(*v1.Node)
Expect(ok).To(Equal(true))
newNode <- n
@@ -262,7 +263,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
@@ -327,7 +328,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
By("choose a node with at least one pod - we will block some network traffic on this node")
label := labels.SelectorFromSet(labels.Set(map[string]string{"name": name}))
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
@@ -385,8 +386,8 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
})
It("should come back up if node goes down [Slow] [Disruptive]", func() {
- petMounts := []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
- podMounts := []api.VolumeMount{{Name: "home", MountPath: "/home"}}
+ petMounts := []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
+ podMounts := []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps := newStatefulSet(psName, ns, headlessSvcName, 3, petMounts, podMounts, labels)
_, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
@@ -399,16 +400,16 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
restartNodes(f, nodeNames)
By("waiting for pods to be running again")
- pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
})
It("should not reschedule pets if there is a network partition [Slow] [Disruptive]", func() {
- ps := newStatefulSet(psName, ns, headlessSvcName, 3, []api.VolumeMount{}, []api.VolumeMount{}, labels)
+ ps := newStatefulSet(psName, ns, headlessSvcName, 3, []v1.VolumeMount{}, []v1.VolumeMount{}, labels)
_, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
pst := statefulSetTester{c: c}
- pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
pod := pst.getPodList(ps).Items[0]
node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
@@ -429,7 +430,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
}
By("waiting for pods to be running again")
- pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
})
})
@@ -438,7 +439,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
parallelism := int32(2)
completions := int32(4)
- job := newTestJob("notTerminate", "network-partition", api.RestartPolicyNever, parallelism, completions)
+ job := newTestJob("notTerminate", "network-partition", v1.RestartPolicyNever, parallelism, completions)
job, err := createJob(f.ClientSet, f.Namespace.Name, job)
Expect(err).NotTo(HaveOccurred())
label := labels.SelectorFromSet(labels.Set(map[string]string{jobSelectorKey: job.Name}))
@@ -448,7 +449,7 @@ var _ = framework.KubeDescribe("Network Partition [Disruptive] [Slow]", func() {
Expect(err).NotTo(HaveOccurred())
By("choose a node with at least one pod - we will block some network traffic on this node")
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := c.Core().Pods(ns).List(options) // list pods after all have been scheduled
Expect(err).NotTo(HaveOccurred())
nodeName := pods.Items[0].Spec.NodeName
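
Two changes in network_partition.go go beyond the mechanical type swap: the cache.ListWatch closures now take v1.ListOptions and assign field selectors as strings, and v1 StatefulSets declare Spec.Replicas as a pointer, hence the added dereference in waitForRunningAndReady. A condensed sketch of the converted informer wiring, assuming the 1.5-era package paths imported by this file; the function and callback names are illustrative:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// singleNodeInformer watches one node by name and hands every update to the
// caller-supplied callback. Note the string field selector and the v1 types
// in both closures and in the expected object.
func singleNodeInformer(c clientset.Interface, nodeName string, onUpdate func(*v1.Node)) *cache.Controller {
	nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
	_, controller := cache.NewInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				options.FieldSelector = nodeSelector.String()
				obj, err := c.Core().Nodes().List(options)
				return runtime.Object(obj), err
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				options.FieldSelector = nodeSelector.String()
				return c.Core().Nodes().Watch(options)
			},
		},
		&v1.Node{},
		0,
		cache.ResourceEventHandlerFuncs{
			UpdateFunc: func(oldObj, newObj interface{}) {
				if n, ok := newObj.(*v1.Node); ok {
					onUpdate(n)
				}
			},
		},
	)
	return controller
}
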
diff --git a/test/e2e/networking_perf.go b/test/e2e/networking_perf.go
index e0dfab64ef5..55b67b3071b 100644
--- a/test/e2e/networking_perf.go
+++ b/test/e2e/networking_perf.go
@@ -24,7 +24,7 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
)
@@ -59,9 +59,9 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
8001,
8002,
appName,
- func(n api.Node) api.PodSpec {
- return api.PodSpec{
- Containers: []api.Container{{
+ func(n v1.Node) v1.PodSpec {
+ return v1.PodSpec{
+ Containers: []v1.Container{{
Name: "iperf-server",
Image: "gcr.io/google_containers/iperf:e2e",
Args: []string{
@@ -69,10 +69,10 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
"-c",
"/usr/local/bin/iperf -s -p 8001 ",
},
- Ports: []api.ContainerPort{{ContainerPort: 8001}},
+ Ports: []v1.ContainerPort{{ContainerPort: 8001}},
}},
NodeName: n.Name,
- RestartPolicy: api.RestartPolicyOnFailure,
+ RestartPolicy: v1.RestartPolicyOnFailure,
}
},
// this will be used to generate the -service name which all iperf clients point at.
@@ -86,9 +86,9 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
iperfClientPodLabels := f.CreatePodsPerNodeForSimpleApp(
"iperf-e2e-cli",
- func(n api.Node) api.PodSpec {
- return api.PodSpec{
- Containers: []api.Container{
+ func(n v1.Node) v1.PodSpec {
+ return v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "iperf-client",
Image: "gcr.io/google_containers/iperf:e2e",
@@ -99,7 +99,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
},
},
},
- RestartPolicy: api.RestartPolicyOnFailure, // let them successfully die.
+ RestartPolicy: v1.RestartPolicyOnFailure, // let them successfully die.
}
},
numClient,
@@ -121,7 +121,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
iperfClusterVerification := f.NewClusterVerification(
framework.PodStateVerification{
Selectors: iperfClientPodLabels,
- ValidPhases: []api.PodPhase{api.PodSucceeded},
+ ValidPhases: []v1.PodPhase{v1.PodSucceeded},
},
)
@@ -133,7 +133,7 @@ var _ = framework.KubeDescribe("Networking IPerf [Experimental] [Slow] [Feature:
} else {
// For each builds up a collection of IPerfRecords
iperfClusterVerification.ForEach(
- func(p api.Pod) {
+ func(p v1.Pod) {
resultS, err := framework.LookForStringInLog(f.Namespace.Name, p.Name, "iperf-client", "0-", 1*time.Second)
if err == nil {
framework.Logf(resultS)
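
networking_perf.go mostly changes the signatures of the pod-spec factories handed to the framework: they now receive a v1.Node and return a v1.PodSpec. A trimmed sketch of the server-side factory shape, with the iperf command wiring omitted; only fields visible in the hunks are reproduced:

package example

import "k8s.io/kubernetes/pkg/api/v1"

// iperfServerSpec pins one server pod to the given node; Ports and
// RestartPolicy now come from the v1 package.
func iperfServerSpec(n v1.Node) v1.PodSpec {
	return v1.PodSpec{
		Containers: []v1.Container{{
			Name:  "iperf-server",
			Image: "gcr.io/google_containers/iperf:e2e",
			Ports: []v1.ContainerPort{{ContainerPort: 8001}},
		}},
		NodeName:      n.Name,
		RestartPolicy: v1.RestartPolicyOnFailure,
	}
}
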
diff --git a/test/e2e/node_problem_detector.go b/test/e2e/node_problem_detector.go
index 7c602742d58..d59227ed8c5 100644
--- a/test/e2e/node_problem_detector.go
+++ b/test/e2e/node_problem_detector.go
@@ -23,8 +23,9 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
- coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+ coreclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/system"
@@ -54,7 +55,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
name = "node-problem-detector-" + uid
configName = "node-problem-detector-config-" + uid
// There is no namespace for Node, event recorder will set default namespace for node events.
- eventNamespace = api.NamespaceDefault
+ eventNamespace = v1.NamespaceDefault
})
// Test kernel monitor. We may add other tests if we have more problem daemons in the future.
@@ -63,7 +64,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
// Use test condition to avoid conflict with real node problem detector
// TODO(random-liu): Now node condition could be arbitrary string, consider whether we need to
// add TestCondition when switching to predefined condition list.
- condition = api.NodeConditionType("TestCondition")
+ condition = v1.NodeConditionType("TestCondition")
lookback = time.Hour // Assume the test won't take more than 1 hour, in fact it usually only takes 90 seconds.
startPattern = "test reboot"
@@ -88,8 +89,8 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
permMessage = "permanent error"
)
var source, config, tmpDir string
- var node *api.Node
- var eventListOptions api.ListOptions
+ var node *v1.Node
+ var eventListOptions v1.ListOptions
injectCommand := func(timestamp time.Time, log string, num int) string {
var commands []string
for i := 0; i < num; i++ {
@@ -132,11 +133,11 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
]
}`
By("Get a non master node to run the pod")
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
node = nil
for _, n := range nodes.Items {
- if !system.IsMasterNode(&n) {
+ if !system.IsMasterNode(n.Name) {
node = &n
break
}
@@ -146,70 +147,71 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
selector := fields.Set{
"involvedObject.kind": "Node",
"involvedObject.name": node.Name,
- "involvedObject.namespace": api.NamespaceAll,
+ "involvedObject.namespace": v1.NamespaceAll,
"source": source,
- }.AsSelector()
- eventListOptions = api.ListOptions{FieldSelector: selector}
+ }.AsSelector().String()
+ eventListOptions = v1.ListOptions{FieldSelector: selector}
By("Create the test log file")
tmpDir = "/tmp/" + name
cmd := fmt.Sprintf("mkdir %s; > %s/%s", tmpDir, tmpDir, logFile)
Expect(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node)).To(Succeed())
By("Create config map for the node problem detector")
- _, err = c.Core().ConfigMaps(ns).Create(&api.ConfigMap{
- ObjectMeta: api.ObjectMeta{
+ _, err = c.Core().ConfigMaps(ns).Create(&v1.ConfigMap{
+ ObjectMeta: v1.ObjectMeta{
Name: configName,
},
Data: map[string]string{configFile: config},
})
Expect(err).NotTo(HaveOccurred())
By("Create the node problem detector")
- _, err = c.Core().Pods(ns).Create(&api.Pod{
- ObjectMeta: api.ObjectMeta{
+ _, err = c.Core().Pods(ns).Create(&v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
NodeName: node.Name,
- SecurityContext: &api.PodSecurityContext{HostNetwork: true},
- Volumes: []api.Volume{
+ HostNetwork: true,
+ SecurityContext: &v1.PodSecurityContext{},
+ Volumes: []v1.Volume{
{
Name: configVolume,
- VolumeSource: api.VolumeSource{
- ConfigMap: &api.ConfigMapVolumeSource{
- LocalObjectReference: api.LocalObjectReference{Name: configName},
+ VolumeSource: v1.VolumeSource{
+ ConfigMap: &v1.ConfigMapVolumeSource{
+ LocalObjectReference: v1.LocalObjectReference{Name: configName},
},
},
},
{
Name: logVolume,
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{Path: tmpDir},
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{Path: tmpDir},
},
},
{
Name: localtimeVolume,
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{Path: etcLocaltime},
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{Path: etcLocaltime},
},
},
},
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
Command: []string{"/node-problem-detector", "--kernel-monitor=" + filepath.Join(configDir, configFile)},
- ImagePullPolicy: api.PullAlways,
- Env: []api.EnvVar{
+ ImagePullPolicy: v1.PullAlways,
+ Env: []v1.EnvVar{
{
Name: "NODE_NAME",
- ValueFrom: &api.EnvVarSource{
- FieldRef: &api.ObjectFieldSelector{
+ ValueFrom: &v1.EnvVarSource{
+ FieldRef: &v1.ObjectFieldSelector{
APIVersion: "v1",
FieldPath: "spec.nodeName",
},
},
},
},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: logVolume,
MountPath: logDir,
@@ -248,13 +250,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events int
conditionReason string
conditionMessage string
- conditionType api.ConditionStatus
+ conditionType v1.ConditionStatus
}{
{
description: "should generate default node condition",
conditionReason: defaultReason,
conditionMessage: defaultMessage,
- conditionType: api.ConditionFalse,
+ conditionType: v1.ConditionFalse,
},
{
description: "should not generate events for too old log",
@@ -263,7 +265,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
messageNum: 3,
conditionReason: defaultReason,
conditionMessage: defaultMessage,
- conditionType: api.ConditionFalse,
+ conditionType: v1.ConditionFalse,
},
{
description: "should not change node condition for too old log",
@@ -272,7 +274,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
messageNum: 1,
conditionReason: defaultReason,
conditionMessage: defaultMessage,
- conditionType: api.ConditionFalse,
+ conditionType: v1.ConditionFalse,
},
{
description: "should generate event for old log within lookback duration",
@@ -282,7 +284,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 3,
conditionReason: defaultReason,
conditionMessage: defaultMessage,
- conditionType: api.ConditionFalse,
+ conditionType: v1.ConditionFalse,
},
{
description: "should change node condition for old log within lookback duration",
@@ -292,7 +294,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 3, // event number should not change
conditionReason: permReason,
conditionMessage: permMessage,
- conditionType: api.ConditionTrue,
+ conditionType: v1.ConditionTrue,
},
{
description: "should reset node condition if the node is reboot",
@@ -302,7 +304,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 3, // event number should not change
conditionReason: defaultReason,
conditionMessage: defaultMessage,
- conditionType: api.ConditionFalse,
+ conditionType: v1.ConditionFalse,
},
{
description: "should generate event for new log",
@@ -312,7 +314,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 6,
conditionReason: defaultReason,
conditionMessage: defaultMessage,
- conditionType: api.ConditionFalse,
+ conditionType: v1.ConditionFalse,
},
{
description: "should change node condition for new log",
@@ -322,7 +324,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
events: 6, // event number should not change
conditionReason: permReason,
conditionMessage: permMessage,
- conditionType: api.ConditionTrue,
+ conditionType: v1.ConditionTrue,
},
} {
By(test.description)
@@ -360,13 +362,13 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
framework.Logf("Node Problem Detector logs:\n %s", log)
}
By("Delete the node problem detector")
- c.Core().Pods(ns).Delete(name, api.NewDeleteOptions(0))
+ c.Core().Pods(ns).Delete(name, v1.NewDeleteOptions(0))
By("Wait for the node problem detector to disappear")
Expect(framework.WaitForPodToDisappear(c, ns, name, labels.Everything(), pollInterval, pollTimeout)).To(Succeed())
By("Delete the config map")
c.Core().ConfigMaps(ns).Delete(configName, nil)
By("Clean up the events")
- Expect(c.Core().Events(eventNamespace).DeleteCollection(api.NewDeleteOptions(0), eventListOptions)).To(Succeed())
+ Expect(c.Core().Events(eventNamespace).DeleteCollection(v1.NewDeleteOptions(0), eventListOptions)).To(Succeed())
By("Clean up the node condition")
patch := []byte(fmt.Sprintf(`{"status":{"conditions":[{"$patch":"delete","type":"%s"}]}}`, condition))
c.Core().RESTClient().Patch(api.StrategicMergePatchType).Resource("nodes").Name(node.Name).SubResource("status").Body(patch).Do()
@@ -377,7 +379,7 @@ var _ = framework.KubeDescribe("NodeProblemDetector", func() {
})
// verifyEvents verifies there are num specific events generated
-func verifyEvents(e coreclientset.EventInterface, options api.ListOptions, num int, reason, message string) error {
+func verifyEvents(e coreclientset.EventInterface, options v1.ListOptions, num int, reason, message string) error {
events, err := e.List(options)
if err != nil {
return err
@@ -396,7 +398,7 @@ func verifyEvents(e coreclientset.EventInterface, options api.ListOptions, num i
}
// verifyNoEvents verifies there is no event generated
-func verifyNoEvents(e coreclientset.EventInterface, options api.ListOptions) error {
+func verifyNoEvents(e coreclientset.EventInterface, options v1.ListOptions) error {
events, err := e.List(options)
if err != nil {
return err
@@ -408,12 +410,12 @@ func verifyNoEvents(e coreclientset.EventInterface, options api.ListOptions) err
}
// verifyCondition verifies specific node condition is generated, if reason and message are empty, they will not be checked
-func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition api.NodeConditionType, status api.ConditionStatus, reason, message string) error {
+func verifyCondition(n coreclientset.NodeInterface, nodeName string, condition v1.NodeConditionType, status v1.ConditionStatus, reason, message string) error {
node, err := n.Get(nodeName)
if err != nil {
return err
}
- _, c := api.GetNodeCondition(&node.Status, condition)
+ _, c := v1.GetNodeCondition(&node.Status, condition)
if c == nil {
return fmt.Errorf("node condition %q not found", condition)
}
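
Beyond the type moves, node_problem_detector.go picks up two behavioural edits visible above: HostNetwork now lives on the v1.PodSpec rather than inside PodSecurityContext, and system.IsMasterNode takes a node name instead of a node object. The event plumbing keeps the same shape with string field selectors; a small sketch of that part, with illustrative helper names:

package example

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields"
)

// nodeEventOptions serializes the field selector before handing it to the
// versioned ListOptions, exactly as the BeforeEach block above does.
func nodeEventOptions(nodeName, source string) v1.ListOptions {
	selector := fields.Set{
		"involvedObject.kind":      "Node",
		"involvedObject.name":      nodeName,
		"involvedObject.namespace": v1.NamespaceAll,
		"source":                   source,
	}.AsSelector().String()
	return v1.ListOptions{FieldSelector: selector}
}

// cleanupNodeEvents wraps the DeleteCollection call from the teardown above,
// using the versioned zero-grace-period helper.
func cleanupNodeEvents(c clientset.Interface, ns string, options v1.ListOptions) error {
	return c.Core().Events(ns).DeleteCollection(v1.NewDeleteOptions(0), options)
}
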
diff --git a/test/e2e/nodeoutofdisk.go b/test/e2e/nodeoutofdisk.go
index 9c95861b640..eafbeb22b8e 100644
--- a/test/e2e/nodeoutofdisk.go
+++ b/test/e2e/nodeoutofdisk.go
@@ -22,9 +22,9 @@ import (
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -136,10 +136,10 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
"involvedObject.kind": "Pod",
"involvedObject.name": pendingPodName,
"involvedObject.namespace": ns,
- "source": api.DefaultSchedulerName,
+ "source": v1.DefaultSchedulerName,
"reason": "FailedScheduling",
- }.AsSelector()
- options := api.ListOptions{FieldSelector: selector}
+ }.AsSelector().String()
+ options := v1.ListOptions{FieldSelector: selector}
schedEvents, err := c.Core().Events(ns).List(options)
framework.ExpectNoError(err)
@@ -171,19 +171,19 @@ var _ = framework.KubeDescribe("NodeOutOfDisk [Serial] [Flaky] [Disruptive]", fu
func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64) {
podClient := c.Core().Pods(ns)
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "pause",
Image: framework.GetPauseImageName(c),
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
// Request enough CPU to fit only two pods on a given node.
- api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+ v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
},
},
},
@@ -197,11 +197,11 @@ func createOutOfDiskPod(c clientset.Interface, ns, name string, milliCPU int64)
// availCpu calculates the available CPU on a given node by subtracting the CPU requested by
// all the pods from the total available CPU capacity on the node.
-func availCpu(c clientset.Interface, node *api.Node) (int64, error) {
- podClient := c.Core().Pods(api.NamespaceAll)
+func availCpu(c clientset.Interface, node *v1.Node) (int64, error) {
+ podClient := c.Core().Pods(v1.NamespaceAll)
- selector := fields.Set{"spec.nodeName": node.Name}.AsSelector()
- options := api.ListOptions{FieldSelector: selector}
+ selector := fields.Set{"spec.nodeName": node.Name}.AsSelector().String()
+ options := v1.ListOptions{FieldSelector: selector}
pods, err := podClient.List(options)
if err != nil {
return 0, fmt.Errorf("failed to retrieve all the pods on node %s: %v", node.Name, err)
@@ -217,7 +217,7 @@ func availCpu(c clientset.Interface, node *api.Node) (int64, error) {
// availSize returns the available disk space on a given node by querying node stats which
// is in turn obtained internally from cadvisor.
-func availSize(c clientset.Interface, node *api.Node) (uint64, error) {
+func availSize(c clientset.Interface, node *v1.Node) (uint64, error) {
statsResource := fmt.Sprintf("api/v1/proxy/nodes/%s/stats/", node.Name)
framework.Logf("Querying stats for node %s using url %s", node.Name, statsResource)
res, err := c.Core().RESTClient().Get().AbsPath(statsResource).Timeout(time.Minute).Do().Raw()
@@ -235,7 +235,7 @@ func availSize(c clientset.Interface, node *api.Node) (uint64, error) {
// fillDiskSpace fills the available disk space on a given node by creating a large file. The disk
// space on the node is filled in such a way that the available space after filling the disk is just
// below the lowDiskSpaceThreshold mark.
-func fillDiskSpace(c clientset.Interface, node *api.Node) {
+func fillDiskSpace(c clientset.Interface, node *v1.Node) {
avail, err := availSize(c, node)
framework.ExpectNoError(err, "Node %s: couldn't obtain available disk size %v", node.Name, err)
@@ -247,7 +247,7 @@ func fillDiskSpace(c clientset.Interface, node *api.Node) {
cmd := fmt.Sprintf("fallocate -l %d test.img", fillSize)
framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
- ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, true, nodeOODTimeOut)
+ ood := framework.WaitForNodeToBe(c, node.Name, v1.NodeOutOfDisk, true, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s did not run out of disk within %v", node.Name, nodeOODTimeOut)
avail, err = availSize(c, node)
@@ -256,11 +256,11 @@ func fillDiskSpace(c clientset.Interface, node *api.Node) {
}
// recoverDiskSpace recovers disk space, filled by creating a large file, on a given node.
-func recoverDiskSpace(c clientset.Interface, node *api.Node) {
+func recoverDiskSpace(c clientset.Interface, node *v1.Node) {
By(fmt.Sprintf("Recovering disk space on node %s", node.Name))
cmd := "rm -f test.img"
framework.ExpectNoError(framework.IssueSSHCommand(cmd, framework.TestContext.Provider, node))
- ood := framework.WaitForNodeToBe(c, node.Name, api.NodeOutOfDisk, false, nodeOODTimeOut)
+ ood := framework.WaitForNodeToBe(c, node.Name, v1.NodeOutOfDisk, false, nodeOODTimeOut)
Expect(ood).To(BeTrue(), "Node %s's out of disk condition status did not change to false within %v", node.Name, nodeOODTimeOut)
}
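
nodeoutofdisk.go follows the same recipe: resource quantities are untouched, only the ResourceList map type and the ResourceCPU key move to v1, and field selectors become strings. A sketch of the converted pod literal, with the image passed in (the test uses framework.GetPauseImageName) and a hypothetical helper name:

package example

import (
	"k8s.io/kubernetes/pkg/api/resource"
	"k8s.io/kubernetes/pkg/api/v1"
)

// pausePodWithCPURequest reserves the given amount of CPU so that only a
// limited number of such pods fit on a node, as in createOutOfDiskPod.
func pausePodWithCPURequest(name, image string, milliCPU int64) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:  "pause",
				Image: image,
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
					},
				},
			}},
		},
	}
}
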
diff --git a/test/e2e/opaque_resource.go b/test/e2e/opaque_resource.go
index 28750309158..af40015f1ea 100644
--- a/test/e2e/opaque_resource.go
+++ b/test/e2e/opaque_resource.go
@@ -24,6 +24,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime"
@@ -38,16 +39,16 @@ import (
var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", func() {
f := framework.NewDefaultFramework("opaque-resource")
- opaqueResName := api.OpaqueIntResourceName("foo")
- var node *api.Node
+ opaqueResName := v1.OpaqueIntResourceName("foo")
+ var node *v1.Node
BeforeEach(func() {
if node == nil {
// Priming invocation; select the first non-master node.
- nodes, err := f.ClientSet.Core().Nodes().List(api.ListOptions{})
+ nodes, err := f.ClientSet.Core().Nodes().List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes.Items {
- if !system.IsMasterNode(&n) {
+ if !system.IsMasterNode(n.Name) {
node = &n
break
}
@@ -63,8 +64,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should not break pods that do not consume opaque integer resources.", func() {
By("Creating a vanilla pod")
- requests := api.ResourceList{api.ResourceCPU: resource.MustParse("0.1")}
- limits := api.ResourceList{api.ResourceCPU: resource.MustParse("0.2")}
+ requests := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.1")}
+ limits := v1.ResourceList{v1.ResourceCPU: resource.MustParse("0.2")}
pod := newTestPod(f, "without-oir", requests, limits)
By("Observing an event that indicates the pod was scheduled")
@@ -72,8 +73,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err
}
- predicate := func(e *api.Event) bool {
- return e.Type == api.EventTypeNormal &&
+ predicate := func(e *v1.Event) bool {
+ return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" &&
// Here we don't check for the bound node name since it can land on
// any one (this pod doesn't require any of the opaque resource.)
@@ -86,13 +87,13 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should schedule pods that do consume opaque integer resources.", func() {
By("Creating a pod that requires less of the opaque resource than is allocatable on a node.")
- requests := api.ResourceList{
- api.ResourceCPU: resource.MustParse("0.1"),
- opaqueResName: resource.MustParse("1"),
+ requests := v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("0.1"),
+ opaqueResName: resource.MustParse("1"),
}
- limits := api.ResourceList{
- api.ResourceCPU: resource.MustParse("0.2"),
- opaqueResName: resource.MustParse("2"),
+ limits := v1.ResourceList{
+ v1.ResourceCPU: resource.MustParse("0.2"),
+ opaqueResName: resource.MustParse("2"),
}
pod := newTestPod(f, "min-oir", requests, limits)
@@ -101,8 +102,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err
}
- predicate := func(e *api.Event) bool {
- return e.Type == api.EventTypeNormal &&
+ predicate := func(e *v1.Event) bool {
+ return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" &&
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", pod.Name, node.Name))
}
@@ -113,15 +114,15 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should not schedule pods that exceed the available amount of opaque integer resource.", func() {
By("Creating a pod that requires more of the opaque resource than is allocatable on any node")
- requests := api.ResourceList{opaqueResName: resource.MustParse("6")}
- limits := api.ResourceList{}
+ requests := v1.ResourceList{opaqueResName: resource.MustParse("6")}
+ limits := v1.ResourceList{}
By("Observing an event that indicates the pod was not scheduled")
action := func() error {
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(newTestPod(f, "over-max-oir", requests, limits))
return err
}
- predicate := func(e *api.Event) bool {
+ predicate := func(e *v1.Event) bool {
return e.Type == "Warning" &&
e.Reason == "FailedScheduling" &&
strings.Contains(e.Message, "failed to fit in any node")
@@ -133,20 +134,20 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
It("should account opaque integer resources in pods with multiple containers.", func() {
By("Creating a pod with two containers that together require less of the opaque resource than is allocatable on a node")
- requests := api.ResourceList{opaqueResName: resource.MustParse("1")}
- limits := api.ResourceList{}
+ requests := v1.ResourceList{opaqueResName: resource.MustParse("1")}
+ limits := v1.ResourceList{}
image := framework.GetPauseImageName(f.ClientSet)
// This pod consumes 2 "foo" resources.
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "mult-container-oir",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "pause",
Image: image,
- Resources: api.ResourceRequirements{
+ Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
@@ -154,7 +155,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
{
Name: "pause-sidecar",
Image: image,
- Resources: api.ResourceRequirements{
+ Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
@@ -168,8 +169,8 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err
}
- predicate := func(e *api.Event) bool {
- return e.Type == api.EventTypeNormal &&
+ predicate := func(e *v1.Event) bool {
+ return e.Type == v1.EventTypeNormal &&
e.Reason == "Scheduled" &&
strings.Contains(e.Message, fmt.Sprintf("Successfully assigned %v to %v", pod.Name, node.Name))
}
@@ -178,19 +179,19 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
Expect(success).To(Equal(true))
By("Creating a pod with two containers that together require more of the opaque resource than is allocatable on any node")
- requests = api.ResourceList{opaqueResName: resource.MustParse("3")}
- limits = api.ResourceList{}
+ requests = v1.ResourceList{opaqueResName: resource.MustParse("3")}
+ limits = v1.ResourceList{}
// This pod consumes 6 "foo" resources.
- pod = &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod = &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "mult-container-over-max-oir",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "pause",
Image: image,
- Resources: api.ResourceRequirements{
+ Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
@@ -198,7 +199,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
{
Name: "pause-sidecar",
Image: image,
- Resources: api.ResourceRequirements{
+ Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
@@ -212,7 +213,7 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
return err
}
- predicate = func(e *api.Event) bool {
+ predicate = func(e *v1.Event) bool {
return e.Type == "Warning" &&
e.Reason == "FailedScheduling" &&
strings.Contains(e.Message, "failed to fit in any node")
@@ -224,12 +225,12 @@ var _ = framework.KubeDescribe("Opaque resources [Feature:OpaqueResources]", fun
})
// Adds the opaque resource to a node.
-func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName api.ResourceName) {
+func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
action := func() error {
patch := []byte(fmt.Sprintf(`[{"op": "add", "path": "/status/capacity/%s", "value": "5"}]`, escapeForJSONPatch(opaqueResName)))
return f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do().Error()
}
- predicate := func(n *api.Node) bool {
+ predicate := func(n *v1.Node) bool {
capacity, foundCap := n.Status.Capacity[opaqueResName]
allocatable, foundAlloc := n.Status.Allocatable[opaqueResName]
return foundCap && capacity.MilliValue() == int64(5000) &&
@@ -241,13 +242,13 @@ func addOpaqueResource(f *framework.Framework, nodeName string, opaqueResName ap
}
// Removes the opaque resource from a node.
-func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName api.ResourceName) {
+func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName v1.ResourceName) {
action := func() error {
patch := []byte(fmt.Sprintf(`[{"op": "remove", "path": "/status/capacity/%s"}]`, escapeForJSONPatch(opaqueResName)))
f.ClientSet.Core().RESTClient().Patch(api.JSONPatchType).Resource("nodes").Name(nodeName).SubResource("status").Body(patch).Do()
return nil // Ignore error -- the opaque resource may not exist.
}
- predicate := func(n *api.Node) bool {
+ predicate := func(n *v1.Node) bool {
_, foundCap := n.Status.Capacity[opaqueResName]
_, foundAlloc := n.Status.Allocatable[opaqueResName]
return !foundCap && !foundAlloc
@@ -257,7 +258,7 @@ func removeOpaqueResource(f *framework.Framework, nodeName string, opaqueResName
Expect(success).To(Equal(true))
}
-func escapeForJSONPatch(resName api.ResourceName) string {
+func escapeForJSONPatch(resName v1.ResourceName) string {
// Escape forward slashes in the resource name per the JSON Pointer spec.
// See https://tools.ietf.org/html/rfc6901#section-3
return strings.Replace(string(resName), "/", "~1", -1)
@@ -265,7 +266,7 @@ func escapeForJSONPatch(resName api.ResourceName) string {
// Returns true if a node update matching the predicate was emitted from the
// system after performing the supplied action.
-func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodePredicate func(*api.Node) bool, action func() error) (bool, error) {
+func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodePredicate func(*v1.Node) bool, action func() error) (bool, error) {
observedMatchingNode := false
nodeSelector := fields.OneTermEqualSelector("metadata.name", nodeName)
informerStartedChan := make(chan struct{})
@@ -273,24 +274,24 @@ func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
_, controller := cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
- options.FieldSelector = nodeSelector
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ options.FieldSelector = nodeSelector.String()
ls, err := f.ClientSet.Core().Nodes().List(options)
return ls, err
},
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
- options.FieldSelector = nodeSelector
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ options.FieldSelector = nodeSelector.String()
w, err := f.ClientSet.Core().Nodes().Watch(options)
// Signal parent goroutine that watching has begun.
informerStartedGuard.Do(func() { close(informerStartedChan) })
return w, err
},
},
- &api.Node{},
+ &v1.Node{},
0,
cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
- n, ok := newObj.(*api.Node)
+ n, ok := newObj.(*v1.Node)
Expect(ok).To(Equal(true))
if nodePredicate(n) {
observedMatchingNode = true
@@ -323,26 +324,26 @@ func observeNodeUpdateAfterAction(f *framework.Framework, nodeName string, nodeP
// Returns true if an event matching the predicate was emitted from the system
// after performing the supplied action.
-func observeEventAfterAction(f *framework.Framework, eventPredicate func(*api.Event) bool, action func() error) (bool, error) {
+func observeEventAfterAction(f *framework.Framework, eventPredicate func(*v1.Event) bool, action func() error) (bool, error) {
observedMatchingEvent := false
// Create an informer to list/watch events from the test framework namespace.
_, controller := cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
ls, err := f.ClientSet.Core().Events(f.Namespace.Name).List(options)
return ls, err
},
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
w, err := f.ClientSet.Core().Events(f.Namespace.Name).Watch(options)
return w, err
},
},
- &api.Event{},
+ &v1.Event{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- e, ok := obj.(*api.Event)
+ e, ok := obj.(*v1.Event)
By(fmt.Sprintf("Considering event: \nType = [%s], Reason = [%s], Message = [%s]", e.Type, e.Reason, e.Message))
Expect(ok).To(Equal(true))
if ok && eventPredicate(e) {
diff --git a/test/e2e/pd.go b/test/e2e/pd.go
index 7f1a70c14d2..5173612fa8f 100644
--- a/test/e2e/pd.go
+++ b/test/e2e/pd.go
@@ -30,11 +30,11 @@ import (
"github.com/aws/aws-sdk-go/service/ec2"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+ v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
awscloud "k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/types"
@@ -54,8 +54,8 @@ const (
var _ = framework.KubeDescribe("Pod Disks", func() {
var (
- podClient unversionedcore.PodInterface
- nodeClient unversionedcore.NodeInterface
+ podClient v1core.PodInterface
+ nodeClient v1core.NodeInterface
host0Name types.NodeName
host1Name types.NodeName
)
@@ -91,8 +91,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
By("cleaning up PD-RW test environment")
- podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
- podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
+ podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -113,7 +113,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("deleting host0Pod")
// Delete pod with 0 grace period
- framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
+ framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@@ -131,7 +131,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
By("deleting host1Pod")
- framework.ExpectNoError(podClient.Delete(host1Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host1Pod")
+ framework.ExpectNoError(podClient.Delete(host1Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1Pod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -155,8 +155,8 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
By("cleaning up PD-RW test environment")
- podClient.Delete(host0Pod.Name, &api.DeleteOptions{})
- podClient.Delete(host1Pod.Name, &api.DeleteOptions{})
+ podClient.Delete(host0Pod.Name, &v1.DeleteOptions{})
+ podClient.Delete(host1Pod.Name, &v1.DeleteOptions{})
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -177,7 +177,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("deleting host0Pod")
// Delete pod with default grace period 30s
- framework.ExpectNoError(podClient.Delete(host0Pod.Name, &api.DeleteOptions{}), "Failed to delete host0Pod")
+ framework.ExpectNoError(podClient.Delete(host0Pod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
By("submitting host1Pod to kubernetes")
_, err = podClient.Create(host1Pod)
@@ -195,7 +195,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(waitForPDInVolumesInUse(nodeClient, diskName, host0Name, nodeStatusTimeout, false /* shouldExist */))
By("deleting host1Pod")
- framework.ExpectNoError(podClient.Delete(host1Pod.Name, &api.DeleteOptions{}), "Failed to delete host1Pod")
+ framework.ExpectNoError(podClient.Delete(host1Pod.Name, &v1.DeleteOptions{}), "Failed to delete host1Pod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -219,9 +219,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
- podClient.Delete(rwPod.Name, api.NewDeleteOptions(0))
- podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0))
- podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0))
+ podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0))
+ podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0))
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -230,7 +230,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Failed to create rwPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
// Delete pod with 0 grace period
- framework.ExpectNoError(podClient.Delete(rwPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
+ framework.ExpectNoError(podClient.Delete(rwPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes")
@@ -246,10 +246,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod")
- framework.ExpectNoError(podClient.Delete(host0ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host0ROPod")
+ framework.ExpectNoError(podClient.Delete(host0ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0ROPod")
By("deleting host1ROPod")
- framework.ExpectNoError(podClient.Delete(host1ROPod.Name, api.NewDeleteOptions(0)), "Failed to delete host1ROPod")
+ framework.ExpectNoError(podClient.Delete(host1ROPod.Name, v1.NewDeleteOptions(0)), "Failed to delete host1ROPod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -271,9 +271,9 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("cleaning up PD-RO test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
- podClient.Delete(rwPod.Name, &api.DeleteOptions{})
- podClient.Delete(host0ROPod.Name, &api.DeleteOptions{})
- podClient.Delete(host1ROPod.Name, &api.DeleteOptions{})
+ podClient.Delete(rwPod.Name, &v1.DeleteOptions{})
+ podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{})
+ podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{})
detachAndDeletePDs(diskName, []types.NodeName{host0Name, host1Name})
}()
@@ -282,7 +282,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(err, "Failed to create rwPod")
framework.ExpectNoError(f.WaitForPodRunningSlow(rwPod.Name))
// Delete pod with default grace period 30s
- framework.ExpectNoError(podClient.Delete(rwPod.Name, &api.DeleteOptions{}), "Failed to delete host0Pod")
+ framework.ExpectNoError(podClient.Delete(rwPod.Name, &v1.DeleteOptions{}), "Failed to delete host0Pod")
framework.ExpectNoError(waitForPDDetach(diskName, host0Name))
By("submitting host0ROPod to kubernetes")
@@ -298,10 +298,10 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
framework.ExpectNoError(f.WaitForPodRunningSlow(host1ROPod.Name))
By("deleting host0ROPod")
- framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &api.DeleteOptions{}), "Failed to delete host0ROPod")
+ framework.ExpectNoError(podClient.Delete(host0ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host0ROPod")
By("deleting host1ROPod")
- framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &api.DeleteOptions{}), "Failed to delete host1ROPod")
+ framework.ExpectNoError(podClient.Delete(host1ROPod.Name, &v1.DeleteOptions{}), "Failed to delete host1ROPod")
By("Test completed successfully, waiting for PD to safely detach")
waitForPDDetach(diskName, host0Name)
@@ -315,14 +315,14 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
diskName, err := createPDWithRetry()
framework.ExpectNoError(err, "Error creating PD")
numContainers := 4
- var host0Pod *api.Pod
+ var host0Pod *v1.Pod
defer func() {
By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
if host0Pod != nil {
- podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
}
detachAndDeletePDs(diskName, []types.NodeName{host0Name})
}()
@@ -354,7 +354,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod")
- framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
+ framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
}
By("Test completed successfully, waiting for PD to safely detach")
@@ -370,14 +370,14 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
By("creating PD2")
disk2Name, err := createPDWithRetry()
framework.ExpectNoError(err, "Error creating PD2")
- var host0Pod *api.Pod
+ var host0Pod *v1.Pod
defer func() {
By("cleaning up PD-RW test environment")
// Teardown pods, PD. Ignore errors.
// Teardown should do nothing unless test failed.
if host0Pod != nil {
- podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0))
+ podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0))
}
detachAndDeletePDs(disk1Name, []types.NodeName{host0Name})
detachAndDeletePDs(disk2Name, []types.NodeName{host0Name})
@@ -413,7 +413,7 @@ var _ = framework.KubeDescribe("Pod Disks", func() {
verifyPDContentsViaContainer(f, host0Pod.Name, containerName, fileAndContentToVerify)
By("deleting host0Pod")
- framework.ExpectNoError(podClient.Delete(host0Pod.Name, api.NewDeleteOptions(0)), "Failed to delete host0Pod")
+ framework.ExpectNoError(podClient.Delete(host0Pod.Name, v1.NewDeleteOptions(0)), "Failed to delete host0Pod")
}
By("Test completed successfully, waiting for PD to safely detach")
@@ -590,8 +590,8 @@ func detachPD(nodeName types.NodeName, pdName string) error {
}
}
-func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *api.Pod {
- containers := make([]api.Container, numContainers)
+func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, numContainers int) *v1.Pod {
+ containers := make([]v1.Container, numContainers)
for i := range containers {
containers[i].Name = "mycontainer"
if numContainers > 1 {
@@ -602,37 +602,37 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
containers[i].Command = []string{"sleep", "6000"}
- containers[i].VolumeMounts = make([]api.VolumeMount, len(diskNames))
+ containers[i].VolumeMounts = make([]v1.VolumeMount, len(diskNames))
for k := range diskNames {
containers[i].VolumeMounts[k].Name = fmt.Sprintf("testpd%v", k+1)
containers[i].VolumeMounts[k].MountPath = fmt.Sprintf("/testpd%v", k+1)
}
- containers[i].Resources.Limits = api.ResourceList{}
- containers[i].Resources.Limits[api.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI)
+ containers[i].Resources.Limits = v1.ResourceList{}
+ containers[i].Resources.Limits[v1.ResourceCPU] = *resource.NewQuantity(int64(0), resource.DecimalSI)
}
- pod := &api.Pod{
+ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "pd-test-" + string(uuid.NewUUID()),
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
Containers: containers,
NodeName: string(targetNode),
},
}
if framework.TestContext.Provider == "gce" || framework.TestContext.Provider == "gke" {
- pod.Spec.Volumes = make([]api.Volume, len(diskNames))
+ pod.Spec.Volumes = make([]v1.Volume, len(diskNames))
for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
- pod.Spec.Volumes[k].VolumeSource = api.VolumeSource{
- GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+ pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
+ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
FSType: "ext4",
ReadOnly: readOnly,
@@ -640,11 +640,11 @@ func testPDPod(diskNames []string, targetNode types.NodeName, readOnly bool, num
}
}
} else if framework.TestContext.Provider == "aws" {
- pod.Spec.Volumes = make([]api.Volume, len(diskNames))
+ pod.Spec.Volumes = make([]v1.Volume, len(diskNames))
for k, diskName := range diskNames {
pod.Spec.Volumes[k].Name = fmt.Sprintf("testpd%v", k+1)
- pod.Spec.Volumes[k].VolumeSource = api.VolumeSource{
- AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{
+ pod.Spec.Volumes[k].VolumeSource = v1.VolumeSource{
+ AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
FSType: "ext4",
ReadOnly: readOnly,
@@ -711,7 +711,7 @@ func detachAndDeletePDs(diskName string, hosts []types.NodeName) {
}
func waitForPDInVolumesInUse(
- nodeClient unversionedcore.NodeInterface,
+ nodeClient v1core.NodeInterface,
diskName string,
nodeName types.NodeName,
timeout time.Duration,
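
The PD test migration is uniform: pod fixtures are built from the versioned v1 types and torn down with v1.NewDeleteOptions. A minimal sketch of that shape (pod name and image are illustrative; diskName is assumed to come from createPDWithRetry, and all types are from "k8s.io/kubernetes/pkg/api/v1"):

// Sketch only, not part of the diff: the versioned form of a one-container PD test pod.
func sketchPDPod(diskName string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: v1.ObjectMeta{Name: "pd-sketch"},
		Spec: v1.PodSpec{
			Containers: []v1.Container{{
				Name:         "mycontainer",
				Image:        "gcr.io/google_containers/busybox:1.24",
				Command:      []string{"sleep", "6000"},
				VolumeMounts: []v1.VolumeMount{{Name: "testpd1", MountPath: "/testpd1"}},
			}},
			Volumes: []v1.Volume{{
				Name: "testpd1",
				VolumeSource: v1.VolumeSource{
					GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
						PDName:   diskName,
						FSType:   "ext4",
						ReadOnly: false,
					},
				},
			}},
		},
	}
}

// Immediate deletion goes through the versioned helper rather than api.NewDeleteOptions:
//   podClient.Delete(pod.Name, v1.NewDeleteOptions(0))
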
diff --git a/test/e2e/persistent_volumes.go b/test/e2e/persistent_volumes.go
index 80c47db1c8a..9f65df8ec1b 100644
--- a/test/e2e/persistent_volumes.go
+++ b/test/e2e/persistent_volumes.go
@@ -22,12 +22,12 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
"k8s.io/kubernetes/test/e2e/framework"
@@ -97,7 +97,7 @@ func pvPvcCleanup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap)
// Delete the PVC and wait for the PV to become Available again. Validate that the PV
// has recycled (assumption here about reclaimPolicy). Caller tells this func which
// phase value to expect for the pv bound to the to-be-deleted claim.
-func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim, pv *api.PersistentVolume, expctPVPhase api.PersistentVolumePhase) {
+func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim, pv *v1.PersistentVolume, expctPVPhase v1.PersistentVolumePhase) {
pvname := pvc.Spec.VolumeName
framework.Logf("Deleting PVC %v to trigger recycling of PV %v", pvc.Name, pvname)
@@ -118,11 +118,11 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.Persisten
pv, err = c.Core().PersistentVolumes().Get(pv.Name)
Expect(err).NotTo(HaveOccurred())
cr := pv.Spec.ClaimRef
- if expctPVPhase == api.VolumeAvailable {
+ if expctPVPhase == v1.VolumeAvailable {
if cr != nil { // may be ok if cr != nil
Expect(len(cr.UID)).To(BeZero())
}
- } else if expctPVPhase == api.VolumeBound {
+ } else if expctPVPhase == v1.VolumeBound {
Expect(cr).NotTo(BeNil())
Expect(len(cr.UID)).NotTo(BeZero())
}
@@ -137,7 +137,7 @@ func deletePVCandValidatePV(c clientset.Interface, ns string, pvc *api.Persisten
func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap, claims pvcmap) {
var boundPVs, deletedPVCs int
- var expctPVPhase api.PersistentVolumePhase
+ var expctPVPhase v1.PersistentVolumePhase
for pvName := range pvols {
pv, err := c.Core().PersistentVolumes().Get(pvName)
@@ -156,11 +156,11 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
// what Phase do we expect the PV that was bound to the claim to
// be in after that claim is deleted?
- expctPVPhase = api.VolumeAvailable
+ expctPVPhase = v1.VolumeAvailable
if len(claims) > len(pvols) {
// there are excess pvcs so expect the previously bound
// PV to become bound again
- expctPVPhase = api.VolumeBound
+ expctPVPhase = v1.VolumeBound
}
deletePVCandValidatePV(c, ns, pvc, pv, expctPVPhase)
@@ -172,7 +172,7 @@ func deletePVCandValidatePVGroup(c clientset.Interface, ns string, pvols pvmap,
}
// create the PV resource. Fails test on error.
-func createPV(c clientset.Interface, pv *api.PersistentVolume) *api.PersistentVolume {
+func createPV(c clientset.Interface, pv *v1.PersistentVolume) *v1.PersistentVolume {
pv, err := c.Core().PersistentVolumes().Create(pv)
Expect(err).NotTo(HaveOccurred())
@@ -180,7 +180,7 @@ func createPV(c clientset.Interface, pv *api.PersistentVolume) *api.PersistentVo
}
// create the PVC resource. Fails test on error.
-func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim) *api.PersistentVolumeClaim {
+func createPVC(c clientset.Interface, ns string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolumeClaim {
pvc, err := c.Core().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
@@ -193,9 +193,9 @@ func createPVC(c clientset.Interface, ns string, pvc *api.PersistentVolumeClaim)
// Note: in the pre-bind case the real PVC name, which is generated, is not
// known until after the PVC is instantiated. This is why the pvc is created
// before the pv.
-func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim) {
+func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
- var bindTo *api.PersistentVolumeClaim
+ var bindTo *v1.PersistentVolumeClaim
var preBindMsg string
// make the pvc definition first
@@ -227,7 +227,7 @@ func createPVCPV(c clientset.Interface, serverIP, ns string, preBind bool) (*api
// Note: in the pre-bind case the real PV name, which is generated, is not
// known until after the PV is instantiated. This is why the pv is created
// before the pvc.
-func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api.PersistentVolume, *api.PersistentVolumeClaim) {
+func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*v1.PersistentVolume, *v1.PersistentVolumeClaim) {
preBindMsg := ""
if preBind {
@@ -256,8 +256,8 @@ func createPVPVC(c clientset.Interface, serverIP, ns string, preBind bool) (*api
func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns, serverIP string) (pvmap, pvcmap) {
var i int
- var pv *api.PersistentVolume
- var pvc *api.PersistentVolumeClaim
+ var pv *v1.PersistentVolume
+ var pvc *v1.PersistentVolumeClaim
pvMap := make(pvmap, numpvs)
pvcMap := make(pvcmap, numpvcs)
@@ -292,16 +292,16 @@ func createPVsPVCs(numpvs, numpvcs int, c clientset.Interface, ns, serverIP stri
}
// Wait for the pv and pvc to bind to each other.
-func waitOnPVandPVC(c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) {
+func waitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// Wait for newly created PVC to bind to the PV
framework.Logf("Waiting for PV %v to bind to PVC %v", pv.Name, pvc.Name)
- err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
+ err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
// Wait for PersistentVolume.Status.Phase to be Bound, which it should be
// since the PVC is already bound.
- err = framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
+ err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
Expect(err).NotTo(HaveOccurred())
// Re-get the pv and pvc objects
@@ -335,7 +335,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
}
for pvName := range pvols {
- err := framework.WaitForPersistentVolumePhase(api.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
+ err := framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pvName, 3*time.Second, 180*time.Second)
if err != nil && len(pvols) > len(claims) {
framework.Logf("WARN: pv %v is not bound after max wait", pvName)
framework.Logf(" This may be ok since there are more pvs than pvcs")
@@ -352,7 +352,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
_, found := claims[pvcKey]
Expect(found).To(BeTrue())
- err = framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
+ err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, cr.Name, 3*time.Second, 180*time.Second)
Expect(err).NotTo(HaveOccurred())
actualBinds++
}
@@ -364,7 +364,7 @@ func waitAndVerifyBinds(c clientset.Interface, ns string, pvols pvmap, claims pv
}
// Test the pod's exit code to be zero.
-func testPodSuccessOrFail(c clientset.Interface, ns string, pod *api.Pod) {
+func testPodSuccessOrFail(c clientset.Interface, ns string, pod *v1.Pod) {
By("Pod should terminate with exitcode 0 (success)")
err := framework.WaitForPodSuccessInNamespace(c, pod.Name, ns)
@@ -373,7 +373,7 @@ func testPodSuccessOrFail(c clientset.Interface, ns string, pod *api.Pod) {
}
// Delete the passed in pod.
-func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *api.Pod) {
+func deletePod(f *framework.Framework, c clientset.Interface, ns string, pod *v1.Pod) {
framework.Logf("Deleting pod %v", pod.Name)
err := c.Core().Pods(ns).Delete(pod.Name, nil)
@@ -408,7 +408,7 @@ func createWaitAndDeletePod(f *framework.Framework, c clientset.Interface, ns st
// Validate PV/PVC, create and verify writer pod, delete the PVC, and validate the PV's
// phase. Note: the PV is deleted in the AfterEach, not here.
-func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *api.PersistentVolume, pvc *api.PersistentVolumeClaim) {
+func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
// 1. verify that the PV and PVC have bound correctly
By("Validating the PV-PVC binding")
@@ -421,7 +421,7 @@ func completeTest(f *framework.Framework, c clientset.Interface, ns string, pv *
// 3. delete the PVC, wait for PV to become "Available"
By("Deleting the PVC to invoke the recycler")
- deletePVCandValidatePV(c, ns, pvc, pv, api.VolumeAvailable)
+ deletePVCandValidatePV(c, ns, pvc, pv, v1.VolumeAvailable)
}
// Validate pairs of PVs and PVCs, create and verify writer pod, delete PVC and validate
@@ -460,11 +460,11 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
var ns string
var NFSconfig VolumeTestConfig
var serverIP string
- var nfsServerPod *api.Pod
+ var nfsServerPod *v1.Pod
// config for the nfs-server pod in the default namespace
NFSconfig = VolumeTestConfig{
- namespace: api.NamespaceDefault,
+ namespace: v1.NamespaceDefault,
prefix: "nfs",
serverImage: "gcr.io/google_containers/volume-nfs:0.7",
serverPorts: []int{2049},
@@ -496,8 +496,8 @@ var _ = framework.KubeDescribe("PersistentVolumes", func() {
Context("with Single PV - PVC pairs", func() {
- var pv *api.PersistentVolume
- var pvc *api.PersistentVolumeClaim
+ var pv *v1.PersistentVolume
+ var pvc *v1.PersistentVolumeClaim
// Note: this is the only code where the pv is deleted.
AfterEach(func() {
@@ -627,41 +627,41 @@ func makePvcKey(ns, name string) types.NamespacedName {
// (instantiated) and thus the PV's ClaimRef cannot be completely filled in within
// this func. Therefore, the ClaimRef's name is added later in
// createPVCPV.
-func makePersistentVolume(serverIP string, pvc *api.PersistentVolumeClaim) *api.PersistentVolume {
+func makePersistentVolume(serverIP string, pvc *v1.PersistentVolumeClaim) *v1.PersistentVolume {
// Specs are expected to match this test's PersistentVolumeClaim
- var claimRef *api.ObjectReference
+ var claimRef *v1.ObjectReference
if pvc != nil {
- claimRef = &api.ObjectReference{
+ claimRef = &v1.ObjectReference{
Name: pvc.Name,
Namespace: pvc.Namespace,
}
}
- return &api.PersistentVolume{
- ObjectMeta: api.ObjectMeta{
+ return &v1.PersistentVolume{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "nfs-",
Annotations: map[string]string{
volumehelper.VolumeGidAnnotationKey: "777",
},
},
- Spec: api.PersistentVolumeSpec{
- PersistentVolumeReclaimPolicy: api.PersistentVolumeReclaimRecycle,
- Capacity: api.ResourceList{
- api.ResourceName(api.ResourceStorage): resource.MustParse("2Gi"),
+ Spec: v1.PersistentVolumeSpec{
+ PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRecycle,
+ Capacity: v1.ResourceList{
+ v1.ResourceName(v1.ResourceStorage): resource.MustParse("2Gi"),
},
- PersistentVolumeSource: api.PersistentVolumeSource{
- NFS: &api.NFSVolumeSource{
+ PersistentVolumeSource: v1.PersistentVolumeSource{
+ NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/exports",
ReadOnly: false,
},
},
- AccessModes: []api.PersistentVolumeAccessMode{
- api.ReadWriteOnce,
- api.ReadOnlyMany,
- api.ReadWriteMany,
+ AccessModes: []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
+ v1.ReadOnlyMany,
+ v1.ReadWriteMany,
},
ClaimRef: claimRef,
},
@@ -672,23 +672,23 @@ func makePersistentVolume(serverIP string, pvc *api.PersistentVolumeClaim) *api.
// Note: if this PVC is intended to be pre-bound to a PV, whose name is not
// known until the PV is instantiated, then the func createPVPVC will add
// pvc.Spec.VolumeName to this claim.
-func makePersistentVolumeClaim(ns string) *api.PersistentVolumeClaim {
+func makePersistentVolumeClaim(ns string) *v1.PersistentVolumeClaim {
// Specs are expected to match this test's PersistentVolume
- return &api.PersistentVolumeClaim{
- ObjectMeta: api.ObjectMeta{
+ return &v1.PersistentVolumeClaim{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
- Spec: api.PersistentVolumeClaimSpec{
- AccessModes: []api.PersistentVolumeAccessMode{
- api.ReadWriteOnce,
- api.ReadOnlyMany,
- api.ReadWriteMany,
+ Spec: v1.PersistentVolumeClaimSpec{
+ AccessModes: []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
+ v1.ReadOnlyMany,
+ v1.ReadWriteMany,
},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
@@ -697,44 +697,44 @@ func makePersistentVolumeClaim(ns string) *api.PersistentVolumeClaim {
// Returns a pod definition based on the namespace. The pod references the PVC's
// name.
-func makeWritePod(ns string, pvcName string) *api.Pod {
+func makeWritePod(ns string, pvcName string) *v1.Pod {
// Prepare pod that mounts the NFS volume again and
// checks that /mnt/index.html was scrubbed there
var isPrivileged bool = true
- return &api.Pod{
+ return &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "write-pod-",
Namespace: ns,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "write-pod",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "touch /mnt/SUCCESS && (id -G | grep -E '\\b777\\b')"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "nfs-pvc",
MountPath: "/mnt",
},
},
- SecurityContext: &api.SecurityContext{
+ SecurityContext: &v1.SecurityContext{
Privileged: &isPrivileged,
},
},
},
- RestartPolicy: api.RestartPolicyOnFailure,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyOnFailure,
+ Volumes: []v1.Volume{
{
Name: "nfs-pvc",
- VolumeSource: api.VolumeSource{
- PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvcName,
},
},
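
Across persistent_volumes.go the change is mechanical: phase constants and object types move to v1 while the framework wait helpers keep their signatures. A condensed sketch of the bind-wait step, assuming clientset is aliased to the release_1_5 package as in the imports above:

// Sketch only: waiting for a PVC and its PV to bind, with versioned phase constants.
func sketchWaitOnPVandPVC(c clientset.Interface, ns string, pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim) {
	err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())
	// The PV should already be bound since the claim is; verify its phase too.
	err = framework.WaitForPersistentVolumePhase(v1.VolumeBound, c, pv.Name, 3*time.Second, 300*time.Second)
	Expect(err).NotTo(HaveOccurred())
}
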
diff --git a/test/e2e/petset.go b/test/e2e/petset.go
index ad2a47ee40c..cc6b6486297 100644
--- a/test/e2e/petset.go
+++ b/test/e2e/petset.go
@@ -32,8 +32,9 @@ import (
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apis/apps"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/petset"
"k8s.io/kubernetes/pkg/labels"
klabels "k8s.io/kubernetes/pkg/labels"
@@ -89,12 +90,12 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
"baz": "blah",
}
headlessSvcName := "test"
- var petMounts, podMounts []api.VolumeMount
+ var petMounts, podMounts []v1.VolumeMount
var ps *apps.StatefulSet
BeforeEach(func() {
- petMounts = []api.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
- podMounts = []api.VolumeMount{{Name: "home", MountPath: "/home"}}
+ petMounts = []v1.VolumeMount{{Name: "datadir", MountPath: "/data/"}}
+ podMounts = []v1.VolumeMount{{Name: "home", MountPath: "/home"}}
ps = newStatefulSet(psName, ns, headlessSvcName, 2, petMounts, podMounts, labels)
By("Creating service " + headlessSvcName + " in namespace " + ns)
@@ -113,7 +114,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("should provide basic identity", func() {
By("Creating statefulset " + psName + " in namespace " + ns)
- ps.Spec.Replicas = 3
+ *(ps.Spec.Replicas) = 3
setInitializedAnnotation(ps, "false")
_, err := c.Apps().StatefulSets(ns).Create(ps)
@@ -148,7 +149,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("should handle healthy pet restarts during scale", func() {
By("Creating statefulset " + psName + " in namespace " + ns)
- ps.Spec.Replicas = 2
+ *(ps.Spec.Replicas) = 2
setInitializedAnnotation(ps, "false")
_, err := c.Apps().StatefulSets(ns).Create(ps)
@@ -183,14 +184,14 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("should allow template updates", func() {
By("Creating stateful set " + psName + " in namespace " + ns)
- ps.Spec.Replicas = 2
+ *(ps.Spec.Replicas) = 2
ps, err := c.Apps().StatefulSets(ns).Create(ps)
Expect(err).NotTo(HaveOccurred())
pst := statefulSetTester{c: c}
- pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
newImage := newNginxImage
oldImage := ps.Spec.Template.Spec.Containers[0].Image
@@ -206,10 +207,10 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
pst.deletePetAtIndex(updateIndex, ps)
By("Waiting for all stateful pods to be running again")
- pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
By(fmt.Sprintf("Verifying stateful pod at index %d is updated", updateIndex))
- verify := func(pod *api.Pod) {
+ verify := func(pod *v1.Pod) {
podImage := pod.Spec.Containers[0].Image
Expect(podImage).To(Equal(newImage), fmt.Sprintf("Expected stateful pod image %s updated to %s", podImage, newImage))
}
@@ -218,7 +219,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("Scaling down before scale up is finished should wait until current pod will be running and ready before it will be removed", func() {
By("Creating stateful set " + psName + " in namespace " + ns + ", and pausing scale operations after each pod")
- testProbe := &api.Probe{Handler: api.Handler{HTTPGet: &api.HTTPGetAction{
+ testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80}}}}
ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, labels)
@@ -247,8 +248,8 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
expectedPodName := ps.Name + "-1"
expectedPod, err := f.ClientSet.Core().Pods(ns).Get(expectedPodName)
Expect(err).NotTo(HaveOccurred())
- watcher, err := f.ClientSet.Core().Pods(ns).Watch(api.SingleObject(
- api.ObjectMeta{
+ watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.SingleObject(
+ v1.ObjectMeta{
Name: expectedPod.Name,
ResourceVersion: expectedPod.ResourceVersion,
},
@@ -258,16 +259,16 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
By("Verifying the 2nd pod is removed only when it becomes running and ready")
pst.restoreProbe(ps, testProbe)
_, err = watch.Until(statefulsetTimeout, watcher, func(event watch.Event) (bool, error) {
- pod := event.Object.(*api.Pod)
+ pod := event.Object.(*v1.Pod)
if event.Type == watch.Deleted && pod.Name == expectedPodName {
return false, fmt.Errorf("Pod %v was deleted before enter running", pod.Name)
}
framework.Logf("Observed event %v for pod %v. Phase %v, Pod is ready %v",
- event.Type, pod.Name, pod.Status.Phase, api.IsPodReady(pod))
+ event.Type, pod.Name, pod.Status.Phase, v1.IsPodReady(pod))
if pod.Name != expectedPodName {
return false, nil
}
- if pod.Status.Phase == api.PodRunning && api.IsPodReady(pod) {
+ if pod.Status.Phase == v1.PodRunning && v1.IsPodReady(pod) {
return true, nil
}
return false, nil
@@ -278,13 +279,13 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
It("Scaling should happen in predictable order and halt if any pet is unhealthy", func() {
psLabels := klabels.Set(labels)
By("Initializing watcher for selector " + psLabels.String())
- watcher, err := f.ClientSet.Core().Pods(ns).Watch(api.ListOptions{
- LabelSelector: psLabels.AsSelector(),
+ watcher, err := f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
+ LabelSelector: psLabels.AsSelector().String(),
})
Expect(err).NotTo(HaveOccurred())
By("Creating stateful set " + psName + " in namespace " + ns)
- testProbe := &api.Probe{Handler: api.Handler{HTTPGet: &api.HTTPGetAction{
+ testProbe := &v1.Probe{Handler: v1.Handler{HTTPGet: &v1.HTTPGetAction{
Path: "/index.html",
Port: intstr.IntOrString{IntVal: 80}}}}
ps := newStatefulSet(psName, ns, headlessSvcName, 1, nil, nil, psLabels)
@@ -294,11 +295,11 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
By("Waiting until all stateful set " + psName + " replicas will be running in namespace " + ns)
pst := &statefulSetTester{c: c}
- pst.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndReady(*ps.Spec.Replicas, ps)
By("Confirming that stateful set scale up will halt with unhealthy pet")
pst.breakProbe(ps, testProbe)
- pst.waitForRunningAndNotReady(ps.Spec.Replicas, ps)
+ pst.waitForRunningAndNotReady(*ps.Spec.Replicas, ps)
pst.updateReplicas(ps, 3)
pst.confirmPetCount(1, ps, 10*time.Second)
@@ -312,7 +313,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
if event.Type != watch.Added {
return false, nil
}
- pod := event.Object.(*api.Pod)
+ pod := event.Object.(*v1.Pod)
if pod.Name == expectedOrder[0] {
expectedOrder = expectedOrder[1:]
}
@@ -322,8 +323,8 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
Expect(err).NotTo(HaveOccurred())
By("Scale down will halt with unhealthy pet")
- watcher, err = f.ClientSet.Core().Pods(ns).Watch(api.ListOptions{
- LabelSelector: psLabels.AsSelector(),
+ watcher, err = f.ClientSet.Core().Pods(ns).Watch(v1.ListOptions{
+ LabelSelector: psLabels.AsSelector().String(),
})
Expect(err).NotTo(HaveOccurred())
@@ -342,7 +343,7 @@ var _ = framework.KubeDescribe("StatefulSet [Slow]", func() {
if event.Type != watch.Deleted {
return false, nil
}
- pod := event.Object.(*api.Pod)
+ pod := event.Object.(*v1.Pod)
if pod.Name == expectedOrder[0] {
expectedOrder = expectedOrder[1:]
}
@@ -430,17 +431,17 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
node := nodes.Items[0]
By("creating pod with conflicting port in namespace " + f.Namespace.Name)
- conflictingPort := api.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ conflictingPort := v1.ContainerPort{HostPort: 21017, ContainerPort: 21017, Name: "conflict"}
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
- Ports: []api.ContainerPort{conflictingPort},
+ Ports: []v1.ContainerPort{conflictingPort},
},
},
NodeName: node.Name,
@@ -464,11 +465,11 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
var initialPetPodUID types.UID
By("waiting until pet pod " + petPodName + " will be recreated and deleted at least once in namespace " + f.Namespace.Name)
- w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(api.SingleObject(api.ObjectMeta{Name: petPodName}))
+ w, err := f.ClientSet.Core().Pods(f.Namespace.Name).Watch(v1.SingleObject(v1.ObjectMeta{Name: petPodName}))
framework.ExpectNoError(err)
// we need to get the UID from the pod in any state and wait until the stateful set controller removes the pod at least once
_, err = watch.Until(petPodTimeout, w, func(event watch.Event) (bool, error) {
- pod := event.Object.(*api.Pod)
+ pod := event.Object.(*v1.Pod)
switch event.Type {
case watch.Deleted:
framework.Logf("Observed delete event for pet pod %v in namespace %v", pod.Name, pod.Namespace)
@@ -487,7 +488,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
}
By("removing pod with conflicting port in namespace " + f.Namespace.Name)
- err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
+ err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
framework.ExpectNoError(err)
By("waiting when pet pod " + petPodName + " will be recreated in namespace " + f.Namespace.Name + " and will be in running state")
@@ -497,7 +498,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
if err != nil {
return err
}
- if petPod.Status.Phase != api.PodRunning {
+ if petPod.Status.Phase != v1.PodRunning {
return fmt.Errorf("Pod %v is not in running phase: %v", petPod.Name, petPod.Status.Phase)
} else if petPod.UID == initialPetPodUID {
return fmt.Errorf("Pod %v wasn't recreated: %v == %v", petPod.Name, petPod.UID, initialPetPodUID)
@@ -508,7 +509,7 @@ var _ = framework.KubeDescribe("Stateful Set recreate [Slow]", func() {
})
func dumpDebugInfo(c clientset.Interface, ns string) {
- pl, _ := c.Core().Pods(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+ pl, _ := c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
for _, p := range pl.Items {
desc, _ := framework.RunKubectl("describe", "po", p.Name, fmt.Sprintf("--namespace=%v", ns))
framework.Logf("\nOutput of kubectl describe %v:\n%v", p.Name, desc)
@@ -557,7 +558,7 @@ func (c *clusterAppTester) run() {
if restartCluster {
By("Restarting stateful set " + ps.Name)
c.tester.restart(ps)
- c.tester.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ c.tester.waitForRunningAndReady(*ps.Spec.Replicas, ps)
}
}
@@ -746,9 +747,9 @@ func (p *statefulSetTester) createStatefulSet(manifestPath, ns string) *apps.Sta
framework.Logf(fmt.Sprintf("creating " + ps.Name + " service"))
framework.RunKubectlOrDie("create", "-f", mkpath("service.yaml"), fmt.Sprintf("--namespace=%v", ns))
- framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, ps.Spec.Replicas, ps.Spec.Selector))
+ framework.Logf(fmt.Sprintf("creating statefulset %v/%v with %d replicas and selector %+v", ps.Namespace, ps.Name, *(ps.Spec.Replicas), ps.Spec.Selector))
framework.RunKubectlOrDie("create", "-f", mkpath("petset.yaml"), fmt.Sprintf("--namespace=%v", ns))
- p.waitForRunningAndReady(ps.Spec.Replicas, ps)
+ p.waitForRunningAndReady(*ps.Spec.Replicas, ps)
return ps
}
@@ -797,7 +798,7 @@ func (p *statefulSetTester) checkHostname(ps *apps.StatefulSet) error {
func (p *statefulSetTester) saturate(ps *apps.StatefulSet) {
// TODO: Watch events and check that creation timestamps don't overlap
var i int32
- for i = 0; i < ps.Spec.Replicas; i++ {
+ for i = 0; i < *(ps.Spec.Replicas); i++ {
framework.Logf("Waiting for pet at index " + fmt.Sprintf("%v", i+1) + " to enter Running")
p.waitForRunningAndReady(i+1, ps)
framework.Logf("Marking pet at index " + fmt.Sprintf("%v", i) + " healthy")
@@ -808,12 +809,12 @@ func (p *statefulSetTester) saturate(ps *apps.StatefulSet) {
func (p *statefulSetTester) deletePetAtIndex(index int, ps *apps.StatefulSet) {
name := getPodNameAtIndex(index, ps)
noGrace := int64(0)
- if err := p.c.Core().Pods(ps.Namespace).Delete(name, &api.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
+ if err := p.c.Core().Pods(ps.Namespace).Delete(name, &v1.DeleteOptions{GracePeriodSeconds: &noGrace}); err != nil {
framework.Failf("Failed to delete pet %v for StatefulSet %v/%v: %v", name, ps.Namespace, ps.Name, err)
}
}
-type verifyPodFunc func(*api.Pod)
+type verifyPodFunc func(*v1.Pod)
func (p *statefulSetTester) verifyPodAtIndex(index int, ps *apps.StatefulSet, verify verifyPodFunc) {
name := getPodNameAtIndex(index, ps)
@@ -831,9 +832,9 @@ func getPodNameAtIndex(index int, ps *apps.StatefulSet) string {
func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
name := ps.Name
ns := ps.Namespace
- p.update(ns, name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = count })
+ p.update(ns, name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = count })
- var petList *api.PodList
+ var petList *v1.PodList
pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
petList = p.getPodList(ps)
if int32(len(petList.Items)) == count {
@@ -844,8 +845,8 @@ func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
if pollErr != nil {
unhealthy := []string{}
for _, pet := range petList.Items {
- delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, api.IsPodReady(&pet)
- if delTs != nil || phase != api.PodRunning || !readiness {
+ delTs, phase, readiness := pet.DeletionTimestamp, pet.Status.Phase, v1.IsPodReady(&pet)
+ if delTs != nil || phase != v1.PodRunning || !readiness {
unhealthy = append(unhealthy, fmt.Sprintf("%v: deletion %v, phase %v, readiness %v", pet.Name, delTs, phase, readiness))
}
}
@@ -855,13 +856,13 @@ func (p *statefulSetTester) scale(ps *apps.StatefulSet, count int32) error {
}
func (p *statefulSetTester) updateReplicas(ps *apps.StatefulSet, count int32) {
- p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = count })
+ p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = &count })
}
func (p *statefulSetTester) restart(ps *apps.StatefulSet) {
- oldReplicas := ps.Spec.Replicas
+ oldReplicas := *(ps.Spec.Replicas)
ExpectNoError(p.scale(ps, 0))
- p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { ps.Spec.Replicas = oldReplicas })
+ p.update(ps.Namespace, ps.Name, func(ps *apps.StatefulSet) { *(ps.Spec.Replicas) = oldReplicas })
}
func (p *statefulSetTester) update(ns, name string, update func(ps *apps.StatefulSet)) {
@@ -882,10 +883,10 @@ func (p *statefulSetTester) update(ns, name string, update func(ps *apps.Statefu
framework.Failf("too many retries draining statefulset %q", name)
}
-func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *api.PodList {
+func (p *statefulSetTester) getPodList(ps *apps.StatefulSet) *v1.PodList {
selector, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
ExpectNoError(err)
- podList, err := p.c.Core().Pods(ps.Namespace).List(api.ListOptions{LabelSelector: selector})
+ podList, err := p.c.Core().Pods(ps.Namespace).List(v1.ListOptions{LabelSelector: selector.String()})
ExpectNoError(err)
return podList
}
@@ -916,10 +917,10 @@ func (p *statefulSetTester) waitForRunning(numPets int32, ps *apps.StatefulSet,
return false, fmt.Errorf("Too many pods scheduled, expected %d got %d", numPets, len(podList.Items))
}
for _, p := range podList.Items {
- isReady := api.IsPodReady(&p)
+ isReady := v1.IsPodReady(&p)
desiredReadiness := shouldBeReady == isReady
- framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, api.PodRunning, shouldBeReady, p.Status.Phase, isReady)
- if p.Status.Phase != api.PodRunning || !desiredReadiness {
+ framework.Logf("Waiting for pod %v to enter %v - Ready=%v, currently %v - Ready=%v", p.Name, v1.PodRunning, shouldBeReady, p.Status.Phase, isReady)
+ if p.Status.Phase != v1.PodRunning || !desiredReadiness {
return false, nil
}
}
@@ -938,7 +939,7 @@ func (p *statefulSetTester) waitForRunningAndNotReady(numPets int32, ps *apps.St
p.waitForRunning(numPets, ps, false)
}
-func (p *statefulSetTester) breakProbe(ps *apps.StatefulSet, probe *api.Probe) error {
+func (p *statefulSetTester) breakProbe(ps *apps.StatefulSet, probe *v1.Probe) error {
path := probe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -947,7 +948,7 @@ func (p *statefulSetTester) breakProbe(ps *apps.StatefulSet, probe *api.Probe) e
return p.execInPets(ps, cmd)
}
-func (p *statefulSetTester) restoreProbe(ps *apps.StatefulSet, probe *api.Probe) error {
+func (p *statefulSetTester) restoreProbe(ps *apps.StatefulSet, probe *v1.Probe) error {
path := probe.HTTPGet.Path
if path == "" {
return fmt.Errorf("Path expected to be not empty: %v", path)
@@ -960,7 +961,7 @@ func (p *statefulSetTester) setHealthy(ps *apps.StatefulSet) {
podList := p.getPodList(ps)
markedHealthyPod := ""
for _, pod := range podList.Items {
- if pod.Status.Phase != api.PodRunning {
+ if pod.Status.Phase != v1.PodRunning {
framework.Failf("Found pod in %v cannot set health", pod.Status.Phase)
}
if isInitialized(pod) {
@@ -969,7 +970,7 @@ func (p *statefulSetTester) setHealthy(ps *apps.StatefulSet) {
if markedHealthyPod != "" {
framework.Failf("Found multiple non-healthy pets: %v and %v", pod.Name, markedHealthyPod)
}
- p, err := framework.UpdatePodWithRetries(p.c, pod.Namespace, pod.Name, func(up *api.Pod) {
+ p, err := framework.UpdatePodWithRetries(p.c, pod.Namespace, pod.Name, func(up *v1.Pod) {
up.Annotations[petset.StatefulSetInitAnnotation] = "true"
})
ExpectNoError(err)
@@ -1001,7 +1002,7 @@ func (p *statefulSetTester) waitForStatus(ps *apps.StatefulSet, expectedReplicas
func deleteAllStatefulSets(c clientset.Interface, ns string) {
pst := &statefulSetTester{c: c}
- psList, err := c.Apps().StatefulSets(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+ psList, err := c.Apps().StatefulSets(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
ExpectNoError(err)
// Scale down each statefulset, then delete it completely.
@@ -1023,7 +1024,7 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) {
pvNames := sets.NewString()
// TODO: Don't assume all pvcs in the ns belong to a statefulset
pvcPollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
- pvcList, err := c.Core().PersistentVolumeClaims(ns).List(api.ListOptions{LabelSelector: labels.Everything()})
+ pvcList, err := c.Core().PersistentVolumeClaims(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("WARNING: Failed to list pvcs, retrying %v", err)
return false, nil
@@ -1043,7 +1044,7 @@ func deleteAllStatefulSets(c clientset.Interface, ns string) {
}
pollErr := wait.PollImmediate(statefulsetPoll, statefulsetTimeout, func() (bool, error) {
- pvList, err := c.Core().PersistentVolumes().List(api.ListOptions{LabelSelector: labels.Everything()})
+ pvList, err := c.Core().PersistentVolumes().List(v1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
framework.Logf("WARNING: Failed to list pvs, retrying %v", err)
return false, nil
@@ -1089,7 +1090,7 @@ func pollReadWithTimeout(pet petTester, petNumber int, key, expectedVal string)
return err
}
-func isInitialized(pod api.Pod) bool {
+func isInitialized(pod v1.Pod) bool {
initialized, ok := pod.Annotations[petset.StatefulSetInitAnnotation]
if !ok {
return false
@@ -1105,40 +1106,40 @@ func dec(i int64, exponent int) *inf.Dec {
return inf.NewDec(i, inf.Scale(-exponent))
}
-func newPVC(name string) api.PersistentVolumeClaim {
- return api.PersistentVolumeClaim{
- ObjectMeta: api.ObjectMeta{
+func newPVC(name string) v1.PersistentVolumeClaim {
+ return v1.PersistentVolumeClaim{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Annotations: map[string]string{
"volume.alpha.kubernetes.io/storage-class": "anything",
},
},
- Spec: api.PersistentVolumeClaimSpec{
- AccessModes: []api.PersistentVolumeAccessMode{
- api.ReadWriteOnce,
+ Spec: v1.PersistentVolumeClaimSpec{
+ AccessModes: []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
},
},
},
}
}
-func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts []api.VolumeMount, podMounts []api.VolumeMount, labels map[string]string) *apps.StatefulSet {
+func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount, labels map[string]string) *apps.StatefulSet {
mounts := append(petMounts, podMounts...)
- claims := []api.PersistentVolumeClaim{}
+ claims := []v1.PersistentVolumeClaim{}
for _, m := range petMounts {
claims = append(claims, newPVC(m.Name))
}
- vols := []api.Volume{}
+ vols := []v1.Volume{}
for _, m := range podMounts {
- vols = append(vols, api.Volume{
+ vols = append(vols, v1.Volume{
Name: m.Name,
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", m.Name),
},
},
@@ -1150,7 +1151,7 @@ func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts
Kind: "StatefulSet",
APIVersion: "apps/v1beta1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: ns,
},
@@ -1158,14 +1159,14 @@ func newStatefulSet(name, ns, governingSvcName string, replicas int32, petMounts
Selector: &unversioned.LabelSelector{
MatchLabels: labels,
},
- Replicas: replicas,
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Replicas: func(i int32) *int32 { return &i }(replicas),
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: labels,
Annotations: map[string]string{},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: nginxImage,
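
In apps/v1beta1 the StatefulSet Replicas field is *int32, which is why every read above dereferences and every literal assignment goes through a pointer. A hedged sketch of the three forms the diff uses; int32Ptr is a hypothetical helper, the diff inlines an equivalent func literal:

// Sketch only: handling *int32 replicas after the move to versioned types.
func int32Ptr(i int32) *int32 { return &i } // hypothetical helper

func sketchReplicas(ps *apps.StatefulSet, pst *statefulSetTester) {
	ps.Spec.Replicas = int32Ptr(3)                    // assign a literal through a pointer
	*(ps.Spec.Replicas) = 2                           // mutate the pointed-to value in place
	pst.waitForRunningAndReady(*ps.Spec.Replicas, ps) // dereference when reading
}
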
diff --git a/test/e2e/pod_gc.go b/test/e2e/pod_gc.go
index 91c93b910d3..8121f5d1de0 100644
--- a/test/e2e/pod_gc.go
+++ b/test/e2e/pod_gc.go
@@ -22,7 +22,7 @@ import (
. "github.com/onsi/ginkgo"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -38,7 +38,7 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
for count < 1000 {
pod, err := createTerminatingPod(f)
pod.ResourceVersion = ""
- pod.Status.Phase = api.PodFailed
+ pod.Status.Phase = v1.PodFailed
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).UpdateStatus(pod)
if err != nil {
framework.Failf("err failing pod: %v", err)
@@ -55,13 +55,13 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
// The gc controller polls every 30s and fires off a goroutine per
// pod to terminate.
var err error
- var pods *api.PodList
+ var pods *v1.PodList
timeout := 2 * time.Minute
gcThreshold := 100
By(fmt.Sprintf("Waiting for gc controller to gc all but %d pods", gcThreshold))
pollErr := wait.Poll(1*time.Minute, timeout, func() (bool, error) {
- pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(api.ListOptions{})
+ pods, err = f.ClientSet.Core().Pods(f.Namespace.Name).List(v1.ListOptions{})
if err != nil {
framework.Logf("Failed to list pod %v", err)
return false, nil
@@ -78,17 +78,17 @@ var _ = framework.KubeDescribe("Pod garbage collector [Feature:PodGarbageCollect
})
})
-func createTerminatingPod(f *framework.Framework) (*api.Pod, error) {
+func createTerminatingPod(f *framework.Framework) (*v1.Pod, error) {
uuid := uuid.NewUUID()
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: string(uuid),
Annotations: map[string]string{
"scheduler.alpha.kubernetes.io/name": "please don't schedule my pods",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: string(uuid),
Image: "gcr.io/google_containers/busybox:1.24",
diff --git a/test/e2e/pods.go b/test/e2e/pods.go
index fee5a4c9b47..ee4779f8b00 100644
--- a/test/e2e/pods.go
+++ b/test/e2e/pods.go
@@ -24,7 +24,7 @@ import (
"strconv"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
@@ -45,16 +45,16 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("creating the pod")
name := "pod-submit-remove-" + string(uuid.NewUUID())
value := strconv.Itoa(time.Now().Nanosecond())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: map[string]string{
"name": "foo",
"time": value,
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "nginx",
Image: "gcr.io/google_containers/nginx-slim:0.7",
@@ -65,12 +65,12 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("setting up watch")
selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options := api.ListOptions{LabelSelector: selector}
+ options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(0))
- options = api.ListOptions{
- LabelSelector: selector,
+ options = v1.ListOptions{
+ LabelSelector: selector.String(),
ResourceVersion: pods.ListMeta.ResourceVersion,
}
w, err := podClient.Watch(options)
@@ -81,7 +81,7 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("verifying the pod is in kubernetes")
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options = api.ListOptions{LabelSelector: selector}
+ options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pod")
Expect(len(pods.Items)).To(Equal(1))
@@ -159,13 +159,13 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
By("verifying pod deletion was observed")
deleted := false
timeout := false
- var lastPod *api.Pod
+ var lastPod *v1.Pod
timer := time.After(30 * time.Second)
for !deleted && !timeout {
select {
case event, _ := <-w.ResultChan():
if event.Type == watch.Deleted {
- lastPod = event.Object.(*api.Pod)
+ lastPod = event.Object.(*v1.Pod)
deleted = true
}
case <-timer:
@@ -180,7 +180,7 @@ var _ = framework.KubeDescribe("Pods Delete Grace Period", func() {
Expect(lastPod.Spec.TerminationGracePeriodSeconds).ToNot(BeZero())
selector = labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
- options = api.ListOptions{LabelSelector: selector}
+ options = v1.ListOptions{LabelSelector: selector.String()}
pods, err = podClient.List(options)
Expect(err).NotTo(HaveOccurred(), "failed to query for pods")
Expect(len(pods.Items)).To(Equal(0))
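
v1.ListOptions carries the label selector as a plain string, so typed selectors built with pkg/labels are converted with String() before List or Watch. A condensed sketch of the list-then-watch sequence used above, assuming clientset is the release_1_5 alias:

// Sketch only: selectors are stringified for the versioned ListOptions.
func sketchListAndWatch(c clientset.Interface, ns, value string) {
	podClient := c.Core().Pods(ns)
	selector := labels.SelectorFromSet(labels.Set(map[string]string{"time": value}))
	options := v1.ListOptions{LabelSelector: selector.String()}
	pods, err := podClient.List(options)
	Expect(err).NotTo(HaveOccurred())

	// The watch resumes from the list's resource version; the selector stays a string.
	options = v1.ListOptions{
		LabelSelector:   selector.String(),
		ResourceVersion: pods.ListMeta.ResourceVersion,
	}
	_, err = podClient.Watch(options)
	Expect(err).NotTo(HaveOccurred())
}

// "Select everything" follows the same rule:
//   c.Core().Pods(ns).List(v1.ListOptions{LabelSelector: labels.Everything().String()})
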
diff --git a/test/e2e/portforward.go b/test/e2e/portforward.go
index e3e6331ca8e..3a8974c0578 100644
--- a/test/e2e/portforward.go
+++ b/test/e2e/portforward.go
@@ -28,7 +28,7 @@ import (
"syscall"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/test/e2e/framework"
@@ -46,18 +46,18 @@ var (
portForwardPortToStdOutV = version.MustParse("v1.3.0-alpha.4")
)
-func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string) *api.Pod {
- return &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "portforwardtester",
Image: "gcr.io/google_containers/portforwardtester:1.0",
- Env: []api.EnvVar{
+ Env: []v1.EnvVar{
{
Name: "BIND_PORT",
Value: "80",
@@ -81,7 +81,7 @@ func pfPod(expectedClientData, chunks, chunkSize, chunkIntervalMillis string) *a
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
}
diff --git a/test/e2e/pre_stop.go b/test/e2e/pre_stop.go
index 38dd3e5a9e9..6c2eab0a30d 100644
--- a/test/e2e/pre_stop.go
+++ b/test/e2e/pre_stop.go
@@ -21,8 +21,8 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -36,16 +36,16 @@ type State struct {
func testPreStop(c clientset.Interface, ns string) {
// This is the server that will receive the preStop notification
- podDescr := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podDescr := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "server",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "server",
Image: "gcr.io/google_containers/nettest:1.7",
- Ports: []api.ContainerPort{{ContainerPort: 8080}},
+ Ports: []v1.ContainerPort{{ContainerPort: 8080}},
},
},
},
@@ -69,19 +69,19 @@ func testPreStop(c clientset.Interface, ns string) {
podOut, err := c.Core().Pods(ns).Get(podDescr.Name)
framework.ExpectNoError(err, "getting pod info")
- preStopDescr := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ preStopDescr := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: "tester",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "tester",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"sleep", "600"},
- Lifecycle: &api.Lifecycle{
- PreStop: &api.Handler{
- Exec: &api.ExecAction{
+ Lifecycle: &v1.Lifecycle{
+ PreStop: &v1.Handler{
+ Exec: &v1.ExecAction{
Command: []string{
"wget", "-O-", "--post-data=" + val, fmt.Sprintf("http://%s:8080/write", podOut.Status.PodIP),
},
diff --git a/test/e2e/proxy.go b/test/e2e/proxy.go
index bb6e335d771..2637841aaa0 100644
--- a/test/e2e/proxy.go
+++ b/test/e2e/proxy.go
@@ -24,10 +24,10 @@ import (
"sync"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/net"
"k8s.io/kubernetes/test/e2e/framework"
@@ -49,7 +49,7 @@ const (
)
var _ = framework.KubeDescribe("Proxy", func() {
- version := registered.GroupOrDie(api.GroupName).GroupVersion.Version
+ version := registered.GroupOrDie(v1.GroupName).GroupVersion.Version
Context("version "+version, func() {
options := framework.FrameworkOptions{
ClientQPS: -1.0,
@@ -71,13 +71,13 @@ var _ = framework.KubeDescribe("Proxy", func() {
It("should proxy through a service and a pod [Conformance]", func() {
start := time.Now()
labels := map[string]string{"proxy-service-target": "true"}
- service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&api.Service{
- ObjectMeta: api.ObjectMeta{
+ service, err := f.ClientSet.Core().Services(f.Namespace.Name).Create(&v1.Service{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "proxy-service-",
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: labels,
- Ports: []api.ServicePort{
+ Ports: []v1.ServicePort{
{
Name: "portname1",
Port: 80,
@@ -107,14 +107,15 @@ var _ = framework.KubeDescribe("Proxy", func() {
// a simple server which serves the values of the
// environment variables below.
By("starting an echo server on multiple ports")
- pods := []*api.Pod{}
+ pods := []*v1.Pod{}
cfg := testutils.RCConfig{
- Client: f.ClientSet,
- Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
- Name: service.Name,
- Namespace: f.Namespace.Name,
- Replicas: 1,
- PollInterval: time.Second,
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Image: "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
+ Name: service.Name,
+ Namespace: f.Namespace.Name,
+ Replicas: 1,
+ PollInterval: time.Second,
Env: map[string]string{
"SERVE_PORT_80": `test`,
"SERVE_PORT_1080": `test`,
@@ -132,9 +133,9 @@ var _ = framework.KubeDescribe("Proxy", func() {
"tlsdest1": 460,
"tlsdest2": 462,
},
- ReadinessProbe: &api.Probe{
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ ReadinessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
},
},
@@ -146,7 +147,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
CreatedPods: &pods,
}
Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
- defer framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, cfg.Name)
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)
Expect(f.WaitForAnEndpoint(service.Name)).NotTo(HaveOccurred())
@@ -260,7 +261,7 @@ var _ = framework.KubeDescribe("Proxy", func() {
}
if len(errs) != 0 {
- body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &api.PodLogOptions{}).Do().Raw()
+ body, err := f.ClientSet.Core().Pods(f.Namespace.Name).GetLogs(pods[0].Name, &v1.PodLogOptions{}).Do().Raw()
if err != nil {
framework.Logf("Error getting logs for pod %s: %v", pods[0].Name, err)
} else {
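
The workload helpers now take both clients: the versioned release_1_5 clientset drives the API calls, and the internal clientset is threaded through alongside it, presumably because parts of the framework still consume the internal types. A hedged sketch of the shape used above; fields other than the two clients are illustrative:

// Sketch only: RCConfig now carries an InternalClient next to Client.
func sketchRunAndCleanupRC(f *framework.Framework) {
	cfg := testutils.RCConfig{
		Client:         f.ClientSet,         // versioned clientset (release_1_5)
		InternalClient: f.InternalClientset, // internal clientset, newly threaded through
		Image:          "gcr.io/google_containers/porter:cd5cb5791ebaa8641955f0e8c2a9bed669b1eaab",
		Name:           "proxy-sketch", // illustrative name
		Namespace:      f.Namespace.Name,
		Replicas:       1,
		PollInterval:   time.Second,
	}
	Expect(framework.RunRC(cfg)).NotTo(HaveOccurred())
	// Cleanup passes both clientsets as well.
	defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, cfg.Name)
}
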
diff --git a/test/e2e/rc.go b/test/e2e/rc.go
index 126311c18ad..6f2b8bc560b 100644
--- a/test/e2e/rc.go
+++ b/test/e2e/rc.go
@@ -20,8 +20,8 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/controller/replication"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -51,21 +51,21 @@ var _ = framework.KubeDescribe("ReplicationController", func() {
})
})
-func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *api.ReplicationController {
+func newRC(rsName string, replicas int32, rcPodLabels map[string]string, imageName string, image string) *v1.ReplicationController {
zero := int64(0)
- return &api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+ return &v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Name: rsName,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: replicas,
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int32) *int32 { return &i }(replicas),
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: rcPodLabels,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: imageName,
Image: image,
@@ -89,25 +89,25 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// The source for the Docker container kubernetes/serve_hostname is
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating replication controller %s", name))
- controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+ controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: replicas,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -118,7 +118,7 @@ func ServeImageOrFail(f *framework.Framework, test string, image string) {
// Cleanup the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
- if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
+ if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
}
}()
@@ -169,7 +169,7 @@ func rcConditionCheck(f *framework.Framework) {
if err != nil {
return false, err
}
- podQuota := quota.Status.Hard[api.ResourcePods]
+ podQuota := quota.Status.Hard[v1.ResourcePods]
quantity := resource.MustParse("2")
return (&podQuota).Cmp(quantity) == 0, nil
})
@@ -197,7 +197,7 @@ func rcConditionCheck(f *framework.Framework) {
}
conditions = rc.Status.Conditions
- cond := replication.GetCondition(rc.Status, api.ReplicationControllerReplicaFailure)
+ cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
return cond != nil, nil
})
if err == wait.ErrWaitTimeout {
@@ -206,8 +206,9 @@ func rcConditionCheck(f *framework.Framework) {
Expect(err).NotTo(HaveOccurred())
By(fmt.Sprintf("Scaling down rc %q to satisfy pod quota", name))
- rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *api.ReplicationController) {
- update.Spec.Replicas = 2
+ rc, err = framework.UpdateReplicationControllerWithRetries(c, namespace, name, func(update *v1.ReplicationController) {
+ x := int32(2)
+ update.Spec.Replicas = &x
})
Expect(err).NotTo(HaveOccurred())
@@ -225,7 +226,7 @@ func rcConditionCheck(f *framework.Framework) {
}
conditions = rc.Status.Conditions
- cond := replication.GetCondition(rc.Status, api.ReplicationControllerReplicaFailure)
+ cond := replication.GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
return cond == nil, nil
})
if err == wait.ErrWaitTimeout {
diff --git a/test/e2e/reboot.go b/test/e2e/reboot.go
index 73b842cac8f..313dbeb9e73 100644
--- a/test/e2e/reboot.go
+++ b/test/e2e/reboot.go
@@ -22,7 +22,8 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
@@ -64,7 +65,7 @@ var _ = framework.KubeDescribe("Reboot [Disruptive] [Feature:Reboot]", func() {
// events for the kube-system namespace on failures
namespaceName := api.NamespaceSystem
By(fmt.Sprintf("Collecting events from namespace %q.", namespaceName))
- events, err := f.ClientSet.Core().Events(namespaceName).List(api.ListOptions{})
+ events, err := f.ClientSet.Core().Events(namespaceName).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, e := range events.Items {
@@ -160,7 +161,7 @@ func testReboot(c clientset.Interface, rebootCmd string) {
}
}
-func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*api.Pod) {
+func printStatusAndLogsForNotReadyPods(c clientset.Interface, ns string, podNames []string, pods []*v1.Pod) {
printFn := func(id, log string, err error, previous bool) {
prefix := "Retrieving log for container"
if previous {
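
reboot.go ends up importing both packages: the internal api package stays for api.NamespaceSystem, while the request options move to the versioned type. A minimal sketch of that mixed usage:

// Sketch only: both imports coexist during the migration.
func sketchDumpSystemEvents(f *framework.Framework) {
	namespaceName := api.NamespaceSystem // still the internal package constant
	events, err := f.ClientSet.Core().Events(namespaceName).List(v1.ListOptions{})
	Expect(err).NotTo(HaveOccurred())
	for _, e := range events.Items {
		framework.Logf("event: %v", e) // illustrative logging of each collected event
	}
}
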
diff --git a/test/e2e/replica_set.go b/test/e2e/replica_set.go
index 8e9b62e84af..c0678a7bd76 100644
--- a/test/e2e/replica_set.go
+++ b/test/e2e/replica_set.go
@@ -20,10 +20,10 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apis/extensions"
+ "k8s.io/kubernetes/pkg/api/v1"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/uuid"
@@ -37,18 +37,18 @@ import (
func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageName string, image string) *extensions.ReplicaSet {
zero := int64(0)
return &extensions.ReplicaSet{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: rsName,
},
Spec: extensions.ReplicaSetSpec{
- Replicas: replicas,
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Replicas: func(i int32) *int32 { return &i }(replicas),
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: rsPodLabels,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &zero,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: imageName,
Image: image,
@@ -60,14 +60,14 @@ func newRS(rsName string, replicas int32, rsPodLabels map[string]string, imageNa
}
}
-func newPodQuota(name, number string) *api.ResourceQuota {
- return &api.ResourceQuota{
- ObjectMeta: api.ObjectMeta{
+func newPodQuota(name, number string) *v1.ResourceQuota {
+ return &v1.ResourceQuota{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ResourceQuotaSpec{
- Hard: api.ResourceList{
- api.ResourcePods: resource.MustParse(number),
+ Spec: v1.ResourceQuotaSpec{
+ Hard: v1.ResourceList{
+ v1.ResourcePods: resource.MustParse(number),
},
},
}
@@ -103,24 +103,24 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
// in contrib/for-demos/serve_hostname
By(fmt.Sprintf("Creating ReplicaSet %s", name))
rs, err := f.ClientSet.Extensions().ReplicaSets(f.Namespace.Name).Create(&extensions.ReplicaSet{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: extensions.ReplicaSetSpec{
- Replicas: replicas,
+ Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{
"name": name,
}},
- Template: api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -131,7 +131,7 @@ func ReplicaSetServeImageOrFail(f *framework.Framework, test string, image strin
// Cleanup the ReplicaSet when we are done.
defer func() {
// Resize the ReplicaSet to zero to get rid of pods.
- if err := framework.DeleteReplicaSet(f.ClientSet, f.Namespace.Name, rs.Name); err != nil {
+ if err := framework.DeleteReplicaSet(f.ClientSet, f.InternalClientset, f.Namespace.Name, rs.Name); err != nil {
framework.Logf("Failed to cleanup ReplicaSet %v: %v.", rs.Name, err)
}
}()
@@ -184,7 +184,7 @@ func rsConditionCheck(f *framework.Framework) {
return false, err
}
quantity := resource.MustParse("2")
- podQuota := quota.Status.Hard[api.ResourcePods]
+ podQuota := quota.Status.Hard[v1.ResourcePods]
return (&podQuota).Cmp(quantity) == 0, nil
})
if err == wait.ErrWaitTimeout {
@@ -222,7 +222,8 @@ func rsConditionCheck(f *framework.Framework) {
By(fmt.Sprintf("Scaling down replica set %q to satisfy pod quota", name))
rs, err = framework.UpdateReplicaSetWithRetries(c, namespace, name, func(update *extensions.ReplicaSet) {
- update.Spec.Replicas = 2
+ x := int32(2)
+ update.Spec.Replicas = &x
})
Expect(err).NotTo(HaveOccurred())
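The quota checks in this file and the previous one copy the map entry into a local before comparing, as in `podQuota := quota.Status.Hard[v1.ResourcePods]` followed by `(&podQuota).Cmp(quantity)`. A minimal sketch of why, assuming the pointer-receiver Cmp of this vintage of the resource package and the v1 import already used above; hardEquals is a hypothetical helper, not part of the patch:

func hardEquals(quota *v1.ResourceQuota, name v1.ResourceName, want string) bool {
	// Map values are not addressable, so copy into a local before calling
	// the pointer-receiver Cmp; 0 means the two quantities are equal.
	hard := quota.Status.Hard[name]
	return (&hard).Cmp(resource.MustParse(want)) == 0
}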
diff --git a/test/e2e/rescheduler.go b/test/e2e/rescheduler.go
index 962fba7888e..c1f3cde243a 100644
--- a/test/e2e/rescheduler.go
+++ b/test/e2e/rescheduler.go
@@ -21,6 +21,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/test/e2e/framework"
testutils "k8s.io/kubernetes/test/utils"
@@ -42,28 +43,28 @@ var _ = framework.KubeDescribe("Rescheduler [Serial]", func() {
nodeCount := len(nodes.Items)
Expect(nodeCount).NotTo(BeZero())
- cpu := nodes.Items[0].Status.Capacity[api.ResourceCPU]
+ cpu := nodes.Items[0].Status.Capacity[v1.ResourceCPU]
totalMillicores = int((&cpu).MilliValue()) * nodeCount
})
It("should ensure that critical pod is scheduled in case there is no resources available", func() {
By("reserving all available cpu")
err := reserveAllCpu(f, "reserve-all-cpu", totalMillicores)
- defer framework.DeleteRCAndPods(f.ClientSet, ns, "reserve-all-cpu")
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "reserve-all-cpu")
framework.ExpectNoError(err)
By("creating a new instance of Dashboard and waiting for Dashboard to be scheduled")
label := labels.SelectorFromSet(labels.Set(map[string]string{"k8s-app": "kubernetes-dashboard"}))
- listOpts := api.ListOptions{LabelSelector: label}
+ listOpts := v1.ListOptions{LabelSelector: label.String()}
deployments, err := f.ClientSet.Extensions().Deployments(api.NamespaceSystem).List(listOpts)
framework.ExpectNoError(err)
Expect(len(deployments.Items)).Should(Equal(1))
deployment := deployments.Items[0]
- replicas := uint(deployment.Spec.Replicas)
+ replicas := uint(*(deployment.Spec.Replicas))
- err = framework.ScaleDeployment(f.ClientSet, api.NamespaceSystem, deployment.Name, replicas+1, true)
- defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, api.NamespaceSystem, deployment.Name, replicas, true))
+ err = framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas+1, true)
+ defer framework.ExpectNoError(framework.ScaleDeployment(f.ClientSet, f.InternalClientset, api.NamespaceSystem, deployment.Name, replicas, true))
framework.ExpectNoError(err)
})
@@ -74,7 +75,7 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
replicas := millicores / 100
ReserveCpu(f, id, 1, 100)
- framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.Namespace.Name, id, uint(replicas), false))
+ framework.ExpectNoError(framework.ScaleRC(f.ClientSet, f.InternalClientset, f.Namespace.Name, id, uint(replicas), false))
for start := time.Now(); time.Since(start) < timeout; time.Sleep(10 * time.Second) {
pods, err := framework.GetPodsInNamespace(f.ClientSet, f.Namespace.Name, framework.ImagePullerLabels)
@@ -100,9 +101,9 @@ func reserveAllCpu(f *framework.Framework, id string, millicores int) error {
return fmt.Errorf("Pod name %s: Gave up waiting %v for %d pods to come up", id, timeout, replicas)
}
-func podRunningOrUnschedulable(pod *api.Pod) bool {
- _, cond := api.GetPodCondition(&pod.Status, api.PodScheduled)
- if cond != nil && cond.Status == api.ConditionFalse && cond.Reason == "Unschedulable" {
+func podRunningOrUnschedulable(pod *v1.Pod) bool {
+ _, cond := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
+ if cond != nil && cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
return true
}
running, _ := testutils.PodRunningReady(pod)
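The list-options change above is more than a type rename: the versioned v1.ListOptions carries the label selector as a string, so the labels.Selector built from a set has to be rendered with String(). A minimal sketch under that assumption (the function name is illustrative only):

func dashboardListOptions() v1.ListOptions {
	// labels.Set is a map[string]string; String() serializes the selector
	// into the "k8s-app=kubernetes-dashboard" form the versioned API expects.
	sel := labels.SelectorFromSet(labels.Set{"k8s-app": "kubernetes-dashboard"})
	return v1.ListOptions{LabelSelector: sel.String()}
}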
diff --git a/test/e2e/resize_nodes.go b/test/e2e/resize_nodes.go
index 43d0bfb7abd..65e54c9d9ef 100644
--- a/test/e2e/resize_nodes.go
+++ b/test/e2e/resize_nodes.go
@@ -25,8 +25,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/test/e2e/framework"
@@ -136,17 +137,17 @@ func WaitForGroupSize(group string, size int32) error {
return fmt.Errorf("timeout waiting %v for node instance group size to be %d", timeout, size)
}
-func svcByName(name string, port int) *api.Service {
- return &api.Service{
- ObjectMeta: api.ObjectMeta{
+func svcByName(name string, port int) *v1.Service {
+ return &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ServiceSpec{
- Type: api.ServiceTypeNodePort,
+ Spec: v1.ServiceSpec{
+ Type: v1.ServiceTypeNodePort,
Selector: map[string]string{
"name": name,
},
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: int32(port),
TargetPort: intstr.FromInt(port),
}},
@@ -159,18 +160,18 @@ func newSVCByName(c clientset.Interface, ns, name string) error {
return err
}
-func rcByNamePort(name string, replicas int32, image string, port int, protocol api.Protocol,
- labels map[string]string, gracePeriod *int64) *api.ReplicationController {
+func rcByNamePort(name string, replicas int32, image string, port int, protocol v1.Protocol,
+ labels map[string]string, gracePeriod *int64) *v1.ReplicationController {
- return rcByNameContainer(name, replicas, image, labels, api.Container{
+ return rcByNameContainer(name, replicas, image, labels, v1.Container{
Name: name,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
+ Ports: []v1.ContainerPort{{ContainerPort: int32(port), Protocol: protocol}},
}, gracePeriod)
}
-func rcByNameContainer(name string, replicas int32, image string, labels map[string]string, c api.Container,
- gracePeriod *int64) *api.ReplicationController {
+func rcByNameContainer(name string, replicas int32, image string, labels map[string]string, c v1.Container,
+ gracePeriod *int64) *v1.ReplicationController {
zeroGracePeriod := int64(0)
@@ -179,25 +180,25 @@ func rcByNameContainer(name string, replicas int32, image string, labels map[str
if gracePeriod == nil {
gracePeriod = &zeroGracePeriod
}
- return &api.ReplicationController{
+ return &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{
Kind: "ReplicationController",
- APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String(),
+ APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String(),
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: replicas,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
- Spec: api.PodSpec{
- Containers: []api.Container{c},
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{c},
TerminationGracePeriodSeconds: gracePeriod,
},
},
@@ -206,10 +207,10 @@ func rcByNameContainer(name string, replicas int32, image string, labels map[str
}
// newRCByName creates a replication controller with a selector by name of name.
-func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*api.ReplicationController, error) {
+func newRCByName(c clientset.Interface, ns, name string, replicas int32, gracePeriod *int64) (*v1.ReplicationController, error) {
By(fmt.Sprintf("creating replication controller %s", name))
return c.Core().ReplicationControllers(ns).Create(rcByNamePort(
- name, replicas, serveHostnameImage, 9376, api.ProtocolTCP, map[string]string{}, gracePeriod))
+ name, replicas, serveHostnameImage, 9376, v1.ProtocolTCP, map[string]string{}, gracePeriod))
}
func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
@@ -217,7 +218,7 @@ func resizeRC(c clientset.Interface, ns, name string, replicas int32) error {
if err != nil {
return err
}
- rc.Spec.Replicas = replicas
+ *(rc.Spec.Replicas) = replicas
_, err = c.Core().ReplicationControllers(rc.Namespace).Update(rc)
return err
}
diff --git a/test/e2e/resource_quota.go b/test/e2e/resource_quota.go
index ccfe7412372..2996e55b5fd 100644
--- a/test/e2e/resource_quota.go
+++ b/test/e2e/resource_quota.go
@@ -20,9 +20,9 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -47,8 +47,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
@@ -61,20 +61,20 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a Service")
- service := newTestServiceForQuota("test-service", api.ServiceTypeClusterIP)
+ service := newTestServiceForQuota("test-service", v1.ServiceTypeClusterIP)
service, err = f.ClientSet.Core().Services(f.Namespace.Name).Create(service)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures service creation")
- usedResources = api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourceServices] = resource.MustParse("1")
+ usedResources = v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourceServices] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -83,14 +83,14 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
- usedResources[api.ResourceServices] = resource.MustParse("0")
+ usedResources[v1.ResourceServices] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should create a ResourceQuota and capture the life of a secret.", func() {
By("Discovering how many secrets are in namespace by default")
- secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(api.ListOptions{})
+ secrets, err := f.ClientSet.Core().Secrets(f.Namespace.Name).List(v1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
defaultSecrets := fmt.Sprintf("%d", len(secrets.Items))
hardSecrets := fmt.Sprintf("%d", len(secrets.Items)+1)
@@ -98,14 +98,14 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
By("Creating a ResourceQuota")
quotaName := "test-quota"
resourceQuota := newTestResourceQuota(quotaName)
- resourceQuota.Spec.Hard[api.ResourceSecrets] = resource.MustParse(hardSecrets)
+ resourceQuota.Spec.Hard[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
resourceQuota, err = createResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuota)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets)
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -115,8 +115,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures secret creation")
- usedResources = api.ResourceList{}
- usedResources[api.ResourceSecrets] = resource.MustParse(hardSecrets)
+ usedResources = v1.ResourceList{}
+ usedResources[v1.ResourceSecrets] = resource.MustParse(hardSecrets)
// we expect there to be two secrets because each namespace will receive
// a service account token secret by default
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
@@ -127,7 +127,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
- usedResources[api.ResourceSecrets] = resource.MustParse(defaultSecrets)
+ usedResources[v1.ResourceSecrets] = resource.MustParse(defaultSecrets)
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
@@ -140,42 +140,42 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a Pod that fits quota")
podName := "test-pod"
- requests := api.ResourceList{}
- requests[api.ResourceCPU] = resource.MustParse("500m")
- requests[api.ResourceMemory] = resource.MustParse("252Mi")
- pod := newTestPodForQuota(f, podName, requests, api.ResourceList{})
+ requests := v1.ResourceList{}
+ requests[v1.ResourceCPU] = resource.MustParse("500m")
+ requests[v1.ResourceMemory] = resource.MustParse("252Mi")
+ pod := newTestPodForQuota(f, podName, requests, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
podToUpdate := pod
By("Ensuring ResourceQuota status captures the pod usage")
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourcePods] = resource.MustParse("1")
- usedResources[api.ResourceCPU] = requests[api.ResourceCPU]
- usedResources[api.ResourceMemory] = requests[api.ResourceMemory]
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourcePods] = resource.MustParse("1")
+ usedResources[v1.ResourceCPU] = requests[v1.ResourceCPU]
+ usedResources[v1.ResourceMemory] = requests[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Not allowing a pod to be created that exceeds remaining quota")
- requests = api.ResourceList{}
- requests[api.ResourceCPU] = resource.MustParse("600m")
- requests[api.ResourceMemory] = resource.MustParse("100Mi")
- pod = newTestPodForQuota(f, "fail-pod", requests, api.ResourceList{})
+ requests = v1.ResourceList{}
+ requests[v1.ResourceCPU] = resource.MustParse("600m")
+ requests[v1.ResourceMemory] = resource.MustParse("100Mi")
+ pod = newTestPodForQuota(f, "fail-pod", requests, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).To(HaveOccurred())
By("Ensuring a pod cannot update its resource requirements")
// a pod cannot dynamically update its resource requirements.
- requests = api.ResourceList{}
- requests[api.ResourceCPU] = resource.MustParse("100m")
- requests[api.ResourceMemory] = resource.MustParse("100Mi")
+ requests = v1.ResourceList{}
+ requests[v1.ResourceCPU] = resource.MustParse("100m")
+ requests[v1.ResourceMemory] = resource.MustParse("100Mi")
podToUpdate.Spec.Containers[0].Resources.Requests = requests
_, err = f.ClientSet.Core().Pods(f.Namespace.Name).Update(podToUpdate)
Expect(err).To(HaveOccurred())
@@ -185,14 +185,14 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
- err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
+ err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourcePods] = resource.MustParse("0")
- usedResources[api.ResourceCPU] = resource.MustParse("0")
- usedResources[api.ResourceMemory] = resource.MustParse("0")
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourceCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
@@ -205,8 +205,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -216,9 +216,9 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures configMap creation")
- usedResources = api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourceConfigMaps] = resource.MustParse("1")
+ usedResources = v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourceConfigMaps] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -227,7 +227,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
- usedResources[api.ResourceConfigMaps] = resource.MustParse("0")
+ usedResources[v1.ResourceConfigMaps] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
@@ -240,9 +240,9 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourceReplicationControllers] = resource.MustParse("0")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -252,8 +252,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures replication controller creation")
- usedResources = api.ResourceList{}
- usedResources[api.ResourceReplicationControllers] = resource.MustParse("1")
+ usedResources = v1.ResourceList{}
+ usedResources[v1.ResourceReplicationControllers] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -262,7 +262,7 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
- usedResources[api.ResourceReplicationControllers] = resource.MustParse("0")
+ usedResources[v1.ResourceReplicationControllers] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
@@ -275,10 +275,10 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourceQuotas] = resource.MustParse("1")
- usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0")
- usedResources[api.ResourceRequestsStorage] = resource.MustParse("0")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourceQuotas] = resource.MustParse("1")
+ usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -288,9 +288,9 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status captures persistent volume claimcreation")
- usedResources = api.ResourceList{}
- usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("1")
- usedResources[api.ResourceRequestsStorage] = resource.MustParse("1Gi")
+ usedResources = v1.ResourceList{}
+ usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("1")
+ usedResources[v1.ResourceRequestsStorage] = resource.MustParse("1Gi")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -299,8 +299,8 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released usage")
- usedResources[api.ResourcePersistentVolumeClaims] = resource.MustParse("0")
- usedResources[api.ResourceRequestsStorage] = resource.MustParse("0")
+ usedResources[v1.ResourcePersistentVolumeClaims] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsStorage] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, quotaName, usedResources)
Expect(err).NotTo(HaveOccurred())
})
@@ -308,18 +308,18 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
It("should verify ResourceQuota with terminating scopes.", func() {
By("Creating a ResourceQuota with terminating scope")
quotaTerminatingName := "quota-terminating"
- resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, api.ResourceQuotaScopeTerminating))
+ resourceQuotaTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaTerminatingName, v1.ResourceQuotaScopeTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourcePods] = resource.MustParse("0")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not terminating scope")
quotaNotTerminatingName := "quota-not-terminating"
- resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, api.ResourceQuotaScopeNotTerminating))
+ resourceQuotaNotTerminating, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope(quotaNotTerminatingName, v1.ResourceQuotaScopeNotTerminating))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
@@ -328,44 +328,44 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
By("Creating a long running pod")
podName := "test-pod"
- requests := api.ResourceList{}
- requests[api.ResourceCPU] = resource.MustParse("500m")
- requests[api.ResourceMemory] = resource.MustParse("200Mi")
- limits := api.ResourceList{}
- limits[api.ResourceCPU] = resource.MustParse("1")
- limits[api.ResourceMemory] = resource.MustParse("400Mi")
+ requests := v1.ResourceList{}
+ requests[v1.ResourceCPU] = resource.MustParse("500m")
+ requests[v1.ResourceMemory] = resource.MustParse("200Mi")
+ limits := v1.ResourceList{}
+ limits[v1.ResourceCPU] = resource.MustParse("1")
+ limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod := newTestPodForQuota(f, podName, requests, limits)
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope captures the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("1")
- usedResources[api.ResourceRequestsCPU] = requests[api.ResourceCPU]
- usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory]
- usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU]
- usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory]
+ usedResources[v1.ResourcePods] = resource.MustParse("1")
+ usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
+ usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
+ usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
+ usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope ignored the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
- usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
- usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
- usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
- usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
- err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
+ err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
- usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
- usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
- usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
- usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
@@ -378,50 +378,50 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with terminating scope captures the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("1")
- usedResources[api.ResourceRequestsCPU] = requests[api.ResourceCPU]
- usedResources[api.ResourceRequestsMemory] = requests[api.ResourceMemory]
- usedResources[api.ResourceLimitsCPU] = limits[api.ResourceCPU]
- usedResources[api.ResourceLimitsMemory] = limits[api.ResourceMemory]
+ usedResources[v1.ResourcePods] = resource.MustParse("1")
+ usedResources[v1.ResourceRequestsCPU] = requests[v1.ResourceCPU]
+ usedResources[v1.ResourceRequestsMemory] = requests[v1.ResourceMemory]
+ usedResources[v1.ResourceLimitsCPU] = limits[v1.ResourceCPU]
+ usedResources[v1.ResourceLimitsMemory] = limits[v1.ResourceMemory]
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not terminating scope ignored the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
- usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
- usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
- usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
- usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
- err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, api.NewDeleteOptions(0))
+ err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(podName, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
- usedResources[api.ResourceRequestsCPU] = resource.MustParse("0")
- usedResources[api.ResourceRequestsMemory] = resource.MustParse("0")
- usedResources[api.ResourceLimitsCPU] = resource.MustParse("0")
- usedResources[api.ResourceLimitsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceRequestsMemory] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsCPU] = resource.MustParse("0")
+ usedResources[v1.ResourceLimitsMemory] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaTerminating.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
It("should verify ResourceQuota with best effort scope.", func() {
By("Creating a ResourceQuota with best effort scope")
- resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", api.ResourceQuotaScopeBestEffort))
+ resourceQuotaBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-besteffort", v1.ResourceQuotaScopeBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
- usedResources := api.ResourceList{}
- usedResources[api.ResourcePods] = resource.MustParse("0")
+ usedResources := v1.ResourceList{}
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a ResourceQuota with not best effort scope")
- resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", api.ResourceQuotaScopeNotBestEffort))
+ resourceQuotaNotBestEffort, err := createResourceQuota(f.ClientSet, f.Namespace.Name, newTestResourceQuotaWithScope("quota-not-besteffort", v1.ResourceQuotaScopeNotBestEffort))
Expect(err).NotTo(HaveOccurred())
By("Ensuring ResourceQuota status is calculated")
@@ -429,111 +429,111 @@ var _ = framework.KubeDescribe("ResourceQuota", func() {
Expect(err).NotTo(HaveOccurred())
By("Creating a best-effort pod")
- pod := newTestPodForQuota(f, podName, api.ResourceList{}, api.ResourceList{})
+ pod := newTestPodForQuota(f, podName, v1.ResourceList{}, v1.ResourceList{})
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope captures the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("1")
+ usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort ignored the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
- err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
+ err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Creating a not best-effort pod")
- requests := api.ResourceList{}
- requests[api.ResourceCPU] = resource.MustParse("500m")
- requests[api.ResourceMemory] = resource.MustParse("200Mi")
- limits := api.ResourceList{}
- limits[api.ResourceCPU] = resource.MustParse("1")
- limits[api.ResourceMemory] = resource.MustParse("400Mi")
+ requests := v1.ResourceList{}
+ requests[v1.ResourceCPU] = resource.MustParse("500m")
+ requests[v1.ResourceMemory] = resource.MustParse("200Mi")
+ limits := v1.ResourceList{}
+ limits[v1.ResourceCPU] = resource.MustParse("1")
+ limits[v1.ResourceMemory] = resource.MustParse("400Mi")
pod = newTestPodForQuota(f, "burstable-pod", requests, limits)
pod, err = f.ClientSet.Core().Pods(f.Namespace.Name).Create(pod)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with not best effort scope captures the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("1")
+ usedResources[v1.ResourcePods] = resource.MustParse("1")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota with best effort scope ignored the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
By("Deleting the pod")
- err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
+ err = f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
Expect(err).NotTo(HaveOccurred())
By("Ensuring resource quota status released the pod usage")
- usedResources[api.ResourcePods] = resource.MustParse("0")
+ usedResources[v1.ResourcePods] = resource.MustParse("0")
err = waitForResourceQuota(f.ClientSet, f.Namespace.Name, resourceQuotaNotBestEffort.Name, usedResources)
Expect(err).NotTo(HaveOccurred())
})
})
// newTestResourceQuotaWithScope returns a quota that enforces default constraints for testing with scopes
-func newTestResourceQuotaWithScope(name string, scope api.ResourceQuotaScope) *api.ResourceQuota {
- hard := api.ResourceList{}
- hard[api.ResourcePods] = resource.MustParse("5")
+func newTestResourceQuotaWithScope(name string, scope v1.ResourceQuotaScope) *v1.ResourceQuota {
+ hard := v1.ResourceList{}
+ hard[v1.ResourcePods] = resource.MustParse("5")
switch scope {
- case api.ResourceQuotaScopeTerminating, api.ResourceQuotaScopeNotTerminating:
- hard[api.ResourceRequestsCPU] = resource.MustParse("1")
- hard[api.ResourceRequestsMemory] = resource.MustParse("500Mi")
- hard[api.ResourceLimitsCPU] = resource.MustParse("2")
- hard[api.ResourceLimitsMemory] = resource.MustParse("1Gi")
+ case v1.ResourceQuotaScopeTerminating, v1.ResourceQuotaScopeNotTerminating:
+ hard[v1.ResourceRequestsCPU] = resource.MustParse("1")
+ hard[v1.ResourceRequestsMemory] = resource.MustParse("500Mi")
+ hard[v1.ResourceLimitsCPU] = resource.MustParse("2")
+ hard[v1.ResourceLimitsMemory] = resource.MustParse("1Gi")
}
- return &api.ResourceQuota{
- ObjectMeta: api.ObjectMeta{Name: name},
- Spec: api.ResourceQuotaSpec{Hard: hard, Scopes: []api.ResourceQuotaScope{scope}},
+ return &v1.ResourceQuota{
+ ObjectMeta: v1.ObjectMeta{Name: name},
+ Spec: v1.ResourceQuotaSpec{Hard: hard, Scopes: []v1.ResourceQuotaScope{scope}},
}
}
// newTestResourceQuota returns a quota that enforces default constraints for testing
-func newTestResourceQuota(name string) *api.ResourceQuota {
- hard := api.ResourceList{}
- hard[api.ResourcePods] = resource.MustParse("5")
- hard[api.ResourceServices] = resource.MustParse("10")
- hard[api.ResourceServicesNodePorts] = resource.MustParse("1")
- hard[api.ResourceServicesLoadBalancers] = resource.MustParse("1")
- hard[api.ResourceReplicationControllers] = resource.MustParse("10")
- hard[api.ResourceQuotas] = resource.MustParse("1")
- hard[api.ResourceCPU] = resource.MustParse("1")
- hard[api.ResourceMemory] = resource.MustParse("500Mi")
- hard[api.ResourceConfigMaps] = resource.MustParse("2")
- hard[api.ResourceSecrets] = resource.MustParse("10")
- hard[api.ResourcePersistentVolumeClaims] = resource.MustParse("10")
- hard[api.ResourceRequestsStorage] = resource.MustParse("10Gi")
- return &api.ResourceQuota{
- ObjectMeta: api.ObjectMeta{Name: name},
- Spec: api.ResourceQuotaSpec{Hard: hard},
+func newTestResourceQuota(name string) *v1.ResourceQuota {
+ hard := v1.ResourceList{}
+ hard[v1.ResourcePods] = resource.MustParse("5")
+ hard[v1.ResourceServices] = resource.MustParse("10")
+ hard[v1.ResourceServicesNodePorts] = resource.MustParse("1")
+ hard[v1.ResourceServicesLoadBalancers] = resource.MustParse("1")
+ hard[v1.ResourceReplicationControllers] = resource.MustParse("10")
+ hard[v1.ResourceQuotas] = resource.MustParse("1")
+ hard[v1.ResourceCPU] = resource.MustParse("1")
+ hard[v1.ResourceMemory] = resource.MustParse("500Mi")
+ hard[v1.ResourceConfigMaps] = resource.MustParse("2")
+ hard[v1.ResourceSecrets] = resource.MustParse("10")
+ hard[v1.ResourcePersistentVolumeClaims] = resource.MustParse("10")
+ hard[v1.ResourceRequestsStorage] = resource.MustParse("10Gi")
+ return &v1.ResourceQuota{
+ ObjectMeta: v1.ObjectMeta{Name: name},
+ Spec: v1.ResourceQuotaSpec{Hard: hard},
}
}
// newTestPodForQuota returns a pod that has the specified requests and limits
-func newTestPodForQuota(f *framework.Framework, name string, requests api.ResourceList, limits api.ResourceList) *api.Pod {
- return &api.Pod{
- ObjectMeta: api.ObjectMeta{
+func newTestPodForQuota(f *framework.Framework, name string, requests v1.ResourceList, limits v1.ResourceList) *v1.Pod {
+ return &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "pause",
Image: framework.GetPauseImageName(f.ClientSet),
- Resources: api.ResourceRequirements{
+ Resources: v1.ResourceRequirements{
Requests: requests,
Limits: limits,
},
@@ -544,20 +544,20 @@ func newTestPodForQuota(f *framework.Framework, name string, requests api.Resour
}
// newTestPersistentVolumeClaimForQuota returns a simple persistent volume claim
-func newTestPersistentVolumeClaimForQuota(name string) *api.PersistentVolumeClaim {
- return &api.PersistentVolumeClaim{
- ObjectMeta: api.ObjectMeta{
+func newTestPersistentVolumeClaimForQuota(name string) *v1.PersistentVolumeClaim {
+ return &v1.PersistentVolumeClaim{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.PersistentVolumeClaimSpec{
- AccessModes: []api.PersistentVolumeAccessMode{
- api.ReadWriteOnce,
- api.ReadOnlyMany,
- api.ReadWriteMany,
+ Spec: v1.PersistentVolumeClaimSpec{
+ AccessModes: []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
+ v1.ReadOnlyMany,
+ v1.ReadWriteMany,
},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceName(api.ResourceStorage): resource.MustParse("1Gi"),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
},
},
@@ -565,22 +565,22 @@ func newTestPersistentVolumeClaimForQuota(name string) *api.PersistentVolumeClai
}
// newTestReplicationControllerForQuota returns a simple replication controller
-func newTestReplicationControllerForQuota(name, image string, replicas int32) *api.ReplicationController {
- return &api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+func newTestReplicationControllerForQuota(name, image string, replicas int32) *v1.ReplicationController {
+ return &v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: replicas,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int32) *int32 { return &i }(replicas),
Selector: map[string]string{
"name": name,
},
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
@@ -593,14 +593,14 @@ func newTestReplicationControllerForQuota(name, image string, replicas int32) *a
}
// newTestServiceForQuota returns a simple service
-func newTestServiceForQuota(name string, serviceType api.ServiceType) *api.Service {
- return &api.Service{
- ObjectMeta: api.ObjectMeta{
+func newTestServiceForQuota(name string, serviceType v1.ServiceType) *v1.Service {
+ return &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Type: serviceType,
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
@@ -608,9 +608,9 @@ func newTestServiceForQuota(name string, serviceType api.ServiceType) *api.Servi
}
}
-func newTestConfigMapForQuota(name string) *api.ConfigMap {
- return &api.ConfigMap{
- ObjectMeta: api.ObjectMeta{
+func newTestConfigMapForQuota(name string) *v1.ConfigMap {
+ return &v1.ConfigMap{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
Data: map[string]string{
@@ -619,9 +619,9 @@ func newTestConfigMapForQuota(name string) *api.ConfigMap {
}
}
-func newTestSecretForQuota(name string) *api.Secret {
- return &api.Secret{
- ObjectMeta: api.ObjectMeta{
+func newTestSecretForQuota(name string) *v1.Secret {
+ return &v1.Secret{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
Data: map[string][]byte{
@@ -633,7 +633,7 @@ func newTestSecretForQuota(name string) *api.Secret {
}
// createResourceQuota in the specified namespace
-func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *api.ResourceQuota) (*api.ResourceQuota, error) {
+func createResourceQuota(c clientset.Interface, namespace string, resourceQuota *v1.ResourceQuota) (*v1.ResourceQuota, error) {
return c.Core().ResourceQuotas(namespace).Create(resourceQuota)
}
@@ -643,7 +643,7 @@ func deleteResourceQuota(c clientset.Interface, namespace, name string) error {
}
// wait for resource quota status to show the expected used resources value
-func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used api.ResourceList) error {
+func waitForResourceQuota(c clientset.Interface, ns, quotaName string, used v1.ResourceList) error {
return wait.Poll(framework.Poll, resourceQuotaTimeout, func() (bool, error) {
resourceQuota, err := c.Core().ResourceQuotas(ns).Get(quotaName)
if err != nil {
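The poll above checks the quota's Status.Used against the expected ResourceList; the comparison itself falls outside this hunk. A hedged sketch of how such a check can be written against the v1 types, using Quantity.Cmp as in the earlier pod-quota checks (usedMatches is a hypothetical name):

func usedMatches(quota *v1.ResourceQuota, expected v1.ResourceList) bool {
	for name, want := range expected {
		got, ok := quota.Status.Used[name]
		if !ok || want.Cmp(got) != 0 {
			return false
		}
	}
	return true
}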
diff --git a/test/e2e/restart.go b/test/e2e/restart.go
index bea33956afb..860ef7e833b 100644
--- a/test/e2e/restart.go
+++ b/test/e2e/restart.go
@@ -21,6 +21,7 @@ import (
"time"
"k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/fields"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/labels"
@@ -32,15 +33,15 @@ import (
. "github.com/onsi/gomega"
)
-func isNotRestartAlwaysMirrorPod(p *api.Pod) bool {
+func isNotRestartAlwaysMirrorPod(p *v1.Pod) bool {
if !kubepod.IsMirrorPod(p) {
return false
}
- return p.Spec.RestartPolicy != api.RestartPolicyAlways
+ return p.Spec.RestartPolicy != v1.RestartPolicyAlways
}
-func filterIrrelevantPods(pods []*api.Pod) []*api.Pod {
- var results []*api.Pod
+func filterIrrelevantPods(pods []*v1.Pod) []*v1.Pod {
+ var results []*v1.Pod
for _, p := range pods {
if isNotRestartAlwaysMirrorPod(p) {
// Mirror pods with restart policy == Never will not get
@@ -128,7 +129,7 @@ var _ = framework.KubeDescribe("Restart [Disruptive]", func() {
// returning their names if it can do so before timeout.
func waitForNPods(ps *testutils.PodStore, expect int, timeout time.Duration) ([]string, error) {
// Loop until we find expect pods or timeout is passed.
- var pods []*api.Pod
+ var pods []*v1.Pod
var errLast error
found := wait.Poll(framework.Poll, timeout, func() (bool, error) {
allPods := ps.List()
diff --git a/test/e2e/scheduler_predicates.go b/test/e2e/scheduler_predicates.go
index d0d9b7471cf..ac507c5a805 100644
--- a/test/e2e/scheduler_predicates.go
+++ b/test/e2e/scheduler_predicates.go
@@ -23,7 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -44,12 +45,12 @@ type pausePodConfig struct {
Name string
Affinity string
Annotations, Labels, NodeSelector map[string]string
- Resources *api.ResourceRequirements
+ Resources *v1.ResourceRequirements
}
var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
var cs clientset.Interface
- var nodeList *api.NodeList
+ var nodeList *v1.NodeList
var systemPodsNo int
var totalPodCapacity int64
var RCName string
@@ -59,9 +60,9 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
AfterEach(func() {
rc, err := cs.Core().ReplicationControllers(ns).Get(RCName)
- if err == nil && rc.Spec.Replicas != 0 {
+ if err == nil && *(rc.Spec.Replicas) != 0 {
By("Cleaning up the replication controller")
- err := framework.DeleteRCAndPods(f.ClientSet, ns, RCName)
+ err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, RCName)
framework.ExpectNoError(err)
}
})
@@ -69,7 +70,7 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
BeforeEach(func() {
cs = f.ClientSet
ns = f.Namespace.Name
- nodeList = &api.NodeList{}
+ nodeList = &v1.NodeList{}
framework.WaitForAllNodesHealthy(cs, time.Minute)
masterNodes, nodeList = framework.GetMasterAndWorkerNodesOrDie(cs)
@@ -156,11 +157,11 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
}
framework.WaitForStableCluster(cs, masterNodes)
- pods, err := cs.Core().Pods(api.NamespaceAll).List(api.ListOptions{})
+ pods, err := cs.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{})
framework.ExpectNoError(err)
for _, pod := range pods.Items {
_, found := nodeToCapacityMap[pod.Spec.NodeName]
- if found && pod.Status.Phase != api.PodSucceeded && pod.Status.Phase != api.PodFailed {
+ if found && pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
framework.Logf("Pod %v requesting resource cpu=%vm on Node %v", pod.Name, getRequestedCPU(pod), pod.Spec.NodeName)
nodeToCapacityMap[pod.Spec.NodeName] -= getRequestedCPU(pod)
}
@@ -189,11 +190,11 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
*initPausePod(f, pausePodConfig{
Name: "",
Labels: map[string]string{"name": ""},
- Resources: &api.ResourceRequirements{
- Limits: api.ResourceList{
+ Resources: &v1.ResourceRequirements{
+ Limits: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
},
- Requests: api.ResourceList{
+ Requests: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
},
},
@@ -203,8 +204,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
createPausePod(f, pausePodConfig{
Name: podName,
Labels: map[string]string{"name": "additional"},
- Resources: &api.ResourceRequirements{
- Limits: api.ResourceList{
+ Resources: &v1.ResourceRequirements{
+ Limits: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCpuPerPod, "DecimalSI"),
},
},
@@ -511,8 +512,8 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
// cannot be scheduled onto it.
By("Launching two pods on two distinct nodes to get two node names")
CreateHostPortPods(f, "host-port", 2, true)
- defer framework.DeleteRCAndPods(f.ClientSet, ns, "host-port")
- podList, err := cs.Core().Pods(ns).List(api.ListOptions{})
+ defer framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, ns, "host-port")
+ podList, err := cs.Core().Pods(ns).List(v1.ListOptions{})
ExpectNoError(err)
Expect(len(podList.Items)).To(Equal(2))
nodeNames := []string{podList.Items[0].Spec.NodeName, podList.Items[1].Spec.NodeName}
@@ -671,10 +672,10 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.")
- testTaint := api.Taint{
+ testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
- Effect: api.TaintEffectNoSchedule,
+ Effect: v1.TaintEffectNoSchedule,
}
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, testTaint)
@@ -723,10 +724,10 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
nodeName := getNodeThatCanRunPodWithoutToleration(f)
By("Trying to apply a random taint on the found node.")
- testTaint := api.Taint{
+ testTaint := v1.Taint{
Key: fmt.Sprintf("kubernetes.io/e2e-taint-key-%s", string(uuid.NewUUID())),
Value: "testing-taint-value",
- Effect: api.TaintEffectNoSchedule,
+ Effect: v1.TaintEffectNoSchedule,
}
framework.AddOrUpdateTaintOnNode(cs, nodeName, testTaint)
framework.ExpectNodeHasTaint(cs, nodeName, testTaint)
@@ -757,25 +758,25 @@ var _ = framework.KubeDescribe("SchedulerPredicates [Serial]", func() {
})
})
-func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
+func initPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
if conf.Affinity != "" {
if conf.Annotations == nil {
conf.Annotations = map[string]string{
- api.AffinityAnnotationKey: conf.Affinity,
+ v1.AffinityAnnotationKey: conf.Affinity,
}
} else {
- conf.Annotations[api.AffinityAnnotationKey] = conf.Affinity
+ conf.Annotations[v1.AffinityAnnotationKey] = conf.Affinity
}
}
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: conf.Name,
Labels: conf.Labels,
Annotations: conf.Annotations,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
NodeSelector: conf.NodeSelector,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: podName,
Image: framework.GetPauseImageName(f.ClientSet),
@@ -789,13 +790,13 @@ func initPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
return pod
}
-func createPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
+func createPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Create(initPausePod(f, conf))
framework.ExpectNoError(err)
return pod
}
-func runPausePod(f *framework.Framework, conf pausePodConfig) *api.Pod {
+func runPausePod(f *framework.Framework, conf pausePodConfig) *v1.Pod {
pod := createPausePod(f, conf)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(f.ClientSet, pod))
pod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(conf.Name)
@@ -811,13 +812,13 @@ func runPodAndGetNodeName(f *framework.Framework, conf pausePodConfig) string {
pod := runPausePod(f, conf)
By("Explicitly delete pod here to free the resource it takes.")
- err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, api.NewDeleteOptions(0))
+ err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(pod.Name, v1.NewDeleteOptions(0))
framework.ExpectNoError(err)
return pod.Spec.NodeName
}
-func createPodWithNodeAffinity(f *framework.Framework) *api.Pod {
+func createPodWithNodeAffinity(f *framework.Framework) *v1.Pod {
return createPausePod(f, pausePodConfig{
Name: "with-nodeaffinity-" + string(uuid.NewUUID()),
Affinity: `{
@@ -836,7 +837,7 @@ func createPodWithNodeAffinity(f *framework.Framework) *api.Pod {
})
}
-func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *api.Pod {
+func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *v1.Pod {
return createPausePod(f, pausePodConfig{
Name: "with-podantiaffinity-" + string(uuid.NewUUID()),
Affinity: `{
@@ -869,23 +870,23 @@ func createPodWithPodAffinity(f *framework.Framework, topologyKey string) *api.P
}
// Returns a number of currently scheduled and not scheduled Pods.
-func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.Pod) {
+func getPodsScheduled(pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !masterNodes.Has(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
- _, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
+ _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
// We can't assume that the scheduledCondition is always set if Pod is assigned to Node,
// as e.g. DaemonController doesn't set it when assigning Pod to a Node. Currently
// Kubelet sets this condition when it gets a Pod without it, but if we were expecting
// that it would always be not nil, this would cause a rare race condition.
if scheduledCondition != nil {
- Expect(scheduledCondition.Status).To(Equal(api.ConditionTrue))
+ Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
}
scheduledPods = append(scheduledPods, pod)
} else {
- _, scheduledCondition := api.GetPodCondition(&pod.Status, api.PodScheduled)
+ _, scheduledCondition := v1.GetPodCondition(&pod.Status, v1.PodScheduled)
if scheduledCondition != nil {
- Expect(scheduledCondition.Status).To(Equal(api.ConditionFalse))
+ Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
}
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
@@ -896,7 +897,7 @@ func getPodsScheduled(pods *api.PodList) (scheduledPods, notScheduledPods []api.
return
}
-func getRequestedCPU(pod api.Pod) int64 {
+func getRequestedCPU(pod v1.Pod) int64 {
var result int64
for _, container := range pod.Spec.Containers {
result += container.Resources.Requests.Cpu().MilliValue()
@@ -913,7 +914,7 @@ func waitForScheduler() {
// TODO: upgrade calls in PodAffinity tests when we're able to run them
func verifyResult(c clientset.Interface, expectedScheduled int, expectedNotScheduled int, ns string) {
- allPods, err := c.Core().Pods(ns).List(api.ListOptions{})
+ allPods, err := c.Core().Pods(ns).List(v1.ListOptions{})
framework.ExpectNoError(err)
scheduledPods, notScheduledPods := framework.GetPodsScheduled(masterNodes, allPods)
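The getPodsScheduled change above keys the scheduled/not-scheduled split off the PodScheduled condition, which may legitimately be absent (e.g. for DaemonSet-managed pods). A minimal sketch of that lookup with the v1 types this PR switches to; the helper name is illustrative and is just the hand-rolled equivalent of the v1.GetPodCondition call used in the hunk:

package e2e

import "k8s.io/kubernetes/pkg/api/v1"

// podScheduledCondition returns the PodScheduled condition of the given pod
// status, or nil if the condition has not been set yet.
func podScheduledCondition(status *v1.PodStatus) *v1.PodCondition {
    for i := range status.Conditions {
        if status.Conditions[i].Type == v1.PodScheduled {
            return &status.Conditions[i]
        }
    }
    return nil
}

Callers still need a nil check before reading Status or Reason, which is exactly the race the comment in the hunk warns about.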
diff --git a/test/e2e/security_context.go b/test/e2e/security_context.go
index 18b26799a76..95bd93fed2b 100644
--- a/test/e2e/security_context.go
+++ b/test/e2e/security_context.go
@@ -25,7 +25,7 @@ package e2e
import (
"fmt"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/test/e2e/framework"
@@ -33,26 +33,25 @@ import (
. "github.com/onsi/gomega"
)
-func scTestPod(hostIPC bool, hostPID bool) *api.Pod {
+func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
podName := "security-context-" + string(uuid.NewUUID())
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": podName},
Annotations: map[string]string{},
},
- Spec: api.PodSpec{
- SecurityContext: &api.PodSecurityContext{
- HostIPC: hostIPC,
- HostPID: hostPID,
- },
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ HostIPC: hostIPC,
+ HostPID: hostPID,
+ SecurityContext: &v1.PodSecurityContext{},
+ Containers: []v1.Container{
{
Name: "test-container",
Image: "gcr.io/google_containers/busybox:1.24",
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
@@ -86,7 +85,7 @@ var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", fun
var uid int64 = 1001
var overrideUid int64 = 1002
pod.Spec.SecurityContext.RunAsUser = &uid
- pod.Spec.Containers[0].SecurityContext = new(api.SecurityContext)
+ pod.Spec.Containers[0].SecurityContext = new(v1.SecurityContext)
pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUid
pod.Spec.Containers[0].Command = []string{"sh", "-c", "id -u"}
@@ -110,33 +109,33 @@ var _ = framework.KubeDescribe("Security Context [Feature:SecurityContext]", fun
It("should support seccomp alpha unconfined annotation on the container [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
- pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
- pod.Annotations[api.SeccompPodAnnotationKey] = "docker/default"
+ pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "unconfined"
+ pod.Annotations[v1.SeccompPodAnnotationKey] = "docker/default"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
+ f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
It("should support seccomp alpha unconfined annotation on the pod [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
- pod.Annotations[api.SeccompPodAnnotationKey] = "unconfined"
+ pod.Annotations[v1.SeccompPodAnnotationKey] = "unconfined"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
+ f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
It("should support seccomp alpha docker/default annotation [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
- pod.Annotations[api.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default"
+ pod.Annotations[v1.SeccompContainerAnnotationKeyPrefix+"test-container"] = "docker/default"
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
+ f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"2"}) // seccomp filtered
})
It("should support seccomp default which is unconfined [Feature:Seccomp]", func() {
// TODO: port to SecurityContext as soon as seccomp is out of alpha
pod := scTestPod(false, false)
pod.Spec.Containers[0].Command = []string{"grep", "ecc", "/proc/self/status"}
- f.TestContainerOutput(api.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
+ f.TestContainerOutput(v1.SeccompPodAnnotationKey, pod, 0, []string{"0"}) // seccomp disabled
})
})
@@ -146,23 +145,23 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod := scTestPod(hostIPC, hostPID)
volumeName := "test-volume"
mountPath := "/mounted_volume"
- pod.Spec.Containers[0].VolumeMounts = []api.VolumeMount{
+ pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
{
Name: volumeName,
MountPath: mountPath,
},
}
- pod.Spec.Volumes = []api.Volume{
+ pod.Spec.Volumes = []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- EmptyDir: &api.EmptyDirVolumeSource{
- Medium: api.StorageMediumDefault,
+ VolumeSource: v1.VolumeSource{
+ EmptyDir: &v1.EmptyDirVolumeSource{
+ Medium: v1.StorageMediumDefault,
},
},
},
}
- pod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{
+ pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c0,c1",
}
pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
@@ -190,17 +189,17 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
By(fmt.Sprintf("confirming a container with the same label can read the file under --volume-dir=%s", framework.TestContext.KubeVolumeDir))
pod = scTestPod(hostIPC, hostPID)
pod.Spec.NodeName = foundPod.Spec.NodeName
- volumeMounts := []api.VolumeMount{
+ volumeMounts := []v1.VolumeMount{
{
Name: volumeName,
MountPath: mountPath,
},
}
- volumes := []api.Volume{
+ volumes := []v1.Volume{
{
Name: volumeName,
- VolumeSource: api.VolumeSource{
- HostPath: &api.HostPathVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ HostPath: &v1.HostPathVolumeSource{
Path: volumeHostPath,
},
},
@@ -209,7 +208,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod.Spec.Containers[0].VolumeMounts = volumeMounts
pod.Spec.Volumes = volumes
pod.Spec.Containers[0].Command = []string{"cat", testFilePath}
- pod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{
+ pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c0,c1",
}
@@ -220,7 +219,7 @@ func testPodSELinuxLabeling(f *framework.Framework, hostIPC bool, hostPID bool)
pod.Spec.Volumes = volumes
pod.Spec.Containers[0].VolumeMounts = volumeMounts
pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
- pod.Spec.SecurityContext.SELinuxOptions = &api.SELinuxOptions{
+ pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
Level: "s0:c2,c3",
}
_, err = client.Create(pod)
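Beyond the mechanical api -> v1 renames, the scTestPod hunk above reflects a real field move: with the v1 types, HostIPC and HostPID sit directly on PodSpec rather than on PodSecurityContext. A minimal sketch of a pod built that way, assuming the same busybox image used in the test (the function name is illustrative):

package e2e

import (
    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/util/uuid"
)

// minimalHostNamespacePod builds a never-restarting busybox pod with the
// host IPC/PID settings expressed on the PodSpec, as the v1 types expect.
func minimalHostNamespacePod(hostIPC, hostPID bool) *v1.Pod {
    name := "host-ns-" + string(uuid.NewUUID())
    return &v1.Pod{
        ObjectMeta: v1.ObjectMeta{Name: name},
        Spec: v1.PodSpec{
            HostIPC:         hostIPC,
            HostPID:         hostPID,
            SecurityContext: &v1.PodSecurityContext{},
            Containers: []v1.Container{{
                Name:  "test-container",
                Image: "gcr.io/google_containers/busybox:1.24",
            }},
            RestartPolicy: v1.RestartPolicyNever,
        },
    }
}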
diff --git a/test/e2e/service.go b/test/e2e/service.go
index 95e06bbc36f..914766295db 100644
--- a/test/e2e/service.go
+++ b/test/e2e/service.go
@@ -30,10 +30,11 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
- "k8s.io/kubernetes/pkg/api/service"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ "k8s.io/kubernetes/pkg/api/v1/service"
+ "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/endpoint"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/types"
@@ -83,10 +84,12 @@ var _ = framework.KubeDescribe("Services", func() {
f := framework.NewDefaultFramework("services")
var cs clientset.Interface
+ var internalClientset internalclientset.Interface
serviceLBNames := []string{}
BeforeEach(func() {
cs = f.ClientSet
+ internalClientset = f.InternalClientset
})
AfterEach(func() {
@@ -104,7 +107,7 @@ var _ = framework.KubeDescribe("Services", func() {
// TODO: We get coverage of TCP/UDP and multi-port services through the DNS test. We should have a simpler test for multi-port TCP here.
It("should provide secure master service [Conformance]", func() {
- _, err := cs.Core().Services(api.NamespaceDefault).Get("kubernetes")
+ _, err := cs.Core().Services(v1.NamespaceDefault).Get("kubernetes")
Expect(err).NotTo(HaveOccurred())
})
@@ -123,13 +126,13 @@ var _ = framework.KubeDescribe("Services", func() {
Expect(err).NotTo(HaveOccurred())
}()
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: labels,
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
@@ -151,11 +154,11 @@ var _ = framework.KubeDescribe("Services", func() {
name1 := "pod1"
name2 := "pod2"
- createPodOrFail(cs, ns, name1, labels, []api.ContainerPort{{ContainerPort: 80}})
+ createPodOrFail(cs, ns, name1, labels, []v1.ContainerPort{{ContainerPort: 80}})
names[name1] = true
validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}})
- createPodOrFail(cs, ns, name2, labels, []api.ContainerPort{{ContainerPort: 80}})
+ createPodOrFail(cs, ns, name2, labels, []v1.ContainerPort{{ContainerPort: 80}})
names[name2] = true
validateEndpointsOrFail(cs, ns, serviceName, PortsByPodName{name1: {80}, name2: {80}})
@@ -185,13 +188,13 @@ var _ = framework.KubeDescribe("Services", func() {
svc2port := "svc2"
By("creating service " + serviceName + " in namespace " + ns)
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: labels,
- Ports: []api.ServicePort{
+ Ports: []v1.ServicePort{
{
Name: "portname1",
Port: 80,
@@ -219,13 +222,13 @@ var _ = framework.KubeDescribe("Services", func() {
}
}()
- containerPorts1 := []api.ContainerPort{
+ containerPorts1 := []v1.ContainerPort{
{
Name: svc1port,
ContainerPort: int32(port1),
},
}
- containerPorts2 := []api.ContainerPort{
+ containerPorts2 := []v1.ContainerPort{
{
Name: svc2port,
ContainerPort: int32(port2),
@@ -261,7 +264,7 @@ var _ = framework.KubeDescribe("Services", func() {
jig := NewServiceTestJig(cs, serviceName)
servicePort := 8080
tcpService := jig.CreateTCPServiceWithPort(ns, nil, int32(servicePort))
- jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
defer func() {
framework.Logf("Cleaning up the sourceip test service")
err := cs.Core().Services(ns).Delete(serviceName, nil)
@@ -311,10 +314,10 @@ var _ = framework.KubeDescribe("Services", func() {
numPods, servicePort := 3, 80
By("creating service1 in namespace " + ns)
- podNames1, svc1IP, err := startServeHostnameService(cs, ns, "service1", servicePort, numPods)
+ podNames1, svc1IP, err := startServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
By("creating service2 in namespace " + ns)
- podNames2, svc2IP, err := startServeHostnameService(cs, ns, "service2", servicePort, numPods)
+ podNames2, svc2IP, err := startServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
hosts, err := framework.NodeSSHHosts(cs)
@@ -332,7 +335,7 @@ var _ = framework.KubeDescribe("Services", func() {
// Stop service 1 and make sure it is gone.
By("stopping service1")
- framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service1"))
+ framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1"))
By("verifying service1 is not up")
framework.ExpectNoError(verifyServeHostnameServiceDown(cs, host, svc1IP, servicePort))
@@ -341,7 +344,7 @@ var _ = framework.KubeDescribe("Services", func() {
// Start another service and verify both are up.
By("creating service3 in namespace " + ns)
- podNames3, svc3IP, err := startServeHostnameService(cs, ns, "service3", servicePort, numPods)
+ podNames3, svc3IP, err := startServeHostnameService(cs, internalClientset, ns, "service3", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc2IP == svc3IP {
@@ -365,12 +368,12 @@ var _ = framework.KubeDescribe("Services", func() {
svc1 := "service1"
svc2 := "service2"
- defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, svc1)) }()
- podNames1, svc1IP, err := startServeHostnameService(cs, ns, svc1, servicePort, numPods)
+ defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc1)) }()
+ podNames1, svc1IP, err := startServeHostnameService(cs, internalClientset, ns, svc1, servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
- defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, svc2)) }()
- podNames2, svc2IP, err := startServeHostnameService(cs, ns, svc2, servicePort, numPods)
+ defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, svc2)) }()
+ podNames2, svc2IP, err := startServeHostnameService(cs, internalClientset, ns, svc2, servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
@@ -414,8 +417,10 @@ var _ = framework.KubeDescribe("Services", func() {
ns := f.Namespace.Name
numPods, servicePort := 3, 80
- defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service1")) }()
- podNames1, svc1IP, err := startServeHostnameService(cs, ns, "service1", servicePort, numPods)
+ defer func() {
+ framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service1"))
+ }()
+ podNames1, svc1IP, err := startServeHostnameService(cs, internalClientset, ns, "service1", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
hosts, err := framework.NodeSSHHosts(cs)
@@ -439,8 +444,10 @@ var _ = framework.KubeDescribe("Services", func() {
framework.ExpectNoError(verifyServeHostnameServiceUp(cs, ns, host, podNames1, svc1IP, servicePort))
// Create a new service and check if it's not reusing IP.
- defer func() { framework.ExpectNoError(stopServeHostnameService(f.ClientSet, ns, "service2")) }()
- podNames2, svc2IP, err := startServeHostnameService(cs, ns, "service2", servicePort, numPods)
+ defer func() {
+ framework.ExpectNoError(stopServeHostnameService(f.ClientSet, f.InternalClientset, ns, "service2"))
+ }()
+ podNames2, svc2IP, err := startServeHostnameService(cs, internalClientset, ns, "service2", servicePort, numPods)
Expect(err).NotTo(HaveOccurred())
if svc1IP == svc2IP {
@@ -461,10 +468,10 @@ var _ = framework.KubeDescribe("Services", func() {
nodeIP := pickNodeIP(jig.Client) // for later
By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
- service := jig.CreateTCPServiceOrFail(ns, func(svc *api.Service) {
- svc.Spec.Type = api.ServiceTypeNodePort
+ service := jig.CreateTCPServiceOrFail(ns, func(svc *v1.Service) {
+ svc.Spec.Type = v1.ServiceTypeNodePort
})
- jig.SanityCheckService(service, api.ServiceTypeNodePort)
+ jig.SanityCheckService(service, v1.ServiceTypeNodePort)
nodePort := int(service.Spec.Ports[0].NodePort)
By("creating pod to be part of service " + serviceName)
@@ -521,11 +528,11 @@ var _ = framework.KubeDescribe("Services", func() {
By("creating a TCP service " + serviceName + " with type=ClusterIP in namespace " + ns1)
tcpService := jig.CreateTCPServiceOrFail(ns1, nil)
- jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
By("creating a UDP service " + serviceName + " with type=ClusterIP in namespace " + ns2)
udpService := jig.CreateUDPServiceOrFail(ns2, nil)
- jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
+ jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP)
By("verifying that TCP and UDP use the same port")
if tcpService.Spec.Ports[0].Port != udpService.Spec.Ports[0].Port {
@@ -543,18 +550,18 @@ var _ = framework.KubeDescribe("Services", func() {
// Change the services to NodePort.
By("changing the TCP service to type=NodePort")
- tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeNodePort
+ tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeNodePort
})
- jig.SanityCheckService(tcpService, api.ServiceTypeNodePort)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeNodePort)
tcpNodePort := int(tcpService.Spec.Ports[0].NodePort)
framework.Logf("TCP node port: %d", tcpNodePort)
By("changing the UDP service to type=NodePort")
- udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeNodePort
+ udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeNodePort
})
- jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+ jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
udpNodePort := int(udpService.Spec.Ports[0].NodePort)
framework.Logf("UDP node port: %d", udpNodePort)
@@ -587,15 +594,15 @@ var _ = framework.KubeDescribe("Services", func() {
}
By("changing the TCP service to type=LoadBalancer")
- tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
+ tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
s.Spec.LoadBalancerIP = requestedIP // will be "" if not applicable
- s.Spec.Type = api.ServiceTypeLoadBalancer
+ s.Spec.Type = v1.ServiceTypeLoadBalancer
})
if loadBalancerSupportsUDP {
By("changing the UDP service to type=LoadBalancer")
- udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeLoadBalancer
+ udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeLoadBalancer
})
}
serviceLBNames = append(serviceLBNames, getLoadBalancerName(tcpService))
@@ -606,7 +613,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("waiting for the TCP service to have a load balancer")
// Wait for the load balancer to be created asynchronously
tcpService = jig.WaitForLoadBalancerOrFail(ns1, tcpService.Name, loadBalancerCreateTimeout)
- jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
if int(tcpService.Spec.Ports[0].NodePort) != tcpNodePort {
framework.Failf("TCP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", tcpNodePort, tcpService.Spec.Ports[0].NodePort)
}
@@ -637,7 +644,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("waiting for the UDP service to have a load balancer")
// 2nd one should be faster since they ran in parallel.
udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name, loadBalancerCreateTimeout)
- jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
if int(udpService.Spec.Ports[0].NodePort) != udpNodePort {
framework.Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
}
@@ -668,7 +675,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("changing the TCP service's NodePort")
tcpService = jig.ChangeServiceNodePortOrFail(ns1, tcpService.Name, tcpNodePort)
- jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
tcpNodePortOld := tcpNodePort
tcpNodePort = int(tcpService.Spec.Ports[0].NodePort)
if tcpNodePort == tcpNodePortOld {
@@ -682,9 +689,9 @@ var _ = framework.KubeDescribe("Services", func() {
By("changing the UDP service's NodePort")
udpService = jig.ChangeServiceNodePortOrFail(ns2, udpService.Name, udpNodePort)
if loadBalancerSupportsUDP {
- jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
} else {
- jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+ jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
}
udpNodePortOld := udpNodePort
udpNodePort = int(udpService.Spec.Ports[0].NodePort)
@@ -719,10 +726,10 @@ var _ = framework.KubeDescribe("Services", func() {
// Change the services' main ports.
By("changing the TCP service's port")
- tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
+ tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
s.Spec.Ports[0].Port++
})
- jig.SanityCheckService(tcpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeLoadBalancer)
svcPortOld := svcPort
svcPort = int(tcpService.Spec.Ports[0].Port)
if svcPort == svcPortOld {
@@ -736,13 +743,13 @@ var _ = framework.KubeDescribe("Services", func() {
}
By("changing the UDP service's port")
- udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
+ udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
s.Spec.Ports[0].Port++
})
if loadBalancerSupportsUDP {
- jig.SanityCheckService(udpService, api.ServiceTypeLoadBalancer)
+ jig.SanityCheckService(udpService, v1.ServiceTypeLoadBalancer)
} else {
- jig.SanityCheckService(udpService, api.ServiceTypeNodePort)
+ jig.SanityCheckService(udpService, v1.ServiceTypeNodePort)
}
if int(udpService.Spec.Ports[0].Port) != svcPort {
framework.Failf("UDP Spec.Ports[0].Port (%d) did not change", udpService.Spec.Ports[0].Port)
@@ -773,23 +780,23 @@ var _ = framework.KubeDescribe("Services", func() {
// Change the services back to ClusterIP.
By("changing TCP service back to type=ClusterIP")
- tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeClusterIP
+ tcpService = jig.UpdateServiceOrFail(ns1, tcpService.Name, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
// Wait for the load balancer to be destroyed asynchronously
tcpService = jig.WaitForLoadBalancerDestroyOrFail(ns1, tcpService.Name, tcpIngressIP, svcPort, loadBalancerCreateTimeout)
- jig.SanityCheckService(tcpService, api.ServiceTypeClusterIP)
+ jig.SanityCheckService(tcpService, v1.ServiceTypeClusterIP)
By("changing UDP service back to type=ClusterIP")
- udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
- s.Spec.Type = api.ServiceTypeClusterIP
+ udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *v1.Service) {
+ s.Spec.Type = v1.ServiceTypeClusterIP
s.Spec.Ports[0].NodePort = 0
})
if loadBalancerSupportsUDP {
// Wait for the load balancer to be destroyed asynchronously
udpService = jig.WaitForLoadBalancerDestroyOrFail(ns2, udpService.Name, udpIngressIP, svcPort, loadBalancerCreateTimeout)
- jig.SanityCheckService(udpService, api.ServiceTypeClusterIP)
+ jig.SanityCheckService(udpService, v1.ServiceTypeClusterIP)
}
By("checking the TCP NodePort is closed")
@@ -821,24 +828,24 @@ var _ = framework.KubeDescribe("Services", func() {
}()
By("creating service " + serviceName + " with same NodePort but different protocols in namespace " + ns)
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: t.ServiceName,
Namespace: t.Namespace,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: t.Labels,
- Type: api.ServiceTypeNodePort,
- Ports: []api.ServicePort{
+ Type: v1.ServiceTypeNodePort,
+ Ports: []v1.ServicePort{
{
Name: "tcp-port",
Port: 53,
- Protocol: api.ProtocolTCP,
+ Protocol: v1.ProtocolTCP,
},
{
Name: "udp-port",
Port: 53,
- Protocol: api.ProtocolUDP,
+ Protocol: v1.ProtocolUDP,
},
},
},
@@ -872,11 +879,11 @@ var _ = framework.KubeDescribe("Services", func() {
By("creating service " + serviceName1 + " with type NodePort in namespace " + ns)
service := t.BuildServiceSpec()
- service.Spec.Type = api.ServiceTypeNodePort
+ service.Spec.Type = v1.ServiceTypeNodePort
result, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
- if result.Spec.Type != api.ServiceTypeNodePort {
+ if result.Spec.Type != v1.ServiceTypeNodePort {
framework.Failf("got unexpected Spec.Type for new service: %v", result)
}
if len(result.Spec.Ports) != 1 {
@@ -890,7 +897,7 @@ var _ = framework.KubeDescribe("Services", func() {
By("creating service " + serviceName2 + " with conflicting NodePort")
service2 := t.BuildServiceSpec()
service2.Name = serviceName2
- service2.Spec.Type = api.ServiceTypeNodePort
+ service2.Spec.Type = v1.ServiceTypeNodePort
service2.Spec.Ports[0].NodePort = port.NodePort
result2, err := t.CreateService(service2)
if err == nil {
@@ -923,13 +930,13 @@ var _ = framework.KubeDescribe("Services", func() {
}()
service := t.BuildServiceSpec()
- service.Spec.Type = api.ServiceTypeNodePort
+ service.Spec.Type = v1.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
- if service.Spec.Type != api.ServiceTypeNodePort {
+ if service.Spec.Type != v1.ServiceTypeNodePort {
framework.Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
@@ -952,7 +959,7 @@ var _ = framework.KubeDescribe("Services", func() {
}
}
By(fmt.Sprintf("changing service "+serviceName+" to out-of-range NodePort %d", outOfRangeNodePort))
- result, err := updateService(cs, ns, serviceName, func(s *api.Service) {
+ result, err := updateService(cs, ns, serviceName, func(s *v1.Service) {
s.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
})
if err == nil {
@@ -967,7 +974,7 @@ var _ = framework.KubeDescribe("Services", func() {
By(fmt.Sprintf("creating service "+serviceName+" with out-of-range NodePort %d", outOfRangeNodePort))
service = t.BuildServiceSpec()
- service.Spec.Type = api.ServiceTypeNodePort
+ service.Spec.Type = v1.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = int32(outOfRangeNodePort)
service, err = t.CreateService(service)
if err == nil {
@@ -991,13 +998,13 @@ var _ = framework.KubeDescribe("Services", func() {
}()
service := t.BuildServiceSpec()
- service.Spec.Type = api.ServiceTypeNodePort
+ service.Spec.Type = v1.ServiceTypeNodePort
By("creating service " + serviceName + " with type NodePort in namespace " + ns)
service, err := t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
- if service.Spec.Type != api.ServiceTypeNodePort {
+ if service.Spec.Type != v1.ServiceTypeNodePort {
framework.Failf("got unexpected Spec.Type for new service: %v", service)
}
if len(service.Spec.Ports) != 1 {
@@ -1033,7 +1040,7 @@ var _ = framework.KubeDescribe("Services", func() {
By(fmt.Sprintf("creating service "+serviceName+" with same NodePort %d", nodePort))
service = t.BuildServiceSpec()
- service.Spec.Type = api.ServiceTypeNodePort
+ service.Spec.Type = v1.ServiceTypeNodePort
service.Spec.Ports[0].NodePort = nodePort
service, err = t.CreateService(service)
Expect(err).NotTo(HaveOccurred())
@@ -1054,13 +1061,13 @@ var _ = framework.KubeDescribe("Services", func() {
service := t.BuildServiceSpec()
service.Annotations = map[string]string{endpoint.TolerateUnreadyEndpointsAnnotation: "true"}
- rcSpec := rcByNameContainer(t.name, 1, t.image, t.Labels, api.Container{
+ rcSpec := rcByNameContainer(t.name, 1, t.image, t.Labels, v1.Container{
Name: t.name,
Image: t.image,
- Ports: []api.ContainerPort{{ContainerPort: int32(80), Protocol: api.ProtocolTCP}},
- ReadinessProbe: &api.Probe{
- Handler: api.Handler{
- Exec: &api.ExecAction{
+ Ports: []v1.ContainerPort{{ContainerPort: int32(80), Protocol: v1.ProtocolTCP}},
+ ReadinessProbe: &v1.Probe{
+ Handler: v1.Handler{
+ Exec: &v1.ExecAction{
Command: []string{"/bin/false"},
},
},
@@ -1139,7 +1146,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
framework.Failf("Service HealthCheck NodePort was not allocated")
}
defer func() {
- jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
+ jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
// Make sure we didn't leak the health check node port.
for name, ips := range jig.getEndpointNodes(svc) {
@@ -1201,7 +1208,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, false)
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc))
defer func() {
- jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
+ jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}()
@@ -1210,9 +1217,9 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
framework.Failf("Service HealthCheck NodePort was not allocated")
}
- ips := collectAddresses(nodes, api.NodeExternalIP)
+ ips := collectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
- ips = collectAddresses(nodes, api.NodeLegacyHostIP)
+ ips = collectAddresses(nodes, v1.NodeLegacyHostIP)
}
ingressIP := getIngressPoint(&svc.Status.LoadBalancer.Ingress[0])
@@ -1224,7 +1231,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
endpointNodeName := nodes.Items[i].Name
By("creating a pod to be part of the service " + serviceName + " on node " + endpointNodeName)
- jig.RunOrFail(namespace, func(rc *api.ReplicationController) {
+ jig.RunOrFail(namespace, func(rc *v1.ReplicationController) {
rc.Name = serviceName
if endpointNodeName != "" {
rc.Spec.Template.Spec.NodeName = endpointNodeName
@@ -1248,7 +1255,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
// Make sure the loadbalancer picked up the health check change
jig.TestReachableHTTP(ingressIP, svcTCPPort, kubeProxyLagTimeout)
}
- framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, namespace, serviceName))
+ framework.ExpectNoError(framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, namespace, serviceName))
}
})
@@ -1261,7 +1268,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true)
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc))
defer func() {
- jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
+ jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}()
@@ -1310,7 +1317,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
svc := jig.createOnlyLocalLoadBalancerService(namespace, serviceName, loadBalancerCreateTimeout, true)
serviceLBNames = append(serviceLBNames, getLoadBalancerName(svc))
defer func() {
- jig.ChangeServiceType(svc.Namespace, svc.Name, api.ServiceTypeClusterIP, loadBalancerCreateTimeout)
+ jig.ChangeServiceType(svc.Namespace, svc.Name, v1.ServiceTypeClusterIP, loadBalancerCreateTimeout)
Expect(cs.Core().Services(svc.Namespace).Delete(svc.Name, nil)).NotTo(HaveOccurred())
}()
@@ -1318,7 +1325,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
healthCheckNodePort := int(service.GetServiceHealthCheckNodePort(svc))
By("turning ESIPP off")
- svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *api.Service) {
+ svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.ObjectMeta.Annotations[service.BetaAnnotationExternalTraffic] =
service.AnnotationValueExternalTrafficGlobal
})
@@ -1332,7 +1339,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
if _, ok := endpointNodeMap[n.Name]; ok {
continue
}
- noEndpointNodeMap[n.Name] = getNodeAddresses(&n, api.NodeExternalIP)
+ noEndpointNodeMap[n.Name] = getNodeAddresses(&n, v1.NodeExternalIP)
}
svcTCPPort := int(svc.Spec.Ports[0].Port)
@@ -1382,7 +1389,7 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
// creation will fail.
By("turning ESIPP annotation back on")
- svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *api.Service) {
+ svc = jig.UpdateServiceOrFail(svc.Namespace, svc.Name, func(svc *v1.Service) {
svc.ObjectMeta.Annotations[service.BetaAnnotationExternalTraffic] =
service.AnnotationValueExternalTrafficLocal
// Request the same healthCheckNodePort as before, to test the user-requested allocation path
@@ -1407,8 +1414,8 @@ var _ = framework.KubeDescribe("ESIPP [Slow][Feature:ExternalTrafficLocalOnly]",
// updateService fetches a service, calls the update function on it,
// and then attempts to send the updated service. It retries up to 2
// times in the face of timeouts and conflicts.
-func updateService(c clientset.Interface, namespace, serviceName string, update func(*api.Service)) (*api.Service, error) {
- var service *api.Service
+func updateService(c clientset.Interface, namespace, serviceName string, update func(*v1.Service)) (*v1.Service, error) {
+ var service *v1.Service
var err error
for i := 0; i < 3; i++ {
service, err = c.Core().Services(namespace).Get(serviceName)
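The hunk above (and the one that follows) only shows fragments of the get/mutate/update loop, so for reference here is a compact sketch of the pattern with the v1 types and the release_1_5 clientset used in this file; the function name and the server-timeout retry are illustrative, not part of this change:

package e2e

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/errors"
    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// updateServiceWithRetries fetches the service, applies mutate, and retries
// the update a couple of times when the apiserver reports a conflict.
func updateServiceWithRetries(c clientset.Interface, namespace, name string, mutate func(*v1.Service)) (*v1.Service, error) {
    var lastErr error
    for attempt := 0; attempt < 3; attempt++ {
        svc, err := c.Core().Services(namespace).Get(name)
        if err != nil {
            return nil, err
        }
        mutate(svc)
        updated, err := c.Core().Services(namespace).Update(svc)
        if err == nil {
            return updated, nil
        }
        if !errors.IsConflict(err) && !errors.IsServerTimeout(err) {
            return nil, err
        }
        lastErr = err
    }
    return nil, fmt.Errorf("too many retries updating service %q: %v", name, lastErr)
}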
@@ -1427,7 +1434,7 @@ func updateService(c clientset.Interface, namespace, serviceName string, update
return service, err
}
-func getContainerPortsByPodUID(endpoints *api.Endpoints) PortsByPodUID {
+func getContainerPortsByPodUID(endpoints *v1.Endpoints) PortsByPodUID {
m := PortsByPodUID{}
for _, ss := range endpoints.Subsets {
for _, port := range ss.Ports {
@@ -1526,7 +1533,7 @@ func validateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
i++
}
- if pods, err := c.Core().Pods(api.NamespaceAll).List(api.ListOptions{}); err == nil {
+ if pods, err := c.Core().Pods(v1.NamespaceAll).List(v1.ListOptions{}); err == nil {
for _, pod := range pods.Items {
framework.Logf("Pod %s\t%s\t%s\t%s", pod.Namespace, pod.Name, pod.Spec.NodeName, pod.DeletionTimestamp)
}
@@ -1537,16 +1544,16 @@ func validateEndpointsOrFail(c clientset.Interface, namespace, serviceName strin
}
// newExecPodSpec returns the pod spec of exec pod
-func newExecPodSpec(ns, generateName string) *api.Pod {
+func newExecPodSpec(ns, generateName string) *v1.Pod {
immediate := int64(0)
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: generateName,
Namespace: ns,
},
- Spec: api.PodSpec{
+ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &immediate,
- Containers: []api.Container{
+ Containers: []v1.Container{
{
Name: "exec",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -1571,7 +1578,7 @@ func createExecPodOrFail(client clientset.Interface, ns, generateName string) st
if err != nil {
return false, nil
}
- return retrievedPod.Status.Phase == api.PodRunning, nil
+ return retrievedPod.Status.Phase == v1.PodRunning, nil
})
Expect(err).NotTo(HaveOccurred())
return created.Name
@@ -1590,28 +1597,28 @@ func createExecPodOnNode(client clientset.Interface, ns, nodeName, generateName
if err != nil {
return false, nil
}
- return retrievedPod.Status.Phase == api.PodRunning, nil
+ return retrievedPod.Status.Phase == v1.PodRunning, nil
})
Expect(err).NotTo(HaveOccurred())
return created.Name
}
-func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []api.ContainerPort) {
+func createPodOrFail(c clientset.Interface, ns, name string, labels map[string]string, containerPorts []v1.ContainerPort) {
By(fmt.Sprintf("creating pod %s in namespace %s", name, ns))
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: labels,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "pause",
Image: framework.GetPauseImageName(c),
Ports: containerPorts,
// Add a dummy environment variable to work around a docker issue.
// https://github.com/docker/docker/issues/14203
- Env: []api.EnvVar{{Name: "FOO", Value: " "}},
+ Env: []v1.EnvVar{{Name: "FOO", Value: " "}},
},
},
},
@@ -1626,7 +1633,7 @@ func deletePodOrFail(c clientset.Interface, ns, name string) {
Expect(err).NotTo(HaveOccurred())
}
-func getNodeAddresses(node *api.Node, addressType api.NodeAddressType) (ips []string) {
+func getNodeAddresses(node *v1.Node, addressType v1.NodeAddressType) (ips []string) {
for j := range node.Status.Addresses {
nodeAddress := &node.Status.Addresses[j]
if nodeAddress.Type == addressType {
@@ -1636,7 +1643,7 @@ func getNodeAddresses(node *api.Node, addressType api.NodeAddressType) (ips []st
return
}
-func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []string {
+func collectAddresses(nodes *v1.NodeList, addressType v1.NodeAddressType) []string {
ips := []string{}
for i := range nodes.Items {
ips = append(ips, getNodeAddresses(&nodes.Items[i], addressType)...)
@@ -1647,9 +1654,9 @@ func collectAddresses(nodes *api.NodeList, addressType api.NodeAddressType) []st
func getNodePublicIps(c clientset.Interface) ([]string, error) {
nodes := framework.GetReadySchedulableNodesOrDie(c)
- ips := collectAddresses(nodes, api.NodeExternalIP)
+ ips := collectAddresses(nodes, v1.NodeExternalIP)
if len(ips) == 0 {
- ips = collectAddresses(nodes, api.NodeLegacyHostIP)
+ ips = collectAddresses(nodes, v1.NodeLegacyHostIP)
}
return ips, nil
}
@@ -1840,16 +1847,16 @@ func testNotReachableUDP(ip string, port int, request string) (bool, error) {
}
// Creates a replication controller that serves its hostname and a service on top of it.
-func startServeHostnameService(c clientset.Interface, ns, name string, port, replicas int) ([]string, string, error) {
+func startServeHostnameService(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, port, replicas int) ([]string, string, error) {
podNames := make([]string, replicas)
By("creating service " + name + " in namespace " + ns)
- _, err := c.Core().Services(ns).Create(&api.Service{
- ObjectMeta: api.ObjectMeta{
+ _, err := c.Core().Services(ns).Create(&v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: name,
},
- Spec: api.ServiceSpec{
- Ports: []api.ServicePort{{
+ Spec: v1.ServiceSpec{
+ Ports: []v1.ServicePort{{
Port: int32(port),
TargetPort: intstr.FromInt(9376),
Protocol: "TCP",
@@ -1863,10 +1870,11 @@ func startServeHostnameService(c clientset.Interface, ns, name string, port, rep
return podNames, "", err
}
- var createdPods []*api.Pod
+ var createdPods []*v1.Pod
maxContainerFailures := 0
config := testutils.RCConfig{
Client: c,
+ InternalClient: internalClient,
Image: "gcr.io/google_containers/serve_hostname:v1.4",
Name: name,
Namespace: ns,
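As elsewhere in this PR, the RCConfig now carries both clientsets: the typed release_1_5 client drives the API calls while the internal clientset is threaded through for helpers that still require it. A small sketch of the same construction, assuming RCConfig's usual Replicas field (the wrapper name is illustrative):

package e2e

import (
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    testutils "k8s.io/kubernetes/test/utils"
)

// newServeHostnameRCConfig bundles both clientsets into the RC config used
// by startServeHostnameService above.
func newServeHostnameRCConfig(c clientset.Interface, internalClient internalclientset.Interface, ns, name string, replicas int) testutils.RCConfig {
    return testutils.RCConfig{
        Client:         c,
        InternalClient: internalClient,
        Image:          "gcr.io/google_containers/serve_hostname:v1.4",
        Name:           name,
        Namespace:      ns,
        Replicas:       replicas, // assumed field; not shown in the hunk above
    }
}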
@@ -1901,8 +1909,8 @@ func startServeHostnameService(c clientset.Interface, ns, name string, port, rep
return podNames, serviceIP, nil
}
-func stopServeHostnameService(clientset clientset.Interface, ns, name string) error {
- if err := framework.DeleteRCAndPods(clientset, ns, name); err != nil {
+func stopServeHostnameService(clientset clientset.Interface, internalClientset internalclientset.Interface, ns, name string) error {
+ if err := framework.DeleteRCAndPods(clientset, internalClientset, ns, name); err != nil {
return err
}
if err := clientset.Core().Services(ns).Delete(name, nil); err != nil {
@@ -2043,19 +2051,19 @@ func NewServiceTestJig(client clientset.Interface, name string) *ServiceTestJig
return j
}
-// newServiceTemplate returns the default api.Service template for this jig, but
+// newServiceTemplate returns the default v1.Service template for this jig, but
// does not actually create the Service. The default Service has the same name
// as the jig and exposes the given port.
-func (j *ServiceTestJig) newServiceTemplate(namespace string, proto api.Protocol, port int32) *api.Service {
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+func (j *ServiceTestJig) newServiceTemplate(namespace string, proto v1.Protocol, port int32) *v1.Service {
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: j.Name,
Labels: j.Labels,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: j.Labels,
- Ports: []api.ServicePort{
+ Ports: []v1.ServicePort{
{
Protocol: proto,
Port: port,
@@ -2069,8 +2077,8 @@ func (j *ServiceTestJig) newServiceTemplate(namespace string, proto api.Protocol
// CreateTCPServiceWithPort creates a new TCP Service with given port based on the
// jig's defaults. Callers can provide a function to tweak the Service object before
// it is created.
-func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *api.Service), port int32) *api.Service {
- svc := j.newServiceTemplate(namespace, api.ProtocolTCP, port)
+func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(svc *v1.Service), port int32) *v1.Service {
+ svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, port)
if tweak != nil {
tweak(svc)
}
@@ -2084,8 +2092,8 @@ func (j *ServiceTestJig) CreateTCPServiceWithPort(namespace string, tweak func(s
// CreateTCPServiceOrFail creates a new TCP Service based on the jig's
// defaults. Callers can provide a function to tweak the Service object before
// it is created.
-func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
- svc := j.newServiceTemplate(namespace, api.ProtocolTCP, 80)
+func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service {
+ svc := j.newServiceTemplate(namespace, v1.ProtocolTCP, 80)
if tweak != nil {
tweak(svc)
}
@@ -2099,8 +2107,8 @@ func (j *ServiceTestJig) CreateTCPServiceOrFail(namespace string, tweak func(svc
// CreateUDPServiceOrFail creates a new UDP Service based on the jig's
// defaults. Callers can provide a function to tweak the Service object before
// it is created.
-func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *api.Service)) *api.Service {
- svc := j.newServiceTemplate(namespace, api.ProtocolUDP, 80)
+func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc *v1.Service)) *v1.Service {
+ svc := j.newServiceTemplate(namespace, v1.ProtocolUDP, 80)
if tweak != nil {
tweak(svc)
}
@@ -2111,9 +2119,9 @@ func (j *ServiceTestJig) CreateUDPServiceOrFail(namespace string, tweak func(svc
return result
}
-func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType api.ServiceType, timeout time.Duration) {
+func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType v1.ServiceType, timeout time.Duration) {
ingressIP := ""
- svc := j.UpdateServiceOrFail(namespace, name, func(s *api.Service) {
+ svc := j.UpdateServiceOrFail(namespace, name, func(s *v1.Service) {
for _, ing := range s.Status.LoadBalancer.Ingress {
if ing.IP != "" {
ingressIP = ing.IP
@@ -2130,35 +2138,35 @@ func (j *ServiceTestJig) ChangeServiceType(namespace, name string, newType api.S
// createOnlyLocalNodePortService creates a NodePort service and sanity checks its
// nodePort. If createPod is true, it also creates an RC with 1 replica of
// the standard netexec container used everywhere in this test.
-func (j *ServiceTestJig) createOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *api.Service {
+func (j *ServiceTestJig) createOnlyLocalNodePortService(namespace, serviceName string, createPod bool) *v1.Service {
By("creating a service " + namespace + "/" + serviceName + " with type=NodePort and annotation for local-traffic-only")
- svc := j.CreateTCPServiceOrFail(namespace, func(svc *api.Service) {
- svc.Spec.Type = api.ServiceTypeNodePort
+ svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+ svc.Spec.Type = v1.ServiceTypeNodePort
svc.ObjectMeta.Annotations = map[string]string{
service.BetaAnnotationExternalTraffic: service.AnnotationValueExternalTrafficLocal}
- svc.Spec.Ports = []api.ServicePort{{Protocol: "TCP", Port: 80}}
+ svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}}
})
if createPod {
By("creating a pod to be part of the service " + serviceName)
j.RunOrFail(namespace, nil)
}
- j.SanityCheckService(svc, api.ServiceTypeNodePort)
+ j.SanityCheckService(svc, v1.ServiceTypeNodePort)
return svc
}
// createOnlyLocalLoadBalancerService creates a loadbalancer service and waits for it to
// acquire an ingress IP. If createPod is true, it also creates an RC with 1
// replica of the standard netexec container used everywhere in this test.
-func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool) *api.Service {
+func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceName string, timeout time.Duration, createPod bool) *v1.Service {
By("creating a service " + namespace + "/" + serviceName + " with type=LoadBalancer and annotation for local-traffic-only")
- svc := j.CreateTCPServiceOrFail(namespace, func(svc *api.Service) {
- svc.Spec.Type = api.ServiceTypeLoadBalancer
+ svc := j.CreateTCPServiceOrFail(namespace, func(svc *v1.Service) {
+ svc.Spec.Type = v1.ServiceTypeLoadBalancer
// We need to turn affinity off for our LB distribution tests
- svc.Spec.SessionAffinity = api.ServiceAffinityNone
+ svc.Spec.SessionAffinity = v1.ServiceAffinityNone
svc.ObjectMeta.Annotations = map[string]string{
service.BetaAnnotationExternalTraffic: service.AnnotationValueExternalTrafficLocal}
- svc.Spec.Ports = []api.ServicePort{{Protocol: "TCP", Port: 80}}
+ svc.Spec.Ports = []v1.ServicePort{{Protocol: "TCP", Port: 80}}
})
if createPod {
@@ -2167,13 +2175,13 @@ func (j *ServiceTestJig) createOnlyLocalLoadBalancerService(namespace, serviceNa
}
By("waiting for loadbalancer for service " + namespace + "/" + serviceName)
svc = j.WaitForLoadBalancerOrFail(namespace, serviceName, timeout)
- j.SanityCheckService(svc, api.ServiceTypeLoadBalancer)
+ j.SanityCheckService(svc, v1.ServiceTypeLoadBalancer)
return svc
}
// getEndpointNodes returns a map of nodenames:external-ip on which the
// endpoints of the given Service are running.
-func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string {
+func (j *ServiceTestJig) getEndpointNodes(svc *v1.Service) map[string][]string {
nodes := j.getNodes(maxNodesForEndpointsTests)
endpoints, err := j.Client.Core().Endpoints(svc.Namespace).Get(svc.Name)
if err != nil {
@@ -2193,7 +2201,7 @@ func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string
nodeMap := map[string][]string{}
for _, n := range nodes.Items {
if epNodes.Has(n.Name) {
- nodeMap[n.Name] = getNodeAddresses(&n, api.NodeExternalIP)
+ nodeMap[n.Name] = getNodeAddresses(&n, v1.NodeExternalIP)
}
}
return nodeMap
@@ -2201,7 +2209,7 @@ func (j *ServiceTestJig) getEndpointNodes(svc *api.Service) map[string][]string
// getNodes returns the first maxNodesForTest nodes. Useful in large clusters
// where we don't, for example, want to create an endpoint per node.
-func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *api.NodeList) {
+func (j *ServiceTestJig) getNodes(maxNodesForTest int) (nodes *v1.NodeList) {
nodes = framework.GetReadySchedulableNodesOrDie(j.Client)
if len(nodes.Items) <= maxNodesForTest {
maxNodesForTest = len(nodes.Items)
@@ -2233,12 +2241,12 @@ func (j *ServiceTestJig) waitForEndpointOnNode(namespace, serviceName, nodeName
framework.ExpectNoError(err)
}
-func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.ServiceType) {
+func (j *ServiceTestJig) SanityCheckService(svc *v1.Service, svcType v1.ServiceType) {
if svc.Spec.Type != svcType {
framework.Failf("unexpected Spec.Type (%s) for service, expected %s", svc.Spec.Type, svcType)
}
expectNodePorts := false
- if svcType != api.ServiceTypeClusterIP {
+ if svcType != v1.ServiceTypeClusterIP {
expectNodePorts = true
}
for i, port := range svc.Spec.Ports {
@@ -2253,7 +2261,7 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic
}
}
expectIngress := false
- if svcType == api.ServiceTypeLoadBalancer {
+ if svcType == v1.ServiceTypeLoadBalancer {
expectIngress = true
}
hasIngress := len(svc.Status.LoadBalancer.Ingress) != 0
@@ -2272,7 +2280,7 @@ func (j *ServiceTestJig) SanityCheckService(svc *api.Service, svcType api.Servic
// UpdateService fetches a service, calls the update function on it, and
// then attempts to send the updated service. It tries up to 3 times in the
// face of timeouts and conflicts.
-func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.Service)) (*api.Service, error) {
+func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*v1.Service)) (*v1.Service, error) {
for i := 0; i < 3; i++ {
service, err := j.Client.Core().Services(namespace).Get(name)
if err != nil {
@@ -2293,7 +2301,7 @@ func (j *ServiceTestJig) UpdateService(namespace, name string, update func(*api.
// UpdateServiceOrFail fetches a service, calls the update function on it, and
// then attempts to send the updated service. It tries up to 3 times in the
// face of timeouts and conflicts.
-func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*api.Service)) *api.Service {
+func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func(*v1.Service)) *v1.Service {
svc, err := j.UpdateService(namespace, name, update)
if err != nil {
framework.Failf(err.Error())
@@ -2301,14 +2309,14 @@ func (j *ServiceTestJig) UpdateServiceOrFail(namespace, name string, update func
return svc
}
-func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *api.Service {
+func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, initial int) *v1.Service {
var err error
- var service *api.Service
+ var service *v1.Service
for i := 1; i < ServiceNodePortRange.Size; i++ {
offs1 := initial - ServiceNodePortRange.Base
offs2 := (offs1 + i) % ServiceNodePortRange.Size
newPort := ServiceNodePortRange.Base + offs2
- service, err = j.UpdateService(namespace, name, func(s *api.Service) {
+ service, err = j.UpdateService(namespace, name, func(s *v1.Service) {
s.Spec.Ports[0].NodePort = int32(newPort)
})
if err != nil && strings.Contains(err.Error(), "provided port is already allocated") {
@@ -2324,8 +2332,8 @@ func (j *ServiceTestJig) ChangeServiceNodePortOrFail(namespace, name string, ini
return service
}
-func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *api.Service {
- var service *api.Service
+func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeout time.Duration) *v1.Service {
+ var service *v1.Service
framework.Logf("Waiting up to %v for service %q to have a LoadBalancer", timeout, name)
pollFunc := func() (bool, error) {
svc, err := j.Client.Core().Services(namespace).Get(name)
@@ -2344,7 +2352,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerOrFail(namespace, name string, timeo
return service
}
-func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *api.Service {
+func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string, ip string, port int, timeout time.Duration) *v1.Service {
// TODO: once support ticket 21807001 is resolved, reduce this timeout back to something reasonable
defer func() {
if err := framework.EnsureLoadBalancerResourcesDeleted(ip, strconv.Itoa(port)); err != nil {
@@ -2352,7 +2360,7 @@ func (j *ServiceTestJig) WaitForLoadBalancerDestroyOrFail(namespace, name string
}
}()
- var service *api.Service
+ var service *v1.Service
framework.Logf("Waiting up to %v for service %q to have no LoadBalancer", timeout, name)
pollFunc := func() (bool, error) {
svc, err := j.Client.Core().Services(namespace).Get(name)
@@ -2425,7 +2433,7 @@ func (j *ServiceTestJig) TestHTTPHealthCheckNodePort(host string, port int, requ
return pass, fail, statusMsg
}
-func getIngressPoint(ing *api.LoadBalancerIngress) string {
+func getIngressPoint(ing *v1.LoadBalancerIngress) string {
host := ing.IP
if host == "" {
host = ing.Hostname
@@ -2433,33 +2441,33 @@ func getIngressPoint(ing *api.LoadBalancerIngress) string {
return host
}
-// newRCTemplate returns the default api.ReplicationController object for
+// newRCTemplate returns the default v1.ReplicationController object for
// this jig, but does not actually create the RC. The default RC has the same
// name as the jig and runs the "netexec" container.
-func (j *ServiceTestJig) newRCTemplate(namespace string) *api.ReplicationController {
- rc := &api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+func (j *ServiceTestJig) newRCTemplate(namespace string) *v1.ReplicationController {
+ rc := &v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Namespace: namespace,
Name: j.Name,
Labels: j.Labels,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: 1,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: func(i int) *int32 { x := int32(i); return &x }(1),
Selector: j.Labels,
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: j.Labels,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "netexec",
Image: "gcr.io/google_containers/netexec:1.7",
Args: []string{"--http-port=80", "--udp-port=80"},
- ReadinessProbe: &api.Probe{
+ ReadinessProbe: &v1.Probe{
PeriodSeconds: 3,
- Handler: api.Handler{
- HTTPGet: &api.HTTPGetAction{
+ Handler: v1.Handler{
+ HTTPGet: &v1.HTTPGetAction{
Port: intstr.FromInt(80),
Path: "/hostName",
},
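Replicas is a *int32 in the v1 ReplicationControllerSpec, which is why the hunk above wraps the literal in an inline closure (and why Cleanup below assigns through a temporary). A tiny helper expresses the same thing; it is illustrative and not part of this change:

// int32Ptr returns a pointer to the given value, for *int32 fields such as
// v1.ReplicationControllerSpec.Replicas, e.g. Replicas: int32Ptr(1).
func int32Ptr(i int32) *int32 {
    return &i
}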
@@ -2478,7 +2486,7 @@ func (j *ServiceTestJig) newRCTemplate(namespace string) *api.ReplicationControl
// RunOrFail creates a ReplicationController and Pod(s) and waits for the
// Pod(s) to be running. Callers can provide a function to tweak the RC object
// before it is created.
-func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.ReplicationController)) *api.ReplicationController {
+func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *v1.ReplicationController)) *v1.ReplicationController {
rc := j.newRCTemplate(namespace)
if tweak != nil {
tweak(rc)
@@ -2487,7 +2495,7 @@ func (j *ServiceTestJig) RunOrFail(namespace string, tweak func(rc *api.Replicat
if err != nil {
framework.Failf("Failed to created RC %q: %v", rc.Name, err)
}
- pods, err := j.waitForPodsCreated(namespace, int(rc.Spec.Replicas))
+ pods, err := j.waitForPodsCreated(namespace, int(*(rc.Spec.Replicas)))
if err != nil {
framework.Failf("Failed to create pods: %v", err)
}
@@ -2503,7 +2511,7 @@ func (j *ServiceTestJig) waitForPodsCreated(namespace string, replicas int) ([]s
label := labels.SelectorFromSet(labels.Set(j.Labels))
framework.Logf("Waiting up to %v for %d pods to be created", timeout, replicas)
for start := time.Now(); time.Since(start) < timeout; time.Sleep(2 * time.Second) {
- options := api.ListOptions{LabelSelector: label}
+ options := v1.ListOptions{LabelSelector: label.String()}
pods, err := j.Client.Core().Pods(namespace).List(options)
if err != nil {
return nil, err
@@ -2568,15 +2576,15 @@ func NewServerTest(client clientset.Interface, namespace string, serviceName str
}
// Build default config for a service (which can then be changed)
-func (t *ServiceTestFixture) BuildServiceSpec() *api.Service {
- service := &api.Service{
- ObjectMeta: api.ObjectMeta{
+func (t *ServiceTestFixture) BuildServiceSpec() *v1.Service {
+ service := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: t.ServiceName,
Namespace: t.Namespace,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: t.Labels,
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
@@ -2587,8 +2595,8 @@ func (t *ServiceTestFixture) BuildServiceSpec() *api.Service {
// CreateWebserverRC creates rc-backed pods with the well-known webserver
// configuration and records it for cleanup.
-func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *api.ReplicationController {
- rcSpec := rcByNamePort(t.name, replicas, t.image, 80, api.ProtocolTCP, t.Labels, nil)
+func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *v1.ReplicationController {
+ rcSpec := rcByNamePort(t.name, replicas, t.image, 80, v1.ProtocolTCP, t.Labels, nil)
rcAct, err := t.createRC(rcSpec)
if err != nil {
framework.Failf("Failed to create rc %s: %v", rcSpec.Name, err)
@@ -2600,7 +2608,7 @@ func (t *ServiceTestFixture) CreateWebserverRC(replicas int32) *api.ReplicationC
}
// createRC creates a replication controller and records it for cleanup.
-func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.ReplicationController, error) {
+func (t *ServiceTestFixture) createRC(rc *v1.ReplicationController) (*v1.ReplicationController, error) {
rc, err := t.Client.Core().ReplicationControllers(t.Namespace).Create(rc)
if err == nil {
t.rcs[rc.Name] = true
@@ -2609,7 +2617,7 @@ func (t *ServiceTestFixture) createRC(rc *api.ReplicationController) (*api.Repli
}
// CreateService creates a service and records it for cleanup.
-func (t *ServiceTestFixture) CreateService(service *api.Service) (*api.Service, error) {
+func (t *ServiceTestFixture) CreateService(service *v1.Service) (*v1.Service, error) {
result, err := t.Client.Core().Services(t.Namespace).Create(service)
if err == nil {
t.services[service.Name] = true
@@ -2635,7 +2643,8 @@ func (t *ServiceTestFixture) Cleanup() []error {
if err != nil {
errs = append(errs, err)
}
- old.Spec.Replicas = 0
+ x := int32(0)
+ old.Spec.Replicas = &x
if _, err := t.Client.Core().ReplicationControllers(t.Namespace).Update(old); err != nil {
errs = append(errs, err)
}
@@ -2659,21 +2668,21 @@ func (t *ServiceTestFixture) Cleanup() []error {
}
// newEchoServerPodSpec returns the pod spec of the echo server pod.
-func newEchoServerPodSpec(podName string) *api.Pod {
+func newEchoServerPodSpec(podName string) *v1.Pod {
port := 8080
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: podName,
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "echoserver",
Image: "gcr.io/google_containers/echoserver:1.4",
- Ports: []api.ContainerPort{{ContainerPort: int32(port)}},
+ Ports: []v1.ContainerPort{{ContainerPort: int32(port)}},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
return pod
@@ -2735,7 +2744,7 @@ func execSourceipTest(f *framework.Framework, c clientset.Interface, ns, nodeNam
return execPod.Status.PodIP, outputs[1]
}
-func getLoadBalancerName(service *api.Service) string {
+func getLoadBalancerName(service *v1.Service) string {
// GCE requires that the name of a load balancer starts with a lowercase letter.
ret := "a" + string(service.UID)
ret = strings.Replace(ret, "-", "", -1)
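getLoadBalancerName above only changes its parameter type; the naming rule itself is unchanged. As a standalone illustration of that rule (prefix with "a" so the name starts with a lowercase letter, then strip dashes from the UID), assuming a made-up UID:

package main

import (
	"fmt"
	"strings"
)

// loadBalancerName mirrors the logic of getLoadBalancerName: prefix "a" so the
// name starts with a lowercase letter, then drop the dashes from the UID.
func loadBalancerName(uid string) string {
	return "a" + strings.Replace(uid, "-", "", -1)
}

func main() {
	fmt.Println(loadBalancerName("3f2c9c2e-9a1b-11e6-b0f3-42010af00002"))
}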
diff --git a/test/e2e/service_accounts.go b/test/e2e/service_accounts.go
index ffa1ce81bed..47c63e00f68 100644
--- a/test/e2e/service_accounts.go
+++ b/test/e2e/service_accounts.go
@@ -20,8 +20,8 @@ import (
"fmt"
"time"
- "k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/uuid"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/pkg/version"
@@ -39,7 +39,7 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
It("should ensure a single API token exists", func() {
// wait for the service account to reference a single secret
- var secrets []api.ObjectReference
+ var secrets []v1.ObjectReference
framework.ExpectNoError(wait.Poll(time.Millisecond*500, time.Second*10, func() (bool, error) {
By("waiting for a single token reference")
sa, err := f.ClientSet.Core().ServiceAccounts(f.Namespace.Name).Get("default")
@@ -178,9 +178,9 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
framework.Logf("Error getting secret %s: %v", secretRef.Name, err)
continue
}
- if secret.Type == api.SecretTypeServiceAccountToken {
- tokenContent = string(secret.Data[api.ServiceAccountTokenKey])
- rootCAContent = string(secret.Data[api.ServiceAccountRootCAKey])
+ if secret.Type == v1.SecretTypeServiceAccountToken {
+ tokenContent = string(secret.Data[v1.ServiceAccountTokenKey])
+ rootCAContent = string(secret.Data[v1.ServiceAccountRootCAKey])
return true, nil
}
}
@@ -189,52 +189,52 @@ var _ = framework.KubeDescribe("ServiceAccounts", func() {
return false, nil
}))
- pod := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ pod := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "pod-service-account-" + string(uuid.NewUUID()) + "-",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "token-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
- fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey),
+ fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey),
},
},
{
Name: "root-ca-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
- fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountRootCAKey),
+ fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey),
},
},
},
- RestartPolicy: api.RestartPolicyNever,
+ RestartPolicy: v1.RestartPolicyNever,
},
}
supportsTokenNamespace, _ := framework.ServerVersionGTE(serviceAccountTokenNamespaceVersion, f.ClientSet.Discovery())
if supportsTokenNamespace {
- pod.Spec.Containers = append(pod.Spec.Containers, api.Container{
+ pod.Spec.Containers = append(pod.Spec.Containers, v1.Container{
Name: "namespace-test",
Image: "gcr.io/google_containers/mounttest:0.7",
Args: []string{
- fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountNamespaceKey),
+ fmt.Sprintf("--file_content=%s/%s", serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey),
},
})
}
f.TestContainerOutput("consume service account token", pod, 0, []string{
- fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountTokenKey, tokenContent),
+ fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountTokenKey, tokenContent),
})
f.TestContainerOutput("consume service account root CA", pod, 1, []string{
- fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountRootCAKey, rootCAContent),
+ fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountRootCAKey, rootCAContent),
})
if supportsTokenNamespace {
f.TestContainerOutput("consume service account namespace", pod, 2, []string{
- fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, api.ServiceAccountNamespaceKey, f.Namespace.Name),
+ fmt.Sprintf(`content of file "%s/%s": %s`, serviceaccount.DefaultAPITokenMountPath, v1.ServiceAccountNamespaceKey, f.Namespace.Name),
})
}
})
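The test above asserts on output lines of the form: content of file "<mount path>/<key>": <value>. A tiny sketch of how such an expected line is assembled; the mount path and key are written as literals purely for illustration (the test itself uses serviceaccount.DefaultAPITokenMountPath and the v1.ServiceAccount*Key constants):

package main

import "fmt"

func main() {
	// Illustrative literals only; the test reads these from package constants.
	mountPath := "/var/run/secrets/kubernetes.io/serviceaccount"
	tokenKey := "token"
	expected := fmt.Sprintf(`content of file "%s/%s": %s`, mountPath, tokenKey, "<token contents>")
	fmt.Println(expected)
}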
diff --git a/test/e2e/service_latency.go b/test/e2e/service_latency.go
index b8856f9dd97..03f5ab3879a 100644
--- a/test/e2e/service_latency.go
+++ b/test/e2e/service_latency.go
@@ -22,7 +22,7 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/runtime"
@@ -118,12 +118,13 @@ var _ = framework.KubeDescribe("Service endpoints latency", func() {
func runServiceLatencies(f *framework.Framework, inParallel, total int) (output []time.Duration, err error) {
cfg := testutils.RCConfig{
- Client: f.ClientSet,
- Image: framework.GetPauseImageName(f.ClientSet),
- Name: "svc-latency-rc",
- Namespace: f.Namespace.Name,
- Replicas: 1,
- PollInterval: time.Second,
+ Client: f.ClientSet,
+ InternalClient: f.InternalClientset,
+ Image: framework.GetPauseImageName(f.ClientSet),
+ Name: "svc-latency-rc",
+ Namespace: f.Namespace.Name,
+ Replicas: 1,
+ PollInterval: time.Second,
}
if err := framework.RunRC(cfg); err != nil {
return nil, err
@@ -179,7 +180,7 @@ func runServiceLatencies(f *framework.Framework, inParallel, total int) (output
type endpointQuery struct {
endpointsName string
- endpoints *api.Endpoints
+ endpoints *v1.Endpoints
result chan<- struct{}
}
@@ -188,7 +189,7 @@ type endpointQueries struct {
stop chan struct{}
requestChan chan *endpointQuery
- seenChan chan *api.Endpoints
+ seenChan chan *v1.Endpoints
}
func newQuerier() *endpointQueries {
@@ -197,7 +198,7 @@ func newQuerier() *endpointQueries {
stop: make(chan struct{}, 100),
requestChan: make(chan *endpointQuery),
- seenChan: make(chan *api.Endpoints, 100),
+ seenChan: make(chan *v1.Endpoints, 100),
}
go eq.join()
return eq
@@ -257,7 +258,7 @@ func (eq *endpointQueries) join() {
}
// request blocks until the requested endpoint is seen.
-func (eq *endpointQueries) request(endpointsName string) *api.Endpoints {
+func (eq *endpointQueries) request(endpointsName string) *v1.Endpoints {
result := make(chan struct{})
req := &endpointQuery{
endpointsName: endpointsName,
@@ -269,7 +270,7 @@ func (eq *endpointQueries) request(endpointsName string) *api.Endpoints {
}
// marks e as added; does not block.
-func (eq *endpointQueries) added(e *api.Endpoints) {
+func (eq *endpointQueries) added(e *v1.Endpoints) {
eq.seenChan <- e
}
@@ -277,26 +278,26 @@ func (eq *endpointQueries) added(e *api.Endpoints) {
func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
_, controller := cache.NewInformer(
&cache.ListWatch{
- ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
obj, err := f.ClientSet.Core().Endpoints(f.Namespace.Name).List(options)
return runtime.Object(obj), err
},
- WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.ClientSet.Core().Endpoints(f.Namespace.Name).Watch(options)
},
},
- &api.Endpoints{},
+ &v1.Endpoints{},
0,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
- if e, ok := obj.(*api.Endpoints); ok {
+ if e, ok := obj.(*v1.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
q.added(e)
}
}
},
UpdateFunc: func(old, cur interface{}) {
- if e, ok := cur.(*api.Endpoints); ok {
+ if e, ok := cur.(*v1.Endpoints); ok {
if len(e.Subsets) > 0 && len(e.Subsets[0].Addresses) > 0 {
q.added(e)
}
@@ -315,15 +316,15 @@ func startEndpointWatcher(f *framework.Framework, q *endpointQueries) {
func singleServiceLatency(f *framework.Framework, name string, q *endpointQueries) (time.Duration, error) {
// Make a service that points to that pod.
- svc := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ svc := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "latency-svc-",
},
- Spec: api.ServiceSpec{
- Ports: []api.ServicePort{{Protocol: api.ProtocolTCP, Port: 80}},
+ Spec: v1.ServiceSpec{
+ Ports: []v1.ServicePort{{Protocol: v1.ProtocolTCP, Port: 80}},
Selector: map[string]string{"name": name},
- Type: api.ServiceTypeClusterIP,
- SessionAffinity: api.ServiceAffinityNone,
+ Type: v1.ServiceTypeClusterIP,
+ SessionAffinity: v1.ServiceAffinityNone,
},
}
startTime := time.Now()
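endpointQueries above implements a request/seen rendezvous over channels: request registers interest in an endpoints name and blocks, while the join loop matches incoming objects from seenChan against pending requests. A stripped-down, single-goroutine sketch of the same idea (not concurrency-safe, unlike the real join loop; the names are ours):

package main

import "fmt"

// waiter pairs named requests with later "seen" notifications, the pattern the
// endpoint latency test uses to time how long a Service takes to get endpoints.
type waiter struct {
	pending map[string]chan string
}

func newWaiter() *waiter { return &waiter{pending: map[string]chan string{}} }

// request returns a channel that receives once the named item has been seen.
func (w *waiter) request(name string) <-chan string {
	ch := make(chan string, 1)
	w.pending[name] = ch
	return ch
}

// added delivers a value to the matching request, if any, without blocking.
func (w *waiter) added(name, value string) {
	if ch, ok := w.pending[name]; ok {
		ch <- value
		delete(w.pending, name)
	}
}

func main() {
	w := newWaiter()
	done := w.request("latency-svc-x")
	w.added("latency-svc-x", "endpoints ready")
	fmt.Println(<-done)
}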
diff --git a/test/e2e/serviceloadbalancers.go b/test/e2e/serviceloadbalancers.go
index 2936ffc5918..d210036022a 100644
--- a/test/e2e/serviceloadbalancers.go
+++ b/test/e2e/serviceloadbalancers.go
@@ -22,7 +22,8 @@ import (
"net/http"
"k8s.io/kubernetes/pkg/api"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
@@ -111,7 +112,7 @@ func (h *haproxyControllerTester) start(namespace string) (err error) {
// Find the pods of the rc we just created.
labelSelector := labels.SelectorFromSet(
labels.Set(map[string]string{"name": h.rcName}))
- options := api.ListOptions{LabelSelector: labelSelector}
+ options := v1.ListOptions{LabelSelector: labelSelector.String()}
pods, err := h.client.Core().Pods(h.rcNamespace).List(options)
if err != nil {
return err
@@ -262,8 +263,8 @@ func simpleGET(c *http.Client, url, host string) (string, error) {
}
// rcFromManifest reads a .json/yaml file and returns the rc in it.
-func rcFromManifest(fileName string) *api.ReplicationController {
- var controller api.ReplicationController
+func rcFromManifest(fileName string) *v1.ReplicationController {
+ var controller v1.ReplicationController
framework.Logf("Parsing rc from %v", fileName)
data := framework.ReadOrDie(fileName)
@@ -275,8 +276,8 @@ func rcFromManifest(fileName string) *api.ReplicationController {
}
// svcFromManifest reads a .json/yaml file and returns the service in it.
-func svcFromManifest(fileName string) *api.Service {
- var svc api.Service
+func svcFromManifest(fileName string) *v1.Service {
+ var svc v1.Service
framework.Logf("Parsing service from %v", fileName)
data := framework.ReadOrDie(fileName)
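One behavioural detail in this file's hunks: v1.ListOptions carries the label selector as a string, so the typed selector is now rendered with .String(). For an equality-based set selector that string is just comma-joined key=value pairs; a small sketch reproducing the format (our own helper, not the labels package):

package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorString approximates labels.SelectorFromSet(set).String() for a
// simple equality-based selector: sorted "key=value" pairs joined by commas.
func selectorString(set map[string]string) string {
	keys := make([]string, 0, len(set))
	for k := range set {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+set[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	// e.g. the haproxy controller pods are listed with {"name": <rcName>}.
	fmt.Println(selectorString(map[string]string{"name": "haproxy-rc"}))
}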
diff --git a/test/e2e/third-party.go b/test/e2e/third-party.go
index bfd40d34bc2..caa52c71c58 100644
--- a/test/e2e/third-party.go
+++ b/test/e2e/third-party.go
@@ -24,8 +24,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+ "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
- "k8s.io/kubernetes/pkg/apis/extensions"
+ extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/wait"
"k8s.io/kubernetes/test/e2e/framework"
@@ -45,7 +46,7 @@ var data = `{
type Foo struct {
unversioned.TypeMeta `json:",inline"`
- api.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`
+ v1.ObjectMeta `json:"metadata,omitempty" description:"standard object metadata"`
SomeField string `json:"someField"`
OtherField int `json:"otherField"`
@@ -64,7 +65,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() {
f := framework.NewDefaultFramework("thirdparty")
rsrc := &extensions.ThirdPartyResource{
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "foo.company.com",
},
Versions: []extensions.APIVersion{
@@ -120,7 +121,7 @@ var _ = Describe("ThirdParty resources [Flaky] [Disruptive]", func() {
TypeMeta: unversioned.TypeMeta{
Kind: "Foo",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "foo",
},
SomeField: "bar",
diff --git a/test/e2e/ubernetes_lite.go b/test/e2e/ubernetes_lite.go
index 9e8a9061d25..504bae4f3f5 100644
--- a/test/e2e/ubernetes_lite.go
+++ b/test/e2e/ubernetes_lite.go
@@ -22,9 +22,9 @@ import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/intstr"
"k8s.io/kubernetes/pkg/util/sets"
@@ -61,16 +61,16 @@ var _ = framework.KubeDescribe("Multi-AZ Clusters", func() {
func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string) {
// First create the service
serviceName := "test-service"
- serviceSpec := &api.Service{
- ObjectMeta: api.ObjectMeta{
+ serviceSpec := &v1.Service{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
Namespace: f.Namespace.Name,
},
- Spec: api.ServiceSpec{
+ Spec: v1.ServiceSpec{
Selector: map[string]string{
"service": serviceName,
},
- Ports: []api.ServicePort{{
+ Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
@@ -80,13 +80,13 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
Expect(err).NotTo(HaveOccurred())
// Now create some pods behind the service
- podSpec := &api.Pod{
- ObjectMeta: api.ObjectMeta{
+ podSpec := &v1.Pod{
+ ObjectMeta: v1.ObjectMeta{
Name: serviceName,
Labels: map[string]string{"service": serviceName},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "test",
Image: framework.GetPauseImageName(f.ClientSet),
@@ -113,7 +113,7 @@ func SpreadServiceOrFail(f *framework.Framework, replicaCount int, image string)
}
// Find the name of the zone in which a Node is running
-func getZoneNameForNode(node api.Node) (string, error) {
+func getZoneNameForNode(node v1.Node) (string, error) {
for key, value := range node.Labels {
if key == unversioned.LabelZoneFailureDomain {
return value, nil
@@ -126,7 +126,7 @@ func getZoneNameForNode(node api.Node) (string, error) {
// Find the names of all zones in which we have nodes in this cluster.
func getZoneNames(c clientset.Interface) ([]string, error) {
zoneNames := sets.NewString()
- nodes, err := c.Core().Nodes().List(api.ListOptions{})
+ nodes, err := c.Core().Nodes().List(v1.ListOptions{})
if err != nil {
return nil, err
}
@@ -148,7 +148,7 @@ func getZoneCount(c clientset.Interface) (int, error) {
}
// Find the name of the zone in which the pod is scheduled
-func getZoneNameForPod(c clientset.Interface, pod api.Pod) (string, error) {
+func getZoneNameForPod(c clientset.Interface, pod v1.Pod) (string, error) {
By(fmt.Sprintf("Getting zone name for pod %s, on node %s", pod.Name, pod.Spec.NodeName))
node, err := c.Core().Nodes().Get(pod.Spec.NodeName)
Expect(err).NotTo(HaveOccurred())
@@ -157,7 +157,7 @@ func getZoneNameForPod(c clientset.Interface, pod api.Pod) (string, error) {
// Determine whether a set of pods is approximately evenly spread
// across a given set of zones.
-func checkZoneSpreading(c clientset.Interface, pods *api.PodList, zoneNames []string) (bool, error) {
+func checkZoneSpreading(c clientset.Interface, pods *v1.PodList, zoneNames []string) (bool, error) {
podsPerZone := make(map[string]int)
for _, zoneName := range zoneNames {
podsPerZone[zoneName] = 0
@@ -190,26 +190,26 @@ func checkZoneSpreading(c clientset.Interface, pods *api.PodList, zoneNames []st
func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
name := "ubelite-spread-rc-" + string(uuid.NewUUID())
By(fmt.Sprintf("Creating replication controller %s", name))
- controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&api.ReplicationController{
- ObjectMeta: api.ObjectMeta{
+ controller, err := f.ClientSet.Core().ReplicationControllers(f.Namespace.Name).Create(&v1.ReplicationController{
+ ObjectMeta: v1.ObjectMeta{
Namespace: f.Namespace.Name,
Name: name,
},
- Spec: api.ReplicationControllerSpec{
- Replicas: replicaCount,
+ Spec: v1.ReplicationControllerSpec{
+ Replicas: &replicaCount,
Selector: map[string]string{
"name": name,
},
- Template: &api.PodTemplateSpec{
- ObjectMeta: api.ObjectMeta{
+ Template: &v1.PodTemplateSpec{
+ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"name": name},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: name,
Image: image,
- Ports: []api.ContainerPort{{ContainerPort: 9376}},
+ Ports: []v1.ContainerPort{{ContainerPort: 9376}},
},
},
},
@@ -220,7 +220,7 @@ func SpreadRCOrFail(f *framework.Framework, replicaCount int32, image string) {
// Clean up the replication controller when we are done.
defer func() {
// Resize the replication controller to zero to get rid of pods.
- if err := framework.DeleteRCAndPods(f.ClientSet, f.Namespace.Name, controller.Name); err != nil {
+ if err := framework.DeleteRCAndPods(f.ClientSet, f.InternalClientset, f.Namespace.Name, controller.Name); err != nil {
framework.Logf("Failed to cleanup replication controller %v: %v.", controller.Name, err)
}
}()
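checkZoneSpreading above tallies pods per zone and then decides whether the spread is "approximately even". As a rough illustration of that kind of check (the exact tolerance used by the test may differ; this sketch allows per-zone counts to differ by at most one):

package main

import "fmt"

// evenlySpread reports whether the per-zone pod counts differ by at most one.
func evenlySpread(podsPerZone map[string]int) bool {
	first := true
	min, max := 0, 0
	for _, n := range podsPerZone {
		if first {
			min, max = n, n
			first = false
			continue
		}
		if n < min {
			min = n
		}
		if n > max {
			max = n
		}
	}
	return max-min <= 1
}

func main() {
	fmt.Println(evenlySpread(map[string]int{"us-central1-a": 2, "us-central1-b": 2, "us-central1-f": 1})) // true
	fmt.Println(evenlySpread(map[string]int{"us-central1-a": 4, "us-central1-b": 0}))                     // false
}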
diff --git a/test/e2e/volume_provisioning.go b/test/e2e/volume_provisioning.go
index 18a6981c92b..dc5606b7f33 100644
--- a/test/e2e/volume_provisioning.go
+++ b/test/e2e/volume_provisioning.go
@@ -19,12 +19,12 @@ package e2e
import (
"time"
- "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
- "k8s.io/kubernetes/pkg/apis/storage"
- storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
+ storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -39,8 +39,8 @@ const (
expectedSize = "2Gi"
)
-func testDynamicProvisioning(client clientset.Interface, claim *api.PersistentVolumeClaim) {
- err := framework.WaitForPersistentVolumeClaimPhase(api.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
+func testDynamicProvisioning(client clientset.Interface, claim *v1.PersistentVolumeClaim) {
+ err := framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
By("checking the claim")
@@ -54,16 +54,16 @@ func testDynamicProvisioning(client clientset.Interface, claim *api.PersistentVo
// Check sizes
expectedCapacity := resource.MustParse(expectedSize)
- pvCapacity := pv.Spec.Capacity[api.ResourceName(api.ResourceStorage)]
+ pvCapacity := pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)]
Expect(pvCapacity.Value()).To(Equal(expectedCapacity.Value()))
requestedCapacity := resource.MustParse(requestedSize)
- claimCapacity := claim.Spec.Resources.Requests[api.ResourceName(api.ResourceStorage)]
+ claimCapacity := claim.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
Expect(claimCapacity.Value()).To(Equal(requestedCapacity.Value()))
// Check PV properties
- Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(api.PersistentVolumeReclaimDelete))
- expectedAccessModes := []api.PersistentVolumeAccessMode{api.ReadWriteOnce}
+ Expect(pv.Spec.PersistentVolumeReclaimPolicy).To(Equal(v1.PersistentVolumeReclaimDelete))
+ expectedAccessModes := []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
Expect(pv.Spec.AccessModes).To(Equal(expectedAccessModes))
Expect(pv.Spec.ClaimRef.Name).To(Equal(claim.ObjectMeta.Name))
Expect(pv.Spec.ClaimRef.Namespace).To(Equal(claim.ObjectMeta.Namespace))
@@ -153,19 +153,19 @@ var _ = framework.KubeDescribe("Dynamic provisioning", func() {
})
})
-func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
- claim := api.PersistentVolumeClaim{
- ObjectMeta: api.ObjectMeta{
+func newClaim(ns string, alpha bool) *v1.PersistentVolumeClaim {
+ claim := v1.PersistentVolumeClaim{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
- Spec: api.PersistentVolumeClaimSpec{
- AccessModes: []api.PersistentVolumeAccessMode{
- api.ReadWriteOnce,
+ Spec: v1.PersistentVolumeClaimSpec{
+ AccessModes: []v1.PersistentVolumeAccessMode{
+ v1.ReadWriteOnce,
},
- Resources: api.ResourceRequirements{
- Requests: api.ResourceList{
- api.ResourceName(api.ResourceStorage): resource.MustParse(requestedSize),
+ Resources: v1.ResourceRequirements{
+ Requests: v1.ResourceList{
+ v1.ResourceName(v1.ResourceStorage): resource.MustParse(requestedSize),
},
},
},
@@ -187,22 +187,22 @@ func newClaim(ns string, alpha bool) *api.PersistentVolumeClaim {
// runInPodWithVolume runs a command in a pod with the given claim mounted at /mnt/test.
func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
- pod := &api.Pod{
+ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
GenerateName: "pvc-volume-tester-",
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: "volume-tester",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", command},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: "my-volume",
MountPath: "/mnt/test",
@@ -210,12 +210,12 @@ func runInPodWithVolume(c clientset.Interface, ns, claimName, command string) {
},
},
},
- RestartPolicy: api.RestartPolicyNever,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
{
Name: "my-volume",
- VolumeSource: api.VolumeSource{
- PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
+ VolumeSource: v1.VolumeSource{
+ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claimName,
ReadOnly: false,
},
@@ -248,7 +248,7 @@ func newStorageClass() *storage.StorageClass {
TypeMeta: unversioned.TypeMeta{
Kind: "StorageClass",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: "fast",
},
Provisioner: pluginName,
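The capacity assertions in testDynamicProvisioning compare quantities by their canonical int64 value, which is why a "2Gi" request can be checked against whatever unit the provisioner reports. A minimal sketch using the same resource package this file imports (assumes it is built inside the kubernetes tree at this revision, per the import in the hunk above):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/resource"
)

func main() {
	requested := resource.MustParse("2Gi")
	provisioned := resource.MustParse("2147483648") // the same size expressed in plain bytes
	// Value() normalizes both quantities to int64 bytes before comparing.
	fmt.Println(requested.Value() == provisioned.Value()) // true
}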
diff --git a/test/e2e/volumes.go b/test/e2e/volumes.go
index 9432bb4a035..14634aaf220 100644
--- a/test/e2e/volumes.go
+++ b/test/e2e/volumes.go
@@ -46,10 +46,10 @@ import (
"strings"
"time"
- "k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
- clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+ "k8s.io/kubernetes/pkg/api/v1"
+ clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/test/e2e/framework"
"github.com/golang/glog"
@@ -78,31 +78,31 @@ type VolumeTestConfig struct {
// Starts a container specified by config.serverImage and exports all
// config.serverPorts from it. The returned pod should be used to get the server
// IP address and create appropriate VolumeSource.
-func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *api.Pod {
+func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *v1.Pod {
podClient := client.Core().Pods(config.namespace)
portCount := len(config.serverPorts)
- serverPodPorts := make([]api.ContainerPort, portCount)
+ serverPodPorts := make([]v1.ContainerPort, portCount)
for i := 0; i < portCount; i++ {
portName := fmt.Sprintf("%s-%d", config.prefix, i)
- serverPodPorts[i] = api.ContainerPort{
+ serverPodPorts[i] = v1.ContainerPort{
Name: portName,
ContainerPort: int32(config.serverPorts[i]),
- Protocol: api.ProtocolTCP,
+ Protocol: v1.ProtocolTCP,
}
}
volumeCount := len(config.volumes)
- volumes := make([]api.Volume, volumeCount)
- mounts := make([]api.VolumeMount, volumeCount)
+ volumes := make([]v1.Volume, volumeCount)
+ mounts := make([]v1.VolumeMount, volumeCount)
i := 0
for src, dst := range config.volumes {
mountName := fmt.Sprintf("path%d", i)
volumes[i].Name = mountName
- volumes[i].VolumeSource.HostPath = &api.HostPathVolumeSource{
+ volumes[i].VolumeSource.HostPath = &v1.HostPathVolumeSource{
Path: src,
}
@@ -116,24 +116,24 @@ func startVolumeServer(client clientset.Interface, config VolumeTestConfig) *api
By(fmt.Sprint("creating ", config.prefix, " server pod"))
privileged := new(bool)
*privileged = true
- serverPod := &api.Pod{
+ serverPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
Labels: map[string]string{
"role": config.prefix + "-server",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: config.prefix + "-server",
Image: config.serverImage,
- SecurityContext: &api.SecurityContext{
+ SecurityContext: &v1.SecurityContext{
Privileged: privileged,
},
Args: config.serverArgs,
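startVolumeServer above keeps the two-line pattern (privileged := new(bool); *privileged = true) for the pointer-valued Privileged field. A one-line helper is a common alternative; the boolPtr name below is ours, not something this change introduces:

package main

import "fmt"

// boolPtr returns a pointer to b, handy for pointer-valued fields such as
// v1.SecurityContext.Privileged.
func boolPtr(b bool) *bool { return &b }

func main() {
	privileged := boolPtr(true) // e.g. SecurityContext: &v1.SecurityContext{Privileged: boolPtr(true)}
	fmt.Println(*privileged)
}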
@@ -194,21 +194,21 @@ func volumeTestCleanup(f *framework.Framework, config VolumeTestConfig) {
// Start a client pod using given VolumeSource (exported by startVolumeServer())
// and check that the pod sees the data from the server pod.
-func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volume api.VolumeSource, fsGroup *int64, expectedContent string) {
+func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, fsGroup *int64, expectedContent string) {
By(fmt.Sprint("starting ", config.prefix, " client"))
- clientPod := &api.Pod{
+ clientPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-client",
Labels: map[string]string{
"role": config.prefix + "-client",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: config.prefix + "-client",
Image: "gcr.io/google_containers/busybox:1.24",
@@ -221,7 +221,7 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
"-c",
"while true ; do cat /opt/index.html ; sleep 2 ; ls -altrh /opt/ ; sleep 2 ; done ",
},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/opt/",
@@ -229,12 +229,12 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
},
},
},
- SecurityContext: &api.PodSecurityContext{
- SELinuxOptions: &api.SELinuxOptions{
+ SecurityContext: &v1.PodSecurityContext{
+ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
- Volumes: []api.Volume{
+ Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
@@ -268,29 +268,29 @@ func testVolumeClient(client clientset.Interface, config VolumeTestConfig, volum
// Insert index.html with the given content into the given volume. It does so by
// starting an auxiliary pod which writes the file there.
// The volume must be writable.
-func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.VolumeSource, content string) {
+func injectHtml(client clientset.Interface, config VolumeTestConfig, volume v1.VolumeSource, content string) {
By(fmt.Sprint("starting ", config.prefix, " injector"))
podClient := client.Core().Pods(config.namespace)
- injectPod := &api.Pod{
+ injectPod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-injector",
Labels: map[string]string{
"role": config.prefix + "-injector",
},
},
- Spec: api.PodSpec{
- Containers: []api.Container{
+ Spec: v1.PodSpec{
+ Containers: []v1.Container{
{
Name: config.prefix + "-injector",
Image: "gcr.io/google_containers/busybox:1.24",
Command: []string{"/bin/sh"},
Args: []string{"-c", "echo '" + content + "' > /mnt/index.html && chmod o+rX /mnt /mnt/index.html"},
- VolumeMounts: []api.VolumeMount{
+ VolumeMounts: []v1.VolumeMount{
{
Name: config.prefix + "-volume",
MountPath: "/mnt",
@@ -298,13 +298,13 @@ func injectHtml(client clientset.Interface, config VolumeTestConfig, volume api.
},
},
},
- SecurityContext: &api.PodSecurityContext{
- SELinuxOptions: &api.SELinuxOptions{
+ SecurityContext: &v1.PodSecurityContext{
+ SELinuxOptions: &v1.SELinuxOptions{
Level: "s0:c0,c1",
},
},
- RestartPolicy: api.RestartPolicyNever,
- Volumes: []api.Volume{
+ RestartPolicy: v1.RestartPolicyNever,
+ Volumes: []v1.Volume{
{
Name: config.prefix + "-volume",
VolumeSource: volume,
@@ -354,7 +354,7 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
clean := true
// filled in BeforeEach
var cs clientset.Interface
- var namespace *api.Namespace
+ var namespace *v1.Namespace
BeforeEach(func() {
cs = f.ClientSet
@@ -383,8 +383,8 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
serverIP := pod.Status.PodIP
framework.Logf("NFS server IP address: %v", serverIP)
- volume := api.VolumeSource{
- NFS: &api.NFSVolumeSource{
+ volume := v1.VolumeSource{
+ NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/",
ReadOnly: true,
@@ -418,26 +418,26 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Logf("Gluster server IP address: %v", serverIP)
// create Endpoints for the server
- endpoints := api.Endpoints{
+ endpoints := v1.Endpoints{
TypeMeta: unversioned.TypeMeta{
Kind: "Endpoints",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-server",
},
- Subsets: []api.EndpointSubset{
+ Subsets: []v1.EndpointSubset{
{
- Addresses: []api.EndpointAddress{
+ Addresses: []v1.EndpointAddress{
{
IP: serverIP,
},
},
- Ports: []api.EndpointPort{
+ Ports: []v1.EndpointPort{
{
Name: "gluster",
Port: 24007,
- Protocol: api.ProtocolTCP,
+ Protocol: v1.ProtocolTCP,
},
},
},
@@ -456,8 +456,8 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Failf("Failed to create endpoints for Gluster server: %v", err)
}
- volume := api.VolumeSource{
- Glusterfs: &api.GlusterfsVolumeSource{
+ volume := v1.VolumeSource{
+ Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: config.prefix + "-server",
// 'test_vol' comes from test/images/volumes-tester/gluster/run_gluster.sh
Path: "test_vol",
@@ -500,8 +500,8 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
serverIP := pod.Status.PodIP
framework.Logf("iSCSI server IP address: %v", serverIP)
- volume := api.VolumeSource{
- ISCSI: &api.ISCSIVolumeSource{
+ volume := v1.VolumeSource{
+ ISCSI: &v1.ISCSIVolumeSource{
TargetPortal: serverIP + ":3260",
// from test/images/volumes-tester/iscsi/initiatorname.iscsi
IQN: "iqn.2003-01.org.linux-iscsi.f21.x8664:sn.4b0aae584f7c",
@@ -544,12 +544,12 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Logf("Ceph server IP address: %v", serverIP)
// create secrets for the server
- secret := api.Secret{
+ secret := v1.Secret{
TypeMeta: unversioned.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-secret",
},
Data: map[string][]byte{
@@ -571,13 +571,13 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Failf("Failed to create secrets for Ceph RBD: %v", err)
}
- volume := api.VolumeSource{
- RBD: &api.RBDVolumeSource{
+ volume := v1.VolumeSource{
+ RBD: &v1.RBDVolumeSource{
CephMonitors: []string{serverIP},
RBDPool: "rbd",
RBDImage: "foo",
RadosUser: "admin",
- SecretRef: &api.LocalObjectReference{
+ SecretRef: &v1.LocalObjectReference{
Name: config.prefix + "-secret",
},
FSType: "ext2",
@@ -615,12 +615,12 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
time.Sleep(20 * time.Second)
// create ceph secret
- secret := &api.Secret{
+ secret := &v1.Secret{
TypeMeta: unversioned.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
- ObjectMeta: api.ObjectMeta{
+ ObjectMeta: v1.ObjectMeta{
Name: config.prefix + "-secret",
},
// Must use the ceph keyring at contrib/for-tests/volumes-ceph/ceph/init.sh
@@ -644,11 +644,11 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
framework.Failf("unable to create test secret %s: %v", secret.Name, err)
}
- volume := api.VolumeSource{
- CephFS: &api.CephFSVolumeSource{
+ volume := v1.VolumeSource{
+ CephFS: &v1.CephFSVolumeSource{
Monitors: []string{serverIP + ":6789"},
User: "kube",
- SecretRef: &api.LocalObjectReference{Name: config.prefix + "-secret"},
+ SecretRef: &v1.LocalObjectReference{Name: config.prefix + "-secret"},
ReadOnly: true,
},
}
@@ -714,8 +714,8 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
volumeTestCleanup(f, config)
}
}()
- volume := api.VolumeSource{
- Cinder: &api.CinderVolumeSource{
+ volume := v1.VolumeSource{
+ Cinder: &v1.CinderVolumeSource{
VolumeID: volumeID,
FSType: "ext3",
ReadOnly: false,
@@ -758,8 +758,8 @@ var _ = framework.KubeDescribe("Volumes [Feature:Volumes]", func() {
volumeTestCleanup(f, config)
}
}()
- volume := api.VolumeSource{
- GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+ volume := v1.VolumeSource{
+ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeName,
FSType: "ext3",
ReadOnly: false,