cmd/kube-controller-manager

Author: Chao Xu
Date:   2016-11-18 12:50:17 -08:00
Parent: 48536eaef9
Commit: 7eeb71f698
109 changed files with 4380 additions and 4153 deletions

View File

@@ -34,10 +34,11 @@ import (
"k8s.io/kubernetes/cmd/kube-controller-manager/app/options" "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/leaderelection" "k8s.io/kubernetes/pkg/client/leaderelection"
"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
@@ -159,8 +160,8 @@ func Run(s *options.CMServer) error {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controller-manager"}) recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "controller-manager"})
run := func(stop <-chan struct{}) { run := func(stop <-chan struct{}) {
rootClientBuilder := controller.SimpleControllerClientBuilder{ rootClientBuilder := controller.SimpleControllerClientBuilder{
@@ -194,7 +195,7 @@ func Run(s *options.CMServer) error {
// TODO: enable other lock types // TODO: enable other lock types
rl := resourcelock.EndpointsLock{ rl := resourcelock.EndpointsLock{
EndpointsMeta: api.ObjectMeta{ EndpointsMeta: v1.ObjectMeta{
Namespace: "kube-system", Namespace: "kube-system",
Name: "kube-controller-manager", Name: "kube-controller-manager",
}, },
@@ -225,7 +226,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
return rootClientBuilder.ClientOrDie(serviceAccountName) return rootClientBuilder.ClientOrDie(serviceAccountName)
} }
discoveryClient := client("controller-discovery").Discovery() discoveryClient := client("controller-discovery").Discovery()
sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), ResyncPeriod(s)()) sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), nil, ResyncPeriod(s)())
// always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest // always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
if len(s.ServiceAccountKeyFile) > 0 { if len(s.ServiceAccountKeyFile) > 0 {
@@ -392,7 +393,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
return gvr, nil return gvr, nil
} }
} }
namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, gvrFn, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes) namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, gvrFn, s.NamespaceSyncPeriod.Duration, v1.FinalizerKubernetes)
go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop) go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter)) time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
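Taken together, the hunks above rewire the controller manager's event recording through the versioned client. A minimal sketch of the resulting pattern, using only identifiers that appear in this file's diff (the wrapper package name and the kubeClient argument are assumptions):

package app // assumed wrapper, not part of the commit

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// newRecorder wires an event recorder through the versioned core client:
// events flow into kubeClient.Core().Events("") via the v1 EventSinkImpl,
// and the recorder stamps a v1.EventSource instead of an api.EventSource.
func newRecorder(kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return eventBroadcaster.NewRecorder(v1.EventSource{Component: "controller-manager"})
}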

View File

@@ -22,11 +22,11 @@ import (
"strings" "strings"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/certificates" certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -62,7 +62,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
// Send events to the apiserver // Send events to the apiserver
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
// Configure cfssl signer // Configure cfssl signer
// TODO: support non-default policy and remote/pkcs11 signing // TODO: support non-default policy and remote/pkcs11 signing
@@ -84,10 +84,10 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
// Manage the addition/update of certificate requests // Manage the addition/update of certificate requests
cc.csrStore.Store, cc.csrController = cache.NewInformer( cc.csrStore.Store, cc.csrController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().List(options) return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options) return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options)
}, },
}, },
@@ -240,7 +240,7 @@ func (cc *CertificateController) maybeAutoApproveCSR(csr *certificates.Certifica
return csr, nil return csr, nil
} }
x509cr, err := certutil.ParseCSR(csr) x509cr, err := certutil.ParseCSRV1alpha1(csr)
if err != nil { if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to parse csr %q: %v", csr.Name, err)) utilruntime.HandleError(fmt.Errorf("unable to parse csr %q: %v", csr.Name, err))
return csr, nil return csr, nil
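The certificate controller's informer now lists and watches CSRs through the versioned signatures shown above. A standalone sketch of that ListWatch, with only the helper name and package clause being ours:

package certificates // assumed; helper name is ours

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)

// csrListWatch lists and watches CertificateSigningRequests; the callbacks
// now take v1.ListOptions rather than the internal api.ListOptions.
func csrListWatch(kubeClient clientset.Interface) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			return kubeClient.Certificates().CertificateSigningRequests().List(options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			return kubeClient.Certificates().CertificateSigningRequests().Watch(options)
		},
	}
}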

View File

@@ -16,7 +16,7 @@ limitations under the License.
package certificates package certificates
import "k8s.io/kubernetes/pkg/apis/certificates" import certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
// IsCertificateRequestApproved returns true if a certificate request has the // IsCertificateRequestApproved returns true if a certificate request has the
// "Approved" condition and no "Denied" conditions; false otherwise. // "Approved" condition and no "Denied" conditions; false otherwise.

View File

@@ -22,9 +22,10 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors" apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -76,7 +77,7 @@ type SAControllerClientBuilder struct {
// CoreClient is used to provision service accounts if needed and watch for their associated tokens // CoreClient is used to provision service accounts if needed and watch for their associated tokens
// to construct a controller client // to construct a controller client
CoreClient unversionedcore.CoreInterface CoreClient v1core.CoreV1Interface
// Namespace is the namespace used to host the service accounts that will back the // Namespace is the namespace used to host the service accounts that will back the
// controllers. It must be highly privileged namespace which normal users cannot inspect. // controllers. It must be highly privileged namespace which normal users cannot inspect.
@@ -96,26 +97,26 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
// check to see if the namespace exists. If it isn't a NotFound, just try to create the SA. // check to see if the namespace exists. If it isn't a NotFound, just try to create the SA.
// It'll probably fail, but perhaps that will have a better message. // It'll probably fail, but perhaps that will have a better message.
if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) { if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) {
_, err = b.CoreClient.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: b.Namespace}}) _, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: b.Namespace}})
if err != nil && !apierrors.IsAlreadyExists(err) { if err != nil && !apierrors.IsAlreadyExists(err) {
return nil, err return nil, err
} }
} }
sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create( sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(
&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: b.Namespace, Name: name}}) &v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Namespace: b.Namespace, Name: name}})
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
lw := &cache.ListWatch{ lw := &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
return b.CoreClient.Secrets(b.Namespace).List(options) return b.CoreClient.Secrets(b.Namespace).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
return b.CoreClient.Secrets(b.Namespace).Watch(options) return b.CoreClient.Secrets(b.Namespace).Watch(options)
}, },
} }
@@ -128,13 +129,13 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
return false, fmt.Errorf("error watching") return false, fmt.Errorf("error watching")
case watch.Added, watch.Modified: case watch.Added, watch.Modified:
secret := event.Object.(*api.Secret) secret := event.Object.(*v1.Secret)
if !serviceaccount.IsServiceAccountToken(secret, sa) || if !serviceaccount.IsServiceAccountToken(secret, sa) ||
len(secret.Data[api.ServiceAccountTokenKey]) == 0 { len(secret.Data[v1.ServiceAccountTokenKey]) == 0 {
return false, nil return false, nil
} }
// TODO maybe verify the token is valid // TODO maybe verify the token is valid
clientConfig.BearerToken = string(secret.Data[api.ServiceAccountTokenKey]) clientConfig.BearerToken = string(secret.Data[v1.ServiceAccountTokenKey])
restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name)) restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name))
return true, nil return true, nil
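The added .String() calls above exist because the versioned ListOptions carries selectors as plain strings. A small sketch isolating that conversion; the helper name and package clause are ours, the field constants are the ones used in the diff:

package controller // assumed; helper name is ours

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/fields"
)

// tokenSecretListOptions selects service-account token secrets. With
// v1.ListOptions the FieldSelector field is a string, so the selector has
// to be rendered with .String() instead of being assigned directly.
func tokenSecretListOptions() v1.ListOptions {
	sel := fields.SelectorFromSet(map[string]string{
		api.SecretTypeField: string(v1.SecretTypeServiceAccountToken),
	})
	return v1.ListOptions{FieldSelector: sel.String()}
}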

View File

@@ -21,15 +21,15 @@ import (
"strings" "strings"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
) )
type PodControllerRefManager struct { type PodControllerRefManager struct {
podControl PodControlInterface podControl PodControlInterface
controllerObject api.ObjectMeta controllerObject v1.ObjectMeta
controllerSelector labels.Selector controllerSelector labels.Selector
controllerKind unversioned.GroupVersionKind controllerKind unversioned.GroupVersionKind
} }
@@ -38,7 +38,7 @@ type PodControllerRefManager struct {
// methods to manage the controllerRef of pods. // methods to manage the controllerRef of pods.
func NewPodControllerRefManager( func NewPodControllerRefManager(
podControl PodControlInterface, podControl PodControlInterface,
controllerObject api.ObjectMeta, controllerObject v1.ObjectMeta,
controllerSelector labels.Selector, controllerSelector labels.Selector,
controllerKind unversioned.GroupVersionKind, controllerKind unversioned.GroupVersionKind,
) *PodControllerRefManager { ) *PodControllerRefManager {
@@ -53,10 +53,10 @@ func NewPodControllerRefManager(
// controllerRef pointing to other object are ignored) 3. controlledDoesNotMatch // controllerRef pointing to other object are ignored) 3. controlledDoesNotMatch
// are the pods that have a controllerRef pointing to the controller, but their // are the pods that have a controllerRef pointing to the controller, but their
// labels no longer match the selector. // labels no longer match the selector.
func (m *PodControllerRefManager) Classify(pods []*api.Pod) ( func (m *PodControllerRefManager) Classify(pods []*v1.Pod) (
matchesAndControlled []*api.Pod, matchesAndControlled []*v1.Pod,
matchesNeedsController []*api.Pod, matchesNeedsController []*v1.Pod,
controlledDoesNotMatch []*api.Pod) { controlledDoesNotMatch []*v1.Pod) {
for i := range pods { for i := range pods {
pod := pods[i] pod := pods[i]
if !IsPodActive(pod) { if !IsPodActive(pod) {
@@ -91,7 +91,7 @@ func (m *PodControllerRefManager) Classify(pods []*api.Pod) (
// getControllerOf returns the controllerRef if controllee has a controller, // getControllerOf returns the controllerRef if controllee has a controller,
// otherwise returns nil. // otherwise returns nil.
func getControllerOf(controllee api.ObjectMeta) *api.OwnerReference { func getControllerOf(controllee v1.ObjectMeta) *v1.OwnerReference {
for _, owner := range controllee.OwnerReferences { for _, owner := range controllee.OwnerReferences {
// controlled by other controller // controlled by other controller
if owner.Controller != nil && *owner.Controller == true { if owner.Controller != nil && *owner.Controller == true {
@@ -103,7 +103,7 @@ func getControllerOf(controllee api.ObjectMeta) *api.OwnerReference {
// AdoptPod sends a patch to take control of the pod. It returns the error if // AdoptPod sends a patch to take control of the pod. It returns the error if
// the patching fails. // the patching fails.
func (m *PodControllerRefManager) AdoptPod(pod *api.Pod) error { func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
// we should not adopt any pods if the controller is about to be deleted // we should not adopt any pods if the controller is about to be deleted
if m.controllerObject.DeletionTimestamp != nil { if m.controllerObject.DeletionTimestamp != nil {
return fmt.Errorf("cancel the adopt attempt for pod %s because the controlller is being deleted", return fmt.Errorf("cancel the adopt attempt for pod %s because the controlller is being deleted",
@@ -118,7 +118,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *api.Pod) error {
// ReleasePod sends a patch to free the pod from the control of the controller. // ReleasePod sends a patch to free the pod from the control of the controller.
// It returns the error if the patching fails. 404 and 422 errors are ignored. // It returns the error if the patching fails. 404 and 422 errors are ignored.
func (m *PodControllerRefManager) ReleasePod(pod *api.Pod) error { func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s", glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controllerObject.Name) pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controllerObject.Name)
deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controllerObject.UID, pod.UID) deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controllerObject.UID, pod.UID)
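With the ref manager now operating on v1 metadata, the controllerRef it classifies, adopts, and releases is a v1.OwnerReference whose Controller flag is a *bool. A tiny illustrative constructor for such a reference (ours, not part of this commit):

package controller // assumed; helper name is ours

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
)

// newControllerRef builds the kind of owner reference getControllerOf and
// AdoptPod deal with above; since Controller is a *bool, it is set through
// a local variable.
func newControllerRef(controller v1.ObjectMeta, kind unversioned.GroupVersionKind) *v1.OwnerReference {
	isController := true
	return &v1.OwnerReference{
		APIVersion: kind.GroupVersion().String(),
		Kind:       kind.Kind,
		Name:       controller.Name,
		UID:        controller.UID,
		Controller: &isController,
	}
}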

View File

@@ -26,11 +26,13 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/validation" "k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/clock"
@@ -363,11 +365,11 @@ const (
// created as an interface to allow testing. // created as an interface to allow testing.
type PodControlInterface interface { type PodControlInterface interface {
// CreatePods creates new pods according to the spec. // CreatePods creates new pods according to the spec.
CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error
// CreatePodsOnNode creates a new pod according to the spec on the specified node. // CreatePodsOnNode creates a new pod according to the spec on the specified node.
CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error
// CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller. // CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller.
CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *v1.OwnerReference) error
// DeletePod deletes the pod identified by podID. // DeletePod deletes the pod identified by podID.
DeletePod(namespace string, podID string, object runtime.Object) error DeletePod(namespace string, podID string, object runtime.Object) error
// PatchPod patches the pod. // PatchPod patches the pod.
@@ -382,7 +384,7 @@ type RealPodControl struct {
var _ PodControlInterface = &RealPodControl{} var _ PodControlInterface = &RealPodControl{}
func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set { func getPodsLabelSet(template *v1.PodTemplateSpec) labels.Set {
desiredLabels := make(labels.Set) desiredLabels := make(labels.Set)
for k, v := range template.Labels { for k, v := range template.Labels {
desiredLabels[k] = v desiredLabels[k] = v
@@ -390,18 +392,18 @@ func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set {
return desiredLabels return desiredLabels
} }
func getPodsFinalizers(template *api.PodTemplateSpec) []string { func getPodsFinalizers(template *v1.PodTemplateSpec) []string {
desiredFinalizers := make([]string, len(template.Finalizers)) desiredFinalizers := make([]string, len(template.Finalizers))
copy(desiredFinalizers, template.Finalizers) copy(desiredFinalizers, template.Finalizers)
return desiredFinalizers return desiredFinalizers
} }
func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) { func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
desiredAnnotations := make(labels.Set) desiredAnnotations := make(labels.Set)
for k, v := range template.Annotations { for k, v := range template.Annotations {
desiredAnnotations[k] = v desiredAnnotations[k] = v
} }
createdByRef, err := api.GetReference(object) createdByRef, err := v1.GetReference(object)
if err != nil { if err != nil {
return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err) return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
} }
@@ -409,15 +411,15 @@ func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object)
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning. // We need to consistently handle this case of annotation versioning.
codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"})
createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{ createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef, Reference: *createdByRef,
}) })
if err != nil { if err != nil {
return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err) return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
} }
desiredAnnotations[api.CreatedByAnnotation] = string(createdByRefJson) desiredAnnotations[v1.CreatedByAnnotation] = string(createdByRefJson)
return desiredAnnotations, nil return desiredAnnotations, nil
} }
@@ -430,11 +432,11 @@ func getPodsPrefix(controllerName string) string {
return prefix return prefix
} }
func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error { func (r RealPodControl) CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
return r.createPods("", namespace, template, object, nil) return r.createPods("", namespace, template, object, nil)
} }
func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, controllerObject runtime.Object, controllerRef *api.OwnerReference) error { func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *v1.OwnerReference) error {
if controllerRef == nil { if controllerRef == nil {
return fmt.Errorf("controllerRef is nil") return fmt.Errorf("controllerRef is nil")
} }
@@ -450,7 +452,7 @@ func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *
return r.createPods("", namespace, template, controllerObject, controllerRef) return r.createPods("", namespace, template, controllerObject, controllerRef)
} }
func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
return r.createPods(nodeName, namespace, template, object, nil) return r.createPods(nodeName, namespace, template, object, nil)
} }
@@ -459,7 +461,7 @@ func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
return err return err
} }
func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object, controllerRef *api.OwnerReference) (*api.Pod, error) { func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *v1.OwnerReference) (*v1.Pod, error) {
desiredLabels := getPodsLabelSet(template) desiredLabels := getPodsLabelSet(template)
desiredFinalizers := getPodsFinalizers(template) desiredFinalizers := getPodsFinalizers(template)
desiredAnnotations, err := getPodsAnnotationSet(template, parentObject) desiredAnnotations, err := getPodsAnnotationSet(template, parentObject)
@@ -472,8 +474,8 @@ func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Obje
} }
prefix := getPodsPrefix(accessor.GetName()) prefix := getPodsPrefix(accessor.GetName())
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: desiredLabels, Labels: desiredLabels,
Annotations: desiredAnnotations, Annotations: desiredAnnotations,
GenerateName: prefix, GenerateName: prefix,
@@ -483,13 +485,15 @@ func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Obje
if controllerRef != nil { if controllerRef != nil {
pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef) pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
} }
if err := api.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil { clone, err := conversion.NewCloner().DeepCopy(&template.Spec)
return nil, fmt.Errorf("unable to convert pod template: %v", err) if err != nil {
return nil, err
} }
pod.Spec = *clone.(*v1.PodSpec)
return pod, nil return pod, nil
} }
func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error { func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *v1.OwnerReference) error {
pod, err := GetPodFromTemplate(template, object, controllerRef) pod, err := GetPodFromTemplate(template, object, controllerRef)
if err != nil { if err != nil {
return err return err
@@ -501,7 +505,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
return fmt.Errorf("unable to create pods, no labels") return fmt.Errorf("unable to create pods, no labels")
} }
if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil { if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
r.Recorder.Eventf(object, api.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err) r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
return fmt.Errorf("unable to create pods: %v", err) return fmt.Errorf("unable to create pods: %v", err)
} else { } else {
accessor, err := meta.Accessor(object) accessor, err := meta.Accessor(object)
@@ -510,7 +514,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
return nil return nil
} }
glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name) glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name) r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)
} }
return nil return nil
} }
@@ -522,18 +526,18 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
} }
glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID) glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil { if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
r.Recorder.Eventf(object, api.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err) r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
return fmt.Errorf("unable to delete pods: %v", err) return fmt.Errorf("unable to delete pods: %v", err)
} else { } else {
r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID) r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
} }
return nil return nil
} }
type FakePodControl struct { type FakePodControl struct {
sync.Mutex sync.Mutex
Templates []api.PodTemplateSpec Templates []v1.PodTemplateSpec
ControllerRefs []api.OwnerReference ControllerRefs []v1.OwnerReference
DeletePodName []string DeletePodName []string
Patches [][]byte Patches [][]byte
Err error Err error
@@ -551,7 +555,7 @@ func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error {
return nil return nil
} }
func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error { func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec, object runtime.Object) error {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
f.Templates = append(f.Templates, *spec) f.Templates = append(f.Templates, *spec)
@@ -561,7 +565,7 @@ func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec,
return nil return nil
} }
func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error { func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *v1.OwnerReference) error {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
f.Templates = append(f.Templates, *spec) f.Templates = append(f.Templates, *spec)
@@ -572,7 +576,7 @@ func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *api
return nil return nil
} }
func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error { func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
f.Templates = append(f.Templates, *template) f.Templates = append(f.Templates, *template)
@@ -596,13 +600,13 @@ func (f *FakePodControl) Clear() {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
f.DeletePodName = []string{} f.DeletePodName = []string{}
f.Templates = []api.PodTemplateSpec{} f.Templates = []v1.PodTemplateSpec{}
f.ControllerRefs = []api.OwnerReference{} f.ControllerRefs = []v1.OwnerReference{}
f.Patches = [][]byte{} f.Patches = [][]byte{}
} }
// ByLogging allows custom sorting of pods so the best one can be picked for getting its logs. // ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
type ByLogging []*api.Pod type ByLogging []*v1.Pod
func (s ByLogging) Len() int { return len(s) } func (s ByLogging) Len() int { return len(s) }
func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
@@ -613,18 +617,18 @@ func (s ByLogging) Less(i, j int) bool {
return len(s[i].Spec.NodeName) > 0 return len(s[i].Spec.NodeName) > 0
} }
// 2. PodRunning < PodUnknown < PodPending // 2. PodRunning < PodUnknown < PodPending
m := map[api.PodPhase]int{api.PodRunning: 0, api.PodUnknown: 1, api.PodPending: 2} m := map[v1.PodPhase]int{v1.PodRunning: 0, v1.PodUnknown: 1, v1.PodPending: 2}
if m[s[i].Status.Phase] != m[s[j].Status.Phase] { if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
return m[s[i].Status.Phase] < m[s[j].Status.Phase] return m[s[i].Status.Phase] < m[s[j].Status.Phase]
} }
// 3. ready < not ready // 3. ready < not ready
if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
return api.IsPodReady(s[i]) return v1.IsPodReady(s[i])
} }
// TODO: take availability into account when we push minReadySeconds information from deployment into pods, // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065 // see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for more time < less time < empty time // 4. Been ready for more time < less time < empty time
if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i])) return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
} }
// 5. Pods with containers with higher restart counts < lower restart counts // 5. Pods with containers with higher restart counts < lower restart counts
@@ -639,7 +643,7 @@ func (s ByLogging) Less(i, j int) bool {
} }
// ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete. // ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
type ActivePods []*api.Pod type ActivePods []*v1.Pod
func (s ActivePods) Len() int { return len(s) } func (s ActivePods) Len() int { return len(s) }
func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
@@ -651,20 +655,20 @@ func (s ActivePods) Less(i, j int) bool {
return len(s[i].Spec.NodeName) == 0 return len(s[i].Spec.NodeName) == 0
} }
// 2. PodPending < PodUnknown < PodRunning // 2. PodPending < PodUnknown < PodRunning
m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2} m := map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2}
if m[s[i].Status.Phase] != m[s[j].Status.Phase] { if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
return m[s[i].Status.Phase] < m[s[j].Status.Phase] return m[s[i].Status.Phase] < m[s[j].Status.Phase]
} }
// 3. Not ready < ready // 3. Not ready < ready
// If only one of the pods is not ready, the not ready one is smaller // If only one of the pods is not ready, the not ready one is smaller
if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) { if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
return !api.IsPodReady(s[i]) return !v1.IsPodReady(s[i])
} }
// TODO: take availability into account when we push minReadySeconds information from deployment into pods, // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
// see https://github.com/kubernetes/kubernetes/issues/22065 // see https://github.com/kubernetes/kubernetes/issues/22065
// 4. Been ready for empty time < less time < more time // 4. Been ready for empty time < less time < more time
// If both pods are ready, the latest ready one is smaller // If both pods are ready, the latest ready one is smaller
if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) { if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j])) return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
} }
// 5. Pods with containers with higher restart counts < lower restart counts // 5. Pods with containers with higher restart counts < lower restart counts
@@ -687,11 +691,11 @@ func afterOrZero(t1, t2 unversioned.Time) bool {
return t1.After(t2.Time) return t1.After(t2.Time)
} }
func podReadyTime(pod *api.Pod) unversioned.Time { func podReadyTime(pod *v1.Pod) unversioned.Time {
if api.IsPodReady(pod) { if v1.IsPodReady(pod) {
for _, c := range pod.Status.Conditions { for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions // we only care about pod ready conditions
if c.Type == api.PodReady && c.Status == api.ConditionTrue { if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
return c.LastTransitionTime return c.LastTransitionTime
} }
} }
@@ -699,7 +703,7 @@ func podReadyTime(pod *api.Pod) unversioned.Time {
return unversioned.Time{} return unversioned.Time{}
} }
func maxContainerRestarts(pod *api.Pod) int { func maxContainerRestarts(pod *v1.Pod) int {
maxRestarts := 0 maxRestarts := 0
for _, c := range pod.Status.ContainerStatuses { for _, c := range pod.Status.ContainerStatuses {
maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount)) maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
@@ -708,8 +712,8 @@ func maxContainerRestarts(pod *api.Pod) int {
} }
// FilterActivePods returns pods that have not terminated. // FilterActivePods returns pods that have not terminated.
func FilterActivePods(pods []*api.Pod) []*api.Pod { func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
var result []*api.Pod var result []*v1.Pod
for _, p := range pods { for _, p := range pods {
if IsPodActive(p) { if IsPodActive(p) {
result = append(result, p) result = append(result, p)
@@ -721,9 +725,9 @@ func FilterActivePods(pods []*api.Pod) []*api.Pod {
return result return result
} }
func IsPodActive(p *api.Pod) bool { func IsPodActive(p *v1.Pod) bool {
return api.PodSucceeded != p.Status.Phase && return v1.PodSucceeded != p.Status.Phase &&
api.PodFailed != p.Status.Phase && v1.PodFailed != p.Status.Phase &&
p.DeletionTimestamp == nil p.DeletionTimestamp == nil
} }
@@ -733,7 +737,7 @@ func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions
for i := range replicaSets { for i := range replicaSets {
rs := replicaSets[i] rs := replicaSets[i]
if rs != nil && rs.Spec.Replicas > 0 { if rs != nil && *(rs.Spec.Replicas) > 0 {
active = append(active, replicaSets[i]) active = append(active, replicaSets[i])
} }
} }
@@ -744,12 +748,12 @@ func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions
// It's used so we consistently use the same key scheme in this module. // It's used so we consistently use the same key scheme in this module.
// It does exactly what cache.MetaNamespaceKeyFunc would have done // It does exactly what cache.MetaNamespaceKeyFunc would have done
// except there's not possibility for error since we know the exact type. // except there's not possibility for error since we know the exact type.
func PodKey(pod *api.Pod) string { func PodKey(pod *v1.Pod) string {
return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name) return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
} }
// ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker. // ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker.
type ControllersByCreationTimestamp []*api.ReplicationController type ControllersByCreationTimestamp []*v1.ReplicationController
func (o ControllersByCreationTimestamp) Len() int { return len(o) } func (o ControllersByCreationTimestamp) Len() int { return len(o) }
func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@@ -779,10 +783,10 @@ type ReplicaSetsBySizeOlder []*extensions.ReplicaSet
func (o ReplicaSetsBySizeOlder) Len() int { return len(o) } func (o ReplicaSetsBySizeOlder) Len() int { return len(o) }
func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsBySizeOlder) Less(i, j int) bool { func (o ReplicaSetsBySizeOlder) Less(i, j int) bool {
if o[i].Spec.Replicas == o[j].Spec.Replicas { if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
return ReplicaSetsByCreationTimestamp(o).Less(i, j) return ReplicaSetsByCreationTimestamp(o).Less(i, j)
} }
return o[i].Spec.Replicas > o[j].Spec.Replicas return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
} }
// ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker. // ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
@@ -792,8 +796,8 @@ type ReplicaSetsBySizeNewer []*extensions.ReplicaSet
func (o ReplicaSetsBySizeNewer) Len() int { return len(o) } func (o ReplicaSetsBySizeNewer) Len() int { return len(o) }
func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o ReplicaSetsBySizeNewer) Less(i, j int) bool { func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
if o[i].Spec.Replicas == o[j].Spec.Replicas { if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
return ReplicaSetsByCreationTimestamp(o).Less(j, i) return ReplicaSetsByCreationTimestamp(o).Less(j, i)
} }
return o[i].Spec.Replicas > o[j].Spec.Replicas return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
} }
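The new dereferences in the filter and sort helpers above reflect that extensions/v1beta1 declares a ReplicaSet's Spec.Replicas as an *int32. A small local helper (ours, not in the commit) that makes the nil case explicit:

package controller // assumed; helper name is ours

import (
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
)

// replicasOf returns the desired replica count of a ReplicaSet, treating a
// nil Spec.Replicas as zero; the helpers above dereference the pointer
// directly and so assume it has been populated.
func replicasOf(rs *extensions.ReplicaSet) int32 {
	if rs.Spec.Replicas == nil {
		return 0
	}
	return *rs.Spec.Replicas
}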

View File

@@ -17,6 +17,7 @@ limitations under the License.
package controller package controller
import ( import (
"encoding/json"
"fmt" "fmt"
"math/rand" "math/rand"
"net/http/httptest" "net/http/httptest"
@@ -26,12 +27,12 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -52,36 +53,36 @@ func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectat
return &ControllerExpectations{ttlStore}, fakeClock return &ControllerExpectations{ttlStore}, fakeClock
} }
func newReplicationController(replicas int) *api.ReplicationController { func newReplicationController(replicas int) *v1.ReplicationController {
rc := &api.ReplicationController{ rc := &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"type": "production", "type": "production",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "foo/bar", Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}, },
}, },
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{ NodeSelector: map[string]string{
"baz": "blah", "baz": "blah",
}, },
@@ -93,23 +94,23 @@ func newReplicationController(replicas int) *api.ReplicationController {
} }
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store. // create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController) *api.PodList { func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController) *v1.PodList {
pods := []api.Pod{} pods := []v1.Pod{}
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
newPod := api.Pod{ newPod := v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod%d", i), Name: fmt.Sprintf("pod%d", i),
Labels: rc.Spec.Selector, Labels: rc.Spec.Selector,
Namespace: rc.Namespace, Namespace: rc.Namespace,
}, },
Status: api.PodStatus{Phase: status}, Status: v1.PodStatus{Phase: status},
} }
if store != nil { if store != nil {
store.Add(&newPod) store.Add(&newPod)
} }
pods = append(pods, newPod) pods = append(pods, newPod)
} }
return &api.PodList{ return &v1.PodList{
Items: pods, Items: pods,
} }
} }
@@ -187,7 +188,7 @@ func TestControllerExpectations(t *testing.T) {
func TestUIDExpectations(t *testing.T) { func TestUIDExpectations(t *testing.T) {
uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations()) uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations())
rcList := []*api.ReplicationController{ rcList := []*v1.ReplicationController{
newReplicationController(2), newReplicationController(2),
newReplicationController(1), newReplicationController(1),
newReplicationController(0), newReplicationController(0),
@@ -200,7 +201,7 @@ func TestUIDExpectations(t *testing.T) {
rcName := fmt.Sprintf("rc-%v", i) rcName := fmt.Sprintf("rc-%v", i)
rc.Name = rcName rc.Name = rcName
rc.Spec.Selector[rcName] = rcName rc.Spec.Selector[rcName] = rcName
podList := newPodList(nil, 5, api.PodRunning, rc) podList := newPodList(nil, 5, v1.PodRunning, rc)
rcKey, err := KeyFunc(rc) rcKey, err := KeyFunc(rc)
if err != nil { if err != nil {
t.Fatalf("Couldn't get key for object %#v: %v", rc, err) t.Fatalf("Couldn't get key for object %#v: %v", rc, err)
@@ -237,15 +238,15 @@ func TestUIDExpectations(t *testing.T) {
} }
func TestCreatePods(t *testing.T) { func TestCreatePods(t *testing.T) {
ns := api.NamespaceDefault ns := v1.NamespaceDefault
body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}}) body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{ fakeHandler := utiltesting.FakeHandler{
StatusCode: 200, StatusCode: 200,
ResponseBody: string(body), ResponseBody: string(body),
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
podControl := RealPodControl{ podControl := RealPodControl{
KubeClient: clientset, KubeClient: clientset,
@@ -259,19 +260,20 @@ func TestCreatePods(t *testing.T) {
t.Fatalf("unexpected error: %v", err) t.Fatalf("unexpected error: %v", err)
} }
expectedPod := api.Pod{ expectedPod := v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: controllerSpec.Spec.Template.Labels, Labels: controllerSpec.Spec.Template.Labels,
GenerateName: fmt.Sprintf("%s-", controllerSpec.Name), GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
}, },
Spec: controllerSpec.Spec.Template.Spec, Spec: controllerSpec.Spec.Template.Spec,
} }
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", api.NamespaceDefault, ""), "POST", nil) fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", v1.NamespaceDefault, ""), "POST", nil)
actualPod, err := runtime.Decode(testapi.Default.Codec(), []byte(fakeHandler.RequestBody)) var actualPod = &v1.Pod{}
err := json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
if err != nil { if err != nil {
t.Fatalf("Unexpected error: %v", err) t.Fatalf("Unexpected error: %v", err)
} }
if !api.Semantic.DeepDerivative(&expectedPod, actualPod) { if !v1.Semantic.DeepDerivative(&expectedPod, actualPod) {
t.Logf("Body: %s", fakeHandler.RequestBody) t.Logf("Body: %s", fakeHandler.RequestBody)
t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod) t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod)
} }
@@ -280,15 +282,15 @@ func TestCreatePods(t *testing.T) {
func TestActivePodFiltering(t *testing.T) { func TestActivePodFiltering(t *testing.T) {
// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace. // This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
rc := newReplicationController(0) rc := newReplicationController(0)
podList := newPodList(nil, 5, api.PodRunning, rc) podList := newPodList(nil, 5, v1.PodRunning, rc)
podList.Items[0].Status.Phase = api.PodSucceeded podList.Items[0].Status.Phase = v1.PodSucceeded
podList.Items[1].Status.Phase = api.PodFailed podList.Items[1].Status.Phase = v1.PodFailed
expectedNames := sets.NewString() expectedNames := sets.NewString()
for _, pod := range podList.Items[2:] { for _, pod := range podList.Items[2:] {
expectedNames.Insert(pod.Name) expectedNames.Insert(pod.Name)
} }
var podPointers []*api.Pod var podPointers []*v1.Pod
for i := range podList.Items { for i := range podList.Items {
podPointers = append(podPointers, &podList.Items[i]) podPointers = append(podPointers, &podList.Items[i])
} }
@@ -306,55 +308,55 @@ func TestSortingActivePods(t *testing.T) {
numPods := 9 numPods := 9
// This rc is not needed by the test, only the newPodList to give the pods labels/a namespace. // This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
rc := newReplicationController(0) rc := newReplicationController(0)
podList := newPodList(nil, numPods, api.PodRunning, rc) podList := newPodList(nil, numPods, v1.PodRunning, rc)
pods := make([]*api.Pod, len(podList.Items)) pods := make([]*v1.Pod, len(podList.Items))
for i := range podList.Items { for i := range podList.Items {
pods[i] = &podList.Items[i] pods[i] = &podList.Items[i]
} }
// pods[0] is not scheduled yet. // pods[0] is not scheduled yet.
pods[0].Spec.NodeName = "" pods[0].Spec.NodeName = ""
pods[0].Status.Phase = api.PodPending pods[0].Status.Phase = v1.PodPending
// pods[1] is scheduled but pending. // pods[1] is scheduled but pending.
pods[1].Spec.NodeName = "bar" pods[1].Spec.NodeName = "bar"
pods[1].Status.Phase = api.PodPending pods[1].Status.Phase = v1.PodPending
// pods[2] is unknown. // pods[2] is unknown.
pods[2].Spec.NodeName = "foo" pods[2].Spec.NodeName = "foo"
pods[2].Status.Phase = api.PodUnknown pods[2].Status.Phase = v1.PodUnknown
// pods[3] is running but not ready. // pods[3] is running but not ready.
pods[3].Spec.NodeName = "foo" pods[3].Spec.NodeName = "foo"
pods[3].Status.Phase = api.PodRunning pods[3].Status.Phase = v1.PodRunning
// pods[4] is running and ready but without LastTransitionTime. // pods[4] is running and ready but without LastTransitionTime.
now := unversioned.Now() now := unversioned.Now()
pods[4].Spec.NodeName = "foo" pods[4].Spec.NodeName = "foo"
pods[4].Status.Phase = api.PodRunning pods[4].Status.Phase = v1.PodRunning
pods[4].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}} pods[4].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
pods[4].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}} pods[4].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[5] is running and ready and with LastTransitionTime. // pods[5] is running and ready and with LastTransitionTime.
pods[5].Spec.NodeName = "foo" pods[5].Spec.NodeName = "foo"
pods[5].Status.Phase = api.PodRunning pods[5].Status.Phase = v1.PodRunning
pods[5].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: now}} pods[5].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: now}}
pods[5].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}} pods[5].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[6] is running ready for a longer time than pods[5]. // pods[6] is running ready for a longer time than pods[5].
then := unversioned.Time{Time: now.AddDate(0, -1, 0)} then := unversioned.Time{Time: now.AddDate(0, -1, 0)}
pods[6].Spec.NodeName = "foo" pods[6].Spec.NodeName = "foo"
pods[6].Status.Phase = api.PodRunning pods[6].Status.Phase = v1.PodRunning
pods[6].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}} pods[6].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[6].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}} pods[6].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
// pods[7] has lower container restart count than pods[6]. // pods[7] has lower container restart count than pods[6].
pods[7].Spec.NodeName = "foo" pods[7].Spec.NodeName = "foo"
pods[7].Status.Phase = api.PodRunning pods[7].Status.Phase = v1.PodRunning
pods[7].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}} pods[7].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[7].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}} pods[7].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
pods[7].CreationTimestamp = now pods[7].CreationTimestamp = now
// pods[8] is older than pods[7]. // pods[8] is older than pods[7].
pods[8].Spec.NodeName = "foo" pods[8].Spec.NodeName = "foo"
pods[8].Status.Phase = api.PodRunning pods[8].Status.Phase = v1.PodRunning
pods[8].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}} pods[8].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
pods[8].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}} pods[8].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
pods[8].CreationTimestamp = then pods[8].CreationTimestamp = then
getOrder := func(pods []*api.Pod) []string { getOrder := func(pods []*v1.Pod) []string {
names := make([]string, len(pods)) names := make([]string, len(pods))
for i := range pods { for i := range pods {
names[i] = pods[i].Name names[i] = pods[i].Name
@@ -366,7 +368,7 @@ func TestSortingActivePods(t *testing.T) {
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
idx := rand.Perm(numPods) idx := rand.Perm(numPods)
randomizedPods := make([]*api.Pod, numPods) randomizedPods := make([]*v1.Pod, numPods)
for j := 0; j < numPods; j++ { for j := 0; j < numPods; j++ {
randomizedPods[j] = pods[idx[j]] randomizedPods[j] = pods[idx[j]]
} }
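The fixtures above encode the priority order the active-pods sort is expected to produce: unassigned pods first, then by phase (pending, unknown, running), then not-ready before ready, then pods that became ready more recently before ones that have been ready longer, then higher container restart counts before lower, and finally newer pods before older ones. Below is a minimal, standalone sketch of that ranking using hand-rolled stand-in types (not the real v1.Pod or the controller's sort type), just to make the comparison order concrete.

package main

import (
	"fmt"
	"sort"
	"time"
)

// pod is a simplified stand-in for v1.Pod with only the fields the ranking inspects.
type pod struct {
	name       string
	assigned   bool      // Spec.NodeName != ""
	phase      int       // 0 = Pending, 1 = Unknown, 2 = Running
	ready      bool
	readySince time.Time // zero value models a Ready condition without LastTransitionTime
	restarts   int32     // max container restart count
	created    time.Time
}

// afterOrZero treats a zero time as "later than anything", so a ready pod with
// no LastTransitionTime sorts ahead of one with a recorded transition time.
func afterOrZero(t1, t2 time.Time) bool {
	if t1.IsZero() || t2.IsZero() {
		return t1.IsZero()
	}
	return t1.After(t2)
}

// less returns true when a should be deleted before b, mirroring the order the
// test fixtures above describe.
func less(a, b pod) bool {
	if a.assigned != b.assigned {
		return !a.assigned // unassigned pods go first
	}
	if a.phase != b.phase {
		return a.phase < b.phase // pending < unknown < running
	}
	if a.ready != b.ready {
		return !a.ready // not-ready before ready
	}
	if a.ready && !a.readySince.Equal(b.readySince) {
		return afterOrZero(a.readySince, b.readySince) // recently ready before long-ready
	}
	if a.restarts != b.restarts {
		return a.restarts > b.restarts // more restarts deleted first
	}
	return a.created.After(b.created) // newer pods deleted first
}

func main() {
	now := time.Now()
	then := now.AddDate(0, -1, 0)
	pods := []pod{
		{name: "old-and-ready", assigned: true, phase: 2, ready: true, readySince: then, restarts: 2, created: then},
		{name: "unassigned"},
		{name: "recently-ready", assigned: true, phase: 2, ready: true, readySince: now, restarts: 3, created: now},
	}
	sort.Slice(pods, func(i, j int) bool { return less(pods[i], pods[j]) })
	for _, p := range pods {
		fmt.Println(p.name) // unassigned, recently-ready, old-and-ready
	}
}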

View File

@@ -34,14 +34,13 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@@ -63,7 +62,7 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset. // TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter())
@@ -74,7 +73,7 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
jobControl: realJobControl{KubeClient: kubeClient}, jobControl: realJobControl{KubeClient: kubeClient},
sjControl: &realSJControl{KubeClient: kubeClient}, sjControl: &realSJControl{KubeClient: kubeClient},
podControl: &realPodControl{KubeClient: kubeClient}, podControl: &realPodControl{KubeClient: kubeClient},
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "cronjob-controller"}), recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "cronjob-controller"}),
} }
return jm return jm
@@ -97,7 +96,7 @@ func (jm *CronJobController) Run(stopCh <-chan struct{}) {
// SyncAll lists all the CronJobs and Jobs and reconciles them. // SyncAll lists all the CronJobs and Jobs and reconciles them.
func (jm *CronJobController) SyncAll() { func (jm *CronJobController) SyncAll() {
sjl, err := jm.kubeClient.Batch().CronJobs(api.NamespaceAll).List(api.ListOptions{}) sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(v1.NamespaceAll).List(v1.ListOptions{})
if err != nil { if err != nil {
glog.Errorf("Error listing cronjobs: %v", err) glog.Errorf("Error listing cronjobs: %v", err)
return return
@@ -105,7 +104,7 @@ func (jm *CronJobController) SyncAll() {
sjs := sjl.Items sjs := sjl.Items
glog.V(4).Infof("Found %d cronjobs", len(sjs)) glog.V(4).Infof("Found %d cronjobs", len(sjs))
jl, err := jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(api.ListOptions{}) jl, err := jm.kubeClient.BatchV2alpha1().Jobs(v1.NamespaceAll).List(v1.ListOptions{})
if err != nil { if err != nil {
glog.Errorf("Error listing jobs") glog.Errorf("Error listing jobs")
return return
@@ -131,8 +130,8 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
for i := range js { for i := range js {
j := js[i] j := js[i]
found := inActiveList(sj, j.ObjectMeta.UID) found := inActiveList(sj, j.ObjectMeta.UID)
if !found && !job.IsJobFinished(&j) { if !found && !IsJobFinished(&j) {
recorder.Eventf(&sj, api.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name) recorder.Eventf(&sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
// We found an unfinished job that has us as the parent, but it is not in our Active list. // We found an unfinished job that has us as the parent, but it is not in our Active list.
// This could happen if we crashed right after creating the Job and before updating the status, // This could happen if we crashed right after creating the Job and before updating the status,
// or if our jobs list is newer than our sj status after a relist, or if someone intentionally created // or if our jobs list is newer than our sj status after a relist, or if someone intentionally created
@@ -143,10 +142,10 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
// user has permission to create a job within a namespace, then they have permission to make any scheduledJob // user has permission to create a job within a namespace, then they have permission to make any scheduledJob
// in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way. // in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way.
// TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about? // TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about?
} else if found && job.IsJobFinished(&j) { } else if found && IsJobFinished(&j) {
deleteFromActiveList(&sj, j.ObjectMeta.UID) deleteFromActiveList(&sj, j.ObjectMeta.UID)
// TODO: event to call out failure vs success. // TODO: event to call out failure vs success.
recorder.Eventf(&sj, api.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name) recorder.Eventf(&sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
} }
} }
updatedSJ, err := sjc.UpdateStatus(&sj) updatedSJ, err := sjc.UpdateStatus(&sj)
@@ -209,7 +208,7 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog) glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
job, err := jc.GetJob(j.Namespace, j.Name) job, err := jc.GetJob(j.Namespace, j.Name)
if err != nil { if err != nil {
recorder.Eventf(&sj, api.EventTypeWarning, "FailedGet", "Get job: %v", err) recorder.Eventf(&sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
return return
} }
// scale job down to 0 // scale job down to 0
@@ -218,16 +217,16 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
job.Spec.Parallelism = &zero job.Spec.Parallelism = &zero
job, err = jc.UpdateJob(job.Namespace, job) job, err = jc.UpdateJob(job.Namespace, job)
if err != nil { if err != nil {
recorder.Eventf(&sj, api.EventTypeWarning, "FailedUpdate", "Update job: %v", err) recorder.Eventf(&sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
return return
} }
} }
// remove all pods... // remove all pods...
selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector) selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector)
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
podList, err := pc.ListPods(job.Namespace, options) podList, err := pc.ListPods(job.Namespace, options)
if err != nil { if err != nil {
recorder.Eventf(&sj, api.EventTypeWarning, "FailedList", "List job-pods: %v", err) recorder.Eventf(&sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
} }
errList := []error{} errList := []error{}
for _, pod := range podList.Items { for _, pod := range podList.Items {
@@ -240,18 +239,18 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
} }
} }
if len(errList) != 0 { if len(errList) != 0 {
recorder.Eventf(&sj, api.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList)) recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
return return
} }
// ... the job itself... // ... the job itself...
if err := jc.DeleteJob(job.Namespace, job.Name); err != nil { if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
recorder.Eventf(&sj, api.EventTypeWarning, "FailedDelete", "Deleted job: %v", err) recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err) glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
return return
} }
// ... and its reference from active list // ... and its reference from active list
deleteFromActiveList(&sj, job.ObjectMeta.UID) deleteFromActiveList(&sj, job.ObjectMeta.UID)
recorder.Eventf(&sj, api.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", j.Name) recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", j.Name)
} }
} }
@@ -262,11 +261,11 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
} }
jobResp, err := jc.CreateJob(sj.Namespace, jobReq) jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
if err != nil { if err != nil {
recorder.Eventf(&sj, api.EventTypeWarning, "FailedCreate", "Error creating job: %v", err) recorder.Eventf(&sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
return return
} }
glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog) glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
recorder.Eventf(&sj, api.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name) recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
// ------------------------------------------------------------------ // // ------------------------------------------------------------------ //
@@ -293,6 +292,6 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
return return
} }
func getRef(object runtime.Object) (*api.ObjectReference, error) { func getRef(object runtime.Object) (*v1.ObjectReference, error) {
return api.GetReference(object) return v1.GetReference(object)
} }
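One behavioural detail worth calling out in the hunk above: with the versioned clientset, ListOptions carries the label selector as a plain string, which is why the code now passes selector.String() instead of the selector object itself. The following standalone sketch (hand-rolled types, not the real labels or v1 packages) shows the kind of string such a selector renders to.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// renderSelector turns a match-labels map into the "k1=v1,k2=v2" form that a
// string-typed LabelSelector field expects. Keys are sorted so the output is deterministic.
func renderSelector(matchLabels map[string]string) string {
	keys := make([]string, 0, len(matchLabels))
	for k := range matchLabels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, fmt.Sprintf("%s=%s", k, matchLabels[k]))
	}
	return strings.Join(parts, ",")
}

// listOptions is a minimal stand-in for v1.ListOptions.
type listOptions struct {
	LabelSelector string
}

func main() {
	sel := map[string]string{"controller-uid": "1a2b3c", "job-name": "pi-1234"}
	opts := listOptions{LabelSelector: renderSelector(sel)}
	fmt.Println(opts.LabelSelector) // controller-uid=1a2b3c,job-name=pi-1234
}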

View File

@@ -20,9 +20,9 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
) )
@@ -75,7 +75,7 @@ func justAfterThePriorHour() time.Time {
// returns a cronJob with some fields filled in. // returns a cronJob with some fields filled in.
func cronJob() batch.CronJob { func cronJob() batch.CronJob {
return batch.CronJob{ return batch.CronJob{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "mycronjob", Name: "mycronjob",
Namespace: "snazzycats", Namespace: "snazzycats",
UID: types.UID("1a2b3c"), UID: types.UID("1a2b3c"),
@@ -86,7 +86,7 @@ func cronJob() batch.CronJob {
Schedule: "* * * * ?", Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent, ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{ JobTemplate: batch.JobTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"a": "b"}, Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"}, Annotations: map[string]string{"x": "y"},
}, },
@@ -101,14 +101,14 @@ func jobSpec() batch.JobSpec {
return batch.JobSpec{ return batch.JobSpec{
Parallelism: &one, Parallelism: &one,
Completions: &one, Completions: &one,
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"foo": "bar", "foo": "bar",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{Image: "foo/bar"}, {Image: "foo/bar"},
}, },
}, },
@@ -118,10 +118,10 @@ func jobSpec() batch.JobSpec {
func newJob(UID string) batch.Job { func newJob(UID string) batch.Job {
return batch.Job{ return batch.Job{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: types.UID(UID), UID: types.UID(UID),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob", SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob",
}, },
Spec: jobSpec(), Spec: jobSpec(),
@@ -213,7 +213,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
job.UID = "1234" job.UID = "1234"
job.Namespace = "" job.Namespace = ""
if tc.stillActive { if tc.stillActive {
sj.Status.Active = []api.ObjectReference{{UID: job.UID}} sj.Status.Active = []v1.ObjectReference{{UID: job.UID}}
js = append(js, *job) js = append(js, *job)
} }
} else { } else {
@@ -271,7 +271,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
// TestSyncOne_Status tests sj.UpdateStatus in SyncOne // TestSyncOne_Status tests sj.UpdateStatus in SyncOne
func TestSyncOne_Status(t *testing.T) { func TestSyncOne_Status(t *testing.T) {
finishedJob := newJob("1") finishedJob := newJob("1")
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: api.ConditionTrue}) finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: v1.ConditionTrue})
unexpectedJob := newJob("2") unexpectedJob := newJob("2")
testCases := map[string]struct { testCases := map[string]struct {
@@ -360,7 +360,7 @@ func TestSyncOne_Status(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err) t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
} }
sj.Status.Active = []api.ObjectReference{*ref} sj.Status.Active = []v1.ObjectReference{*ref}
jobs = append(jobs, finishedJob) jobs = append(jobs, finishedJob)
} }
if tc.hasUnexpectedJob { if tc.hasUnexpectedJob {
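The status tests above follow the usual table-driven pattern: a map of named cases, each describing the starting state and the expected outcome. A generic sketch of that pattern follows, with made-up case fields rather than the real cronjob test fixtures.

package main

import "testing"

// TestTableDrivenSketch shows the shape of the map-of-cases pattern used by
// TestSyncOne_Status: the key names the case, the struct holds inputs and
// expectations, and the loop reports failures per case name.
func TestTableDrivenSketch(t *testing.T) {
	testCases := map[string]struct {
		active   int
		finished bool
		expected int
	}{
		"no active jobs":        {active: 0, finished: false, expected: 0},
		"one active, none done": {active: 1, finished: false, expected: 1},
		"one active, one done":  {active: 1, finished: true, expected: 0},
	}
	for name, tc := range testCases {
		got := tc.active
		if tc.finished {
			got-- // a finished job is removed from the active list
		}
		if got != tc.expected {
			t.Errorf("%s: expected %d active jobs, got %d", name, tc.expected, got)
		}
	}
}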

View File

@@ -20,9 +20,9 @@ import (
"fmt" "fmt"
"sync" "sync"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch" batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
) )
@@ -41,7 +41,7 @@ type realSJControl struct {
var _ sjControlInterface = &realSJControl{} var _ sjControlInterface = &realSJControl{}
func (c *realSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) { func (c *realSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) {
return c.KubeClient.Batch().CronJobs(sj.Namespace).UpdateStatus(sj) return c.KubeClient.BatchV2alpha1().CronJobs(sj.Namespace).UpdateStatus(sj)
} }
// fakeSJControl is the default implementation of sjControlInterface. // fakeSJControl is the default implementation of sjControlInterface.
@@ -97,19 +97,19 @@ func copyAnnotations(template *batch.JobTemplateSpec) labels.Set {
} }
func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) { func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) {
return r.KubeClient.Batch().Jobs(namespace).Get(name) return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name)
} }
func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) { func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
return r.KubeClient.Batch().Jobs(namespace).Update(job) return r.KubeClient.BatchV2alpha1().Jobs(namespace).Update(job)
} }
func (r realJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) { func (r realJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) {
return r.KubeClient.Batch().Jobs(namespace).Create(job) return r.KubeClient.BatchV2alpha1().Jobs(namespace).Create(job)
} }
func (r realJobControl) DeleteJob(namespace string, name string) error { func (r realJobControl) DeleteJob(namespace string, name string) error {
return r.KubeClient.Batch().Jobs(namespace).Delete(name, nil) return r.KubeClient.BatchV2alpha1().Jobs(namespace).Delete(name, nil)
} }
type fakeJobControl struct { type fakeJobControl struct {
@@ -176,7 +176,7 @@ func (f *fakeJobControl) Clear() {
// created as an interface to allow testing. // created as an interface to allow testing.
type podControlInterface interface { type podControlInterface interface {
// ListPods list pods // ListPods list pods
ListPods(namespace string, opts api.ListOptions) (*api.PodList, error) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error)
// DeletePod deletes the pod identified by name. // DeletePod deletes the pod identified by name.
// TODO: delete by UID? // TODO: delete by UID?
DeletePod(namespace string, name string) error DeletePod(namespace string, name string) error
@@ -190,7 +190,7 @@ type realPodControl struct {
var _ podControlInterface = &realPodControl{} var _ podControlInterface = &realPodControl{}
func (r realPodControl) ListPods(namespace string, opts api.ListOptions) (*api.PodList, error) { func (r realPodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
return r.KubeClient.Core().Pods(namespace).List(opts) return r.KubeClient.Core().Pods(namespace).List(opts)
} }
@@ -200,17 +200,17 @@ func (r realPodControl) DeletePod(namespace string, name string) error {
type fakePodControl struct { type fakePodControl struct {
sync.Mutex sync.Mutex
Pods []api.Pod Pods []v1.Pod
DeletePodName []string DeletePodName []string
Err error Err error
} }
var _ podControlInterface = &fakePodControl{} var _ podControlInterface = &fakePodControl{}
func (f *fakePodControl) ListPods(namespace string, opts api.ListOptions) (*api.PodList, error) { func (f *fakePodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
f.Lock() f.Lock()
defer f.Unlock() defer f.Unlock()
return &api.PodList{Items: f.Pods}, nil return &v1.PodList{Items: f.Pods}, nil
} }
func (f *fakePodControl) DeletePod(namespace string, name string) error { func (f *fakePodControl) DeletePod(namespace string, name string) error {
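These real/fake pairs exist so the sync logic can be exercised without an API server: the fake records what it was asked to do and the test asserts on it afterwards. Below is a compact, generic sketch of that pattern with invented names (podDeleter, fakePodDeleter), not the actual podControlInterface.

package main

import (
	"fmt"
	"sync"
)

// podDeleter is a tiny stand-in interface: just enough surface to show how a
// real and a fake implementation can be swapped behind it.
type podDeleter interface {
	DeletePod(namespace, name string) error
}

// fakePodDeleter records the pods it was asked to delete so a test can assert
// on them afterwards; the mutex mirrors the sync.Mutex in fakePodControl.
type fakePodDeleter struct {
	sync.Mutex
	deleted []string
	err     error
}

func (f *fakePodDeleter) DeletePod(namespace, name string) error {
	f.Lock()
	defer f.Unlock()
	if f.err != nil {
		return f.err
	}
	f.deleted = append(f.deleted, namespace+"/"+name)
	return nil
}

func main() {
	var pc podDeleter = &fakePodDeleter{}
	_ = pc.DeletePod("default", "pi-1234-xyz")
	fmt.Println(pc.(*fakePodDeleter).deleted) // [default/pi-1234-xyz]
}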

View File

@@ -26,7 +26,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
) )
@@ -46,7 +47,7 @@ func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {
if sj == nil { if sj == nil {
return return
} }
newActive := []api.ObjectReference{} newActive := []v1.ObjectReference{}
for _, j := range sj.Status.Active { for _, j := range sj.Status.Active {
if j.UID != uid { if j.UID != uid {
newActive = append(newActive, j) newActive = append(newActive, j)
@@ -57,12 +58,12 @@ func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {
// getParentUIDFromJob extracts UID of job's parent and whether it was found // getParentUIDFromJob extracts UID of job's parent and whether it was found
func getParentUIDFromJob(j batch.Job) (types.UID, bool) { func getParentUIDFromJob(j batch.Job) (types.UID, bool) {
creatorRefJson, found := j.ObjectMeta.Annotations[api.CreatedByAnnotation] creatorRefJson, found := j.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !found { if !found {
glog.V(4).Infof("Job with no created-by annotation, name %s namespace %s", j.Name, j.Namespace) glog.V(4).Infof("Job with no created-by annotation, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false return types.UID(""), false
} }
var sr api.SerializedReference var sr v1.SerializedReference
err := json.Unmarshal([]byte(creatorRefJson), &sr) err := json.Unmarshal([]byte(creatorRefJson), &sr)
if err != nil { if err != nil {
glog.V(4).Infof("Job with unparsable created-by annotation, name %s namespace %s: %v", j.Name, j.Namespace, err) glog.V(4).Infof("Job with unparsable created-by annotation, name %s namespace %s: %v", j.Name, j.Namespace, err)
@@ -181,12 +182,12 @@ func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job,
if err != nil { if err != nil {
return nil, err return nil, err
} }
annotations[api.CreatedByAnnotation] = string(createdByRefJson) annotations[v1.CreatedByAnnotation] = string(createdByRefJson)
// We want jobs for a given nominal start time to have a deterministic name, to avoid the same job being created twice // We want jobs for a given nominal start time to have a deterministic name, to avoid the same job being created twice
name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime)) name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))
job := &batch.Job{ job := &batch.Job{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: labels, Labels: labels,
Annotations: annotations, Annotations: annotations,
Name: name, Name: name,
@@ -205,7 +206,7 @@ func getTimeHash(scheduledTime time.Time) int64 {
// makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value // makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) { func makeCreatedByRefJson(object runtime.Object) (string, error) {
createdByRef, err := api.GetReference(object) createdByRef, err := v1.GetReference(object)
if err != nil { if err != nil {
return "", fmt.Errorf("unable to get controller reference: %v", err) return "", fmt.Errorf("unable to get controller reference: %v", err)
} }
@@ -213,9 +214,9 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) {
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment. // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning. // We need to consistently handle this case of annotation versioning.
codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"}) codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"})
createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{ createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef, Reference: *createdByRef,
}) })
if err != nil { if err != nil {
@@ -223,3 +224,12 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) {
} }
return string(createdByRefJson), nil return string(createdByRefJson), nil
} }
func IsJobFinished(j *batch.Job) bool {
for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true
}
}
return false
}
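The two halves of the created-by machinery above are symmetric: makeCreatedByRefJson serialises an object reference into the annotation, and getParentUIDFromJob parses it back to find the owning CronJob. The standalone sketch below shows that round trip; the types are simplified stand-ins for v1.SerializedReference and v1.ObjectReference, and the annotation key is what CreatedByAnnotation is assumed to expand to.

package main

import (
	"encoding/json"
	"fmt"
)

// createdByAnnotation mirrors the assumed value of v1.CreatedByAnnotation.
const createdByAnnotation = "kubernetes.io/created-by"

type objectReference struct {
	Kind      string `json:"kind"`
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
	UID       string `json:"uid"`
}

type serializedReference struct {
	Kind       string          `json:"kind"`
	APIVersion string          `json:"apiVersion"`
	Reference  objectReference `json:"reference"`
}

// parentUID mimics getParentUIDFromJob: read the annotation, unmarshal the
// serialized reference, and return the owning CronJob's UID if present.
func parentUID(annotations map[string]string) (string, bool) {
	raw, ok := annotations[createdByAnnotation]
	if !ok {
		return "", false
	}
	var sr serializedReference
	if err := json.Unmarshal([]byte(raw), &sr); err != nil {
		return "", false
	}
	if sr.Reference.Kind != "CronJob" {
		return "", false
	}
	return sr.Reference.UID, true
}

func main() {
	ref := serializedReference{
		Kind:       "SerializedReference",
		APIVersion: "v1",
		Reference:  objectReference{Kind: "CronJob", Namespace: "default", Name: "pi", UID: "5ef034e0-1890-11e6-8935-42010af0003e"},
	}
	raw, _ := json.Marshal(ref)
	annotations := map[string]string{createdByAnnotation: string(raw)}
	uid, found := parentUID(annotations)
	fmt.Println(uid, found) // 5ef034e0-1890-11e6-8935-42010af0003e true
}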

View File

@@ -22,9 +22,9 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
//"k8s.io/kubernetes/pkg/controller" //"k8s.io/kubernetes/pkg/controller"
// "k8s.io/kubernetes/pkg/util/rand" // "k8s.io/kubernetes/pkg/util/rand"
@@ -38,7 +38,7 @@ func TestGetJobFromTemplate(t *testing.T) {
var no bool = false var no bool = false
sj := batch.CronJob{ sj := batch.CronJob{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "mycronjob", Name: "mycronjob",
Namespace: "snazzycats", Namespace: "snazzycats",
UID: types.UID("1a2b3c"), UID: types.UID("1a2b3c"),
@@ -48,21 +48,21 @@ func TestGetJobFromTemplate(t *testing.T) {
Schedule: "* * * * ?", Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent, ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{ JobTemplate: batch.JobTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"a": "b"}, Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"}, Annotations: map[string]string{"x": "y"},
}, },
Spec: batch.JobSpec{ Spec: batch.JobSpec{
ActiveDeadlineSeconds: &one, ActiveDeadlineSeconds: &one,
ManualSelector: &no, ManualSelector: &no,
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"foo": "bar", "foo": "bar",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{Image: "foo/bar"}, {Image: "foo/bar"},
}, },
}, },
@@ -86,7 +86,7 @@ func TestGetJobFromTemplate(t *testing.T) {
if len(job.ObjectMeta.Annotations) != 2 { if len(job.ObjectMeta.Annotations) != 2 {
t.Errorf("Wrong number of annotations") t.Errorf("Wrong number of annotations")
} }
v, ok := job.ObjectMeta.Annotations[api.CreatedByAnnotation] v, ok := job.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !ok { if !ok {
t.Errorf("Missing created-by annotation") t.Errorf("Missing created-by annotation")
} }
@@ -102,22 +102,22 @@ func TestGetJobFromTemplate(t *testing.T) {
func TestGetParentUIDFromJob(t *testing.T) { func TestGetParentUIDFromJob(t *testing.T) {
j := &batch.Job{ j := &batch.Job{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: batch.JobSpec{ Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"}, MatchLabels: map[string]string{"foo": "bar"},
}, },
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"foo": "bar", "foo": "bar",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{Image: "foo/bar"}, {Image: "foo/bar"},
}, },
}, },
@@ -126,7 +126,7 @@ func TestGetParentUIDFromJob(t *testing.T) {
Status: batch.JobStatus{ Status: batch.JobStatus{
Conditions: []batch.JobCondition{{ Conditions: []batch.JobCondition{{
Type: batch.JobComplete, Type: batch.JobComplete,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
}}, }},
}, },
} }
@@ -140,7 +140,7 @@ func TestGetParentUIDFromJob(t *testing.T) {
} }
{ {
// Case 2: Has UID annotation // Case 2: Has UID annotation
j.ObjectMeta.Annotations = map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"default","name":"pi","uid":"5ef034e0-1890-11e6-8935-42010af0003e","apiVersion":"extensions","resourceVersion":"427339"}}`} j.ObjectMeta.Annotations = map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"default","name":"pi","uid":"5ef034e0-1890-11e6-8935-42010af0003e","apiVersion":"extensions","resourceVersion":"427339"}}`}
expectedUID := types.UID("5ef034e0-1890-11e6-8935-42010af0003e") expectedUID := types.UID("5ef034e0-1890-11e6-8935-42010af0003e")
@@ -158,9 +158,9 @@ func TestGroupJobsByParent(t *testing.T) {
uid1 := types.UID("11111111-1111-1111-1111-111111111111") uid1 := types.UID("11111111-1111-1111-1111-111111111111")
uid2 := types.UID("22222222-2222-2222-2222-222222222222") uid2 := types.UID("22222222-2222-2222-2222-222222222222")
uid3 := types.UID("33333333-3333-3333-3333-333333333333") uid3 := types.UID("33333333-3333-3333-3333-333333333333")
createdBy1 := map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"11111111-1111-1111-1111-111111111111","apiVersion":"extensions","resourceVersion":"111111"}}`} createdBy1 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"11111111-1111-1111-1111-111111111111","apiVersion":"extensions","resourceVersion":"111111"}}`}
createdBy2 := map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"22222222-2222-2222-2222-222222222222","apiVersion":"extensions","resourceVersion":"222222"}}`} createdBy2 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"22222222-2222-2222-2222-222222222222","apiVersion":"extensions","resourceVersion":"222222"}}`}
createdBy3 := map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"y","name":"pi","uid":"33333333-3333-3333-3333-333333333333","apiVersion":"extensions","resourceVersion":"333333"}}`} createdBy3 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"y","name":"pi","uid":"33333333-3333-3333-3333-333333333333","apiVersion":"extensions","resourceVersion":"333333"}}`}
noCreatedBy := map[string]string{} noCreatedBy := map[string]string{}
{ {
@@ -176,7 +176,7 @@ func TestGroupJobsByParent(t *testing.T) {
{ {
// Case 2: there is one controller with no job. // Case 2: there is one controller with no job.
sjs := []batch.CronJob{ sjs := []batch.CronJob{
{ObjectMeta: api.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}}, {ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
} }
js := []batch.Job{} js := []batch.Job{}
jobsBySj := groupJobsByParent(sjs, js) jobsBySj := groupJobsByParent(sjs, js)
@@ -188,10 +188,10 @@ func TestGroupJobsByParent(t *testing.T) {
{ {
// Case 3: there is one controller with one job it created. // Case 3: there is one controller with one job it created.
sjs := []batch.CronJob{ sjs := []batch.CronJob{
{ObjectMeta: api.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}}, {ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
} }
js := []batch.Job{ js := []batch.Job{
{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}}, {ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
} }
jobsBySj := groupJobsByParent(sjs, js) jobsBySj := groupJobsByParent(sjs, js)
@@ -211,18 +211,18 @@ func TestGroupJobsByParent(t *testing.T) {
// Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers. // Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers.
// There are also two jobs with no created-by annotation. // There are also two jobs with no created-by annotation.
js := []batch.Job{ js := []batch.Job{
{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}}, {ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}}, {ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
{ObjectMeta: api.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}}, {ObjectMeta: v1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: api.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}}, {ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}},
{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}}, {ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}}, {ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: api.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}}, {ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
} }
sjs := []batch.CronJob{ sjs := []batch.CronJob{
{ObjectMeta: api.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}}, {ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: api.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}}, {ObjectMeta: v1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
{ObjectMeta: api.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}}, {ObjectMeta: v1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
} }
jobsBySj := groupJobsByParent(sjs, js) jobsBySj := groupJobsByParent(sjs, js)
@@ -270,9 +270,9 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) {
} }
sj := batch.CronJob{ sj := batch.CronJob{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "mycronjob", Name: "mycronjob",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
UID: types.UID("1a2b3c"), UID: types.UID("1a2b3c"),
}, },
Spec: batch.CronJobSpec{ Spec: batch.CronJobSpec{

View File

@@ -23,13 +23,13 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -95,17 +95,17 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset. // TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter())
} }
dsc := &DaemonSetsController{ dsc := &DaemonSetsController{
kubeClient: kubeClient, kubeClient: kubeClient,
eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}), eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemonset-controller"}),
podControl: controller.RealPodControl{ podControl: controller.RealPodControl{
KubeClient: kubeClient, KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}), Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemon-set"}),
}, },
burstReplicas: BurstReplicas, burstReplicas: BurstReplicas,
expectations: controller.NewControllerExpectations(), expectations: controller.NewControllerExpectations(),
@@ -239,7 +239,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
dsc.queue.Add(key) dsc.queue.Add(key)
} }
func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.DaemonSet { func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet {
// look up in the cache, if cached and the cache is valid, just return cached value // look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached { if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached {
ds, ok := obj.(*extensions.DaemonSet) ds, ok := obj.(*extensions.DaemonSet)
@@ -272,7 +272,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.Daemo
} }
// isCacheValid checks whether the cache is valid // isCacheValid checks whether the cache is valid
func (dsc *DaemonSetsController) isCacheValid(pod *api.Pod, cachedDS *extensions.DaemonSet) bool { func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
_, exists, err := dsc.dsStore.Get(cachedDS) _, exists, err := dsc.dsStore.Get(cachedDS)
// ds has been deleted or updated, cache is invalid // ds has been deleted or updated, cache is invalid
if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) { if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
@@ -283,7 +283,7 @@ func (dsc *DaemonSetsController) isCacheValid(pod *api.Pod, cachedDS *extensions
// isDaemonSetMatch takes a Pod and a DaemonSet and returns whether they match // isDaemonSetMatch takes a Pod and a DaemonSet and returns whether they match
// TODO(mqliang): This logic is a copy from GetPodDaemonSets(), remove the duplication // TODO(mqliang): This logic is a copy from GetPodDaemonSets(), remove the duplication
func isDaemonSetMatch(pod *api.Pod, ds *extensions.DaemonSet) bool { func isDaemonSetMatch(pod *v1.Pod, ds *extensions.DaemonSet) bool {
if ds.Namespace != pod.Namespace { if ds.Namespace != pod.Namespace {
return false return false
} }
@@ -301,7 +301,7 @@ func isDaemonSetMatch(pod *api.Pod, ds *extensions.DaemonSet) bool {
} }
func (dsc *DaemonSetsController) addPod(obj interface{}) { func (dsc *DaemonSetsController) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s added.", pod.Name) glog.V(4).Infof("Pod %s added.", pod.Name)
if ds := dsc.getPodDaemonSet(pod); ds != nil { if ds := dsc.getPodDaemonSet(pod); ds != nil {
dsKey, err := controller.KeyFunc(ds) dsKey, err := controller.KeyFunc(ds)
@@ -316,10 +316,10 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {
// When a pod is updated, figure out what sets manage it and wake them // When a pod is updated, figure out what sets manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old // up. If the labels of the pod have changed we need to awaken both the old
// and new set. old and cur must be *api.Pod types. // and new set. old and cur must be *v1.Pod types.
func (dsc *DaemonSetsController) updatePod(old, cur interface{}) { func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod) curPod := cur.(*v1.Pod)
oldPod := old.(*api.Pod) oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion { if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods. // Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs. // Two different versions of the same pod will always have different RVs.
@@ -342,7 +342,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
} }
func (dsc *DaemonSetsController) deletePod(obj interface{}) { func (dsc *DaemonSetsController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not // When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains // in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod // the deleted key/value. Note that this value might be stale. If the pod
@@ -354,7 +354,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
glog.Errorf("Couldn't get object from tombstone %#v", obj) glog.Errorf("Couldn't get object from tombstone %#v", obj)
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
glog.Errorf("Tombstone contained object that is not a pod %#v", obj) glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
return return
@@ -379,7 +379,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err) glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return return
} }
node := obj.(*api.Node) node := obj.(*v1.Node)
for i := range dsList.Items { for i := range dsList.Items {
ds := &dsList.Items[i] ds := &dsList.Items[i]
shouldEnqueue := dsc.nodeShouldRunDaemonPod(node, ds) shouldEnqueue := dsc.nodeShouldRunDaemonPod(node, ds)
@@ -390,8 +390,8 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
} }
func (dsc *DaemonSetsController) updateNode(old, cur interface{}) { func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
oldNode := old.(*api.Node) oldNode := old.(*v1.Node)
curNode := cur.(*api.Node) curNode := cur.(*v1.Node)
if reflect.DeepEqual(oldNode.Labels, curNode.Labels) { if reflect.DeepEqual(oldNode.Labels, curNode.Labels) {
// If node labels didn't change, we can ignore this update. // If node labels didn't change, we can ignore this update.
return return
@@ -412,8 +412,8 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
} }
// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes. // getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*api.Pod, error) { func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
nodeToDaemonPods := make(map[string][]*api.Pod) nodeToDaemonPods := make(map[string][]*v1.Pod)
selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -585,7 +585,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
// Sort the daemon pods by creation time, so that the oldest is first. // Sort the daemon pods by creation time, so that the oldest is first.
daemonPods, _ := nodeToDaemonPods[node.Name] daemonPods, _ := nodeToDaemonPods[node.Name]
sort.Sort(podByCreationTimestamp(daemonPods)) sort.Sort(podByCreationTimestamp(daemonPods))
if api.IsPodReady(daemonPods[0]) { if v1.IsPodReady(daemonPods[0]) {
numberReady++ numberReady++
} }
} }
@@ -623,7 +623,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
everything := unversioned.LabelSelector{} everything := unversioned.LabelSelector{}
if reflect.DeepEqual(ds.Spec.Selector, &everything) { if reflect.DeepEqual(ds.Spec.Selector, &everything) {
dsc.eventRecorder.Eventf(ds, api.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.") dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.")
return nil return nil
} }
@@ -644,7 +644,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
return dsc.updateDaemonSetStatus(ds) return dsc.updateDaemonSetStatus(ds)
} }
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool { func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) bool {
// If the daemon set specifies a node name, check that it matches with node.Name. // If the daemon set specifies a node name, check that it matches with node.Name.
if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) { if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
return false return false
@@ -652,23 +652,23 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
// TODO: Move it to the predicates // TODO: Move it to the predicates
for _, c := range node.Status.Conditions { for _, c := range node.Status.Conditions {
if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue { if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
return false return false
} }
} }
newPod := &api.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta} newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
newPod.Namespace = ds.Namespace newPod.Namespace = ds.Namespace
newPod.Spec.NodeName = node.Name newPod.Spec.NodeName = node.Name
pods := []*api.Pod{} pods := []*v1.Pod{}
for _, m := range dsc.podStore.Indexer.List() { for _, m := range dsc.podStore.Indexer.List() {
pod := m.(*api.Pod) pod := m.(*v1.Pod)
if pod.Spec.NodeName != node.Name { if pod.Spec.NodeName != node.Name {
continue continue
} }
if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed { if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
continue continue
} }
// ignore pods that belong to the daemonset when taking into account whether // ignore pods that belong to the daemonset when taking into account whether
@@ -689,10 +689,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
glog.V(4).Infof("GeneralPredicates failed on ds '%s/%s' for reason: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason()) glog.V(4).Infof("GeneralPredicates failed on ds '%s/%s' for reason: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
switch reason := r.(type) { switch reason := r.(type) {
case *predicates.InsufficientResourceError: case *predicates.InsufficientResourceError:
dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error()) dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
case *predicates.PredicateFailureError: case *predicates.PredicateFailureError:
if reason == predicates.ErrPodNotFitsHostPorts { if reason == predicates.ErrPodNotFitsHostPorts {
dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name) dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
} }
} }
} }
@@ -712,7 +712,7 @@ func (o byCreationTimestamp) Less(i, j int) bool {
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
} }
type podByCreationTimestamp []*api.Pod type podByCreationTimestamp []*v1.Pod
func (o podByCreationTimestamp) Len() int { return len(o) } func (o podByCreationTimestamp) Len() int { return len(o) }
func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
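deletePod above has to cope with the fact that a delete notification may hand it either the pod itself or a tombstone (cache.DeletedFinalStateUnknown) wrapping the last state seen before the watch was dropped. The sketch below illustrates that unwrapping with hand-rolled stand-in types rather than the real client-go cache package.

package main

import "fmt"

type pod struct{ Name string }

// deletedFinalStateUnknown mirrors the shape of a tombstone: the store key plus
// whatever object was last seen for it.
type deletedFinalStateUnknown struct {
	Key string
	Obj interface{}
}

// podFromDeleteEvent unwraps either form and reports whether a pod was found.
func podFromDeleteEvent(obj interface{}) (*pod, bool) {
	if p, ok := obj.(*pod); ok {
		return p, true
	}
	tombstone, ok := obj.(deletedFinalStateUnknown)
	if !ok {
		return nil, false // neither a pod nor a tombstone
	}
	p, ok := tombstone.Obj.(*pod)
	return p, ok
}

func main() {
	direct := &pod{Name: "daemon-abc"}
	wrapped := deletedFinalStateUnknown{Key: "kube-system/daemon-def", Obj: &pod{Name: "daemon-def"}}

	for _, ev := range []interface{}{direct, wrapped, "garbage"} {
		if p, ok := podFromDeleteEvent(ev); ok {
			fmt.Println("handle delete of", p.Name)
		} else {
			fmt.Println("could not extract pod from", ev)
		}
	}
}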

View File

@@ -20,14 +20,14 @@ import (
"fmt" "fmt"
"testing" "testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -55,46 +55,46 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string {
func newDaemonSet(name string) *extensions.DaemonSet { func newDaemonSet(name string) *extensions.DaemonSet {
return &extensions.DaemonSet{ return &extensions.DaemonSet{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()}, TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: extensions.DaemonSetSpec{ Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: simpleDaemonSetLabel}, Selector: &unversioned.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel, Labels: simpleDaemonSetLabel,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "foo/bar", Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}, },
}, },
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
}, },
}, },
}, },
} }
} }
func newNode(name string, label map[string]string) *api.Node { func newNode(name string, label map[string]string) *v1.Node {
return &api.Node{ return &v1.Node{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: label, Labels: label,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Status: api.NodeStatus{ Status: v1.NodeStatus{
Conditions: []api.NodeCondition{ Conditions: []v1.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: v1.NodeReady, Status: v1.ConditionTrue},
}, },
Allocatable: api.ResourceList{ Allocatable: v1.ResourceList{
api.ResourcePods: resource.MustParse("100"), v1.ResourcePods: resource.MustParse("100"),
}, },
}, },
} }
@@ -106,28 +106,28 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
} }
} }
func newPod(podName string, nodeName string, label map[string]string) *api.Pod { func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
GenerateName: podName, GenerateName: podName,
Labels: label, Labels: label,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeName: nodeName, NodeName: nodeName,
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "foo/bar", Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}, },
}, },
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
}, },
} }
api.GenerateName(api.SimpleNameGenerator, &pod.ObjectMeta) v1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
return pod return pod
} }
@@ -138,8 +138,8 @@ func addPods(podStore cache.Store, nodeName string, label map[string]string, num
} }
func newTestController() (*DaemonSetsController, *controller.FakePodControl) { func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc()) informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0) manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
informerFactory.Start(wait.NeverStop) informerFactory.Start(wait.NeverStop)
@@ -212,8 +212,8 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) { func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl := newTestController() manager, podControl := newTestController()
node := newNode("not-ready", nil) node := newNode("not-ready", nil)
node.Status.Conditions = []api.NodeCondition{ node.Status.Conditions = []v1.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionFalse}, {Type: v1.NodeReady, Status: v1.ConditionFalse},
} }
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
@@ -225,29 +225,29 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) { func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl := newTestController() manager, podControl := newTestController()
node := newNode("not-enough-disk", nil) node := newNode("not-enough-disk", nil)
node.Status.Conditions = []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}} node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
manager.dsStore.Add(ds) manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0) syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
} }
func resourcePodSpec(nodeName, memory, cpu string) api.PodSpec { func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
return api.PodSpec{ return v1.PodSpec{
NodeName: nodeName, NodeName: nodeName,
Containers: []api.Container{{ Containers: []v1.Container{{
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: allocatableResources(memory, cpu), Requests: allocatableResources(memory, cpu),
}, },
}}, }},
} }
} }
func allocatableResources(memory, cpu string) api.ResourceList { func allocatableResources(memory, cpu string) v1.ResourceList {
return api.ResourceList{ return v1.ResourceList{
api.ResourceMemory: resource.MustParse(memory), v1.ResourceMemory: resource.MustParse(memory),
api.ResourceCPU: resource.MustParse(cpu), v1.ResourceCPU: resource.MustParse(cpu),
api.ResourcePods: resource.MustParse("100"), v1.ResourcePods: resource.MustParse("100"),
} }
} }
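The two helpers above feed the capacity tests that follow; a usage sketch with illustrative values, assuming the newNode helper defined earlier in this test file:

    // A pod requesting 100M/200m against a node that reports 200M/200m allocatable,
    // as in the sufficient-capacity test below.
    podSpec := resourcePodSpec("node1", "100M", "200m")
    node := newNode("node1", nil)
    node.Status.Allocatable = allocatableResources("200M", "200m")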
@@ -258,7 +258,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
node := newNode("too-much-mem", nil) node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m") node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec, Spec: podSpec,
}) })
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
@@ -273,9 +273,9 @@ func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
node := newNode("too-much-mem", nil) node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m") node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec, Spec: podSpec,
Status: api.PodStatus{Phase: api.PodSucceeded}, Status: v1.PodStatus{Phase: v1.PodSucceeded},
}) })
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec ds.Spec.Template.Spec = podSpec
@@ -290,7 +290,7 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
node := newNode("not-too-much-mem", nil) node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m") node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec, Spec: podSpec,
}) })
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
@@ -306,7 +306,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
node := newNode("not-too-much-mem", nil) node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m") node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec, Spec: podSpec,
}) })
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
@@ -319,10 +319,10 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
// DaemonSets should not place onto nodes that would cause port conflicts // DaemonSets should not place onto nodes that would cause port conflicts
func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) { func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
podSpec := api.PodSpec{ podSpec := v1.PodSpec{
NodeName: "port-conflict", NodeName: "port-conflict",
Containers: []api.Container{{ Containers: []v1.Container{{
Ports: []api.ContainerPort{{ Ports: []v1.ContainerPort{{
HostPort: 666, HostPort: 666,
}}, }},
}}, }},
@@ -330,7 +330,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl := newTestController() manager, podControl := newTestController()
node := newNode("port-conflict", nil) node := newNode("port-conflict", nil)
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec, Spec: podSpec,
}) })
@@ -345,10 +345,10 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
// //
// Issue: https://github.com/kubernetes/kubernetes/issues/22309 // Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) { func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
podSpec := api.PodSpec{ podSpec := v1.PodSpec{
NodeName: "port-conflict", NodeName: "port-conflict",
Containers: []api.Container{{ Containers: []v1.Container{{
Ports: []api.ContainerPort{{ Ports: []v1.ContainerPort{{
HostPort: 666, HostPort: 666,
}}, }},
}}, }},
@@ -356,10 +356,10 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
manager, podControl := newTestController() manager, podControl := newTestController()
node := newNode("port-conflict", nil) node := newNode("port-conflict", nil)
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel, Labels: simpleDaemonSetLabel,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: podSpec, Spec: podSpec,
}) })
@@ -371,18 +371,18 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
// DaemonSets should place onto nodes that would not cause port conflicts // DaemonSets should place onto nodes that would not cause port conflicts
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) { func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
podSpec1 := api.PodSpec{ podSpec1 := v1.PodSpec{
NodeName: "no-port-conflict", NodeName: "no-port-conflict",
Containers: []api.Container{{ Containers: []v1.Container{{
Ports: []api.ContainerPort{{ Ports: []v1.ContainerPort{{
HostPort: 6661, HostPort: 6661,
}}, }},
}}, }},
} }
podSpec2 := api.PodSpec{ podSpec2 := v1.PodSpec{
NodeName: "no-port-conflict", NodeName: "no-port-conflict",
Containers: []api.Container{{ Containers: []v1.Container{{
Ports: []api.ContainerPort{{ Ports: []v1.ContainerPort{{
HostPort: 6662, HostPort: 6662,
}}, }},
}}, }},
@@ -390,7 +390,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
manager, podControl := newTestController() manager, podControl := newTestController()
node := newNode("no-port-conflict", nil) node := newNode("no-port-conflict", nil)
manager.nodeStore.Add(node) manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec1, Spec: podSpec1,
}) })
ds := newDaemonSet("foo") ds := newDaemonSet("foo")
@@ -406,12 +406,12 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
manager, podControl := newTestController() manager, podControl := newTestController()
manager.nodeStore.Store.Add(newNode("node1", nil)) manager.nodeStore.Store.Add(newNode("node1", nil))
// Create a pod not controlled by a daemonset. // Create a pod not controlled by a daemonset.
manager.podStore.Indexer.Add(&api.Pod{ manager.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"bang": "boom"}, Labels: map[string]string{"bang": "boom"},
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeName: "node1", NodeName: "node1",
}, },
}) })
@@ -554,7 +554,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel) addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
daemon := newDaemonSet("foo") daemon := newDaemonSet("foo")
affinity := map[string]string{ affinity := map[string]string{
api.AffinityAnnotationKey: fmt.Sprintf(` v1.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{ "nodeSelectorTerms": [{
"matchExpressions": [{ "matchExpressions": [{
@@ -586,7 +586,7 @@ func TestNumberReadyStatus(t *testing.T) {
selector, _ := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector) selector, _ := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector)
daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector) daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
for _, pod := range daemonPods { for _, pod := range daemonPods {
condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue} condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
pod.Status.Conditions = append(pod.Status.Conditions, condition) pod.Status.Conditions = append(pod.Status.Conditions, condition)
} }
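The loop above marks every daemon pod Ready before the status is recomputed; the same step could be factored into a small helper. A sketch, with markPodReady being an illustrative name only:

    // markPodReady appends a Ready=True condition to the pod's status,
    // matching what the NumberReady test does for each daemon pod.
    func markPodReady(pod *v1.Pod) {
        pod.Status.Conditions = append(pod.Status.Conditions,
            v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue})
    }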


@@ -27,12 +27,12 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -91,14 +91,14 @@ func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset. // TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})
if client != nil && client.Core().RESTClient().GetRateLimiter() != nil { if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
} }
dc := &DeploymentController{ dc := &DeploymentController{
client: client, client: client,
eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}), eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
} }
@@ -220,7 +220,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
} }
// A number of things could affect the old deployment: labels changing, // A number of things could affect the old deployment: labels changing,
// pod template changing, etc. // pod template changing, etc.
if !api.Semantic.DeepEqual(oldRS, curRS) { if !v1.Semantic.DeepEqual(oldRS, curRS) {
if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil { if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil {
dc.enqueueDeployment(oldD) dc.enqueueDeployment(oldD)
} }
@@ -333,7 +333,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
everything := unversioned.LabelSelector{} everything := unversioned.LabelSelector{}
if reflect.DeepEqual(d.Spec.Selector, &everything) { if reflect.DeepEqual(d.Spec.Selector, &everything) {
dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.") dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation { if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation d.Status.ObservedGeneration = d.Generation
dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d) dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
@@ -347,7 +347,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {
// Handle overlapping deployments by deterministically avoiding syncing deployments that fight over ReplicaSets. // Handle overlapping deployments by deterministically avoiding syncing deployments that fight over ReplicaSets.
if err = dc.handleOverlap(d); err != nil { if err = dc.handleOverlap(d); err != nil {
dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectorOverlap", err.Error()) dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectorOverlap", err.Error())
return nil return nil
} }


@@ -20,11 +20,11 @@ import (
"fmt" "fmt"
"testing" "testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -41,15 +41,15 @@ var (
func rs(name string, replicas int, selector map[string]string, timestamp unversioned.Time) *extensions.ReplicaSet { func rs(name string, replicas int, selector map[string]string, timestamp unversioned.Time) *extensions.ReplicaSet {
return &extensions.ReplicaSet{ return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
CreationTimestamp: timestamp, CreationTimestamp: timestamp,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &unversioned.LabelSelector{MatchLabels: selector}, Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: api.PodTemplateSpec{}, Template: v1.PodTemplateSpec{},
}, },
} }
} }
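With Spec.Replicas now a *int32 in the versioned types, the fixtures build the pointer through an inline closure. An equivalent, slightly more readable sketch; the helper name int32Ptr is illustrative and not part of this change:

    // int32Ptr returns a pointer to the given value, for fields such as
    // extensions.ReplicaSetSpec.Replicas that expect *int32 after the move to v1beta1.
    func int32Ptr(i int32) *int32 { return &i }

    // e.g. Replicas: int32Ptr(int32(replicas))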
@@ -65,24 +65,27 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment { func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
d := extensions.Deployment{ d := extensions.Deployment{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: name, Name: name,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: extensions.DeploymentSpec{ Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{ Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType, Type: extensions.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensions.RollingUpdateDeployment{}, RollingUpdate: &extensions.RollingUpdateDeployment{
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
},
}, },
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &unversioned.LabelSelector{MatchLabels: selector}, Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: selector, Labels: selector,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "foo/bar", Image: "foo/bar",
}, },
@@ -93,22 +96,22 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
}, },
} }
if maxSurge != nil { if maxSurge != nil {
d.Spec.Strategy.RollingUpdate.MaxSurge = *maxSurge d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge
} }
if maxUnavailable != nil { if maxUnavailable != nil {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = *maxUnavailable d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable
} }
return &d return &d
} }
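MaxSurge and MaxUnavailable likewise became *intstr.IntOrString, hence the closures filling RollingUpdateDeployment above. A comparable helper sketch, assuming the intstr import already present in this file; intstrPtr is an illustrative name:

    // intstrPtr returns a pointer to an IntOrString value.
    func intstrPtr(v intstr.IntOrString) *intstr.IntOrString { return &v }

    // e.g. MaxUnavailable: intstrPtr(intstr.FromInt(0)), MaxSurge: intstrPtr(intstr.FromInt(1))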
func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet { func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
return &extensions.ReplicaSet{ return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: d.Spec.Template, Template: d.Spec.Template,
}, },
} }
@@ -130,7 +133,7 @@ type fixture struct {
// Objects to put in the store. // Objects to put in the store.
dLister []*extensions.Deployment dLister []*extensions.Deployment
rsLister []*extensions.ReplicaSet rsLister []*extensions.ReplicaSet
podLister []*api.Pod podLister []*v1.Pod
// Actions expected to happen on the client. Objects from here are also // Actions expected to happen on the client. Objects from here are also
// preloaded into NewSimpleFake. // preloaded into NewSimpleFake.
@@ -161,7 +164,7 @@ func newFixture(t *testing.T) *fixture {
func (f *fixture) run(deploymentName string) { func (f *fixture) run(deploymentName string) {
f.client = fake.NewSimpleClientset(f.objects...) f.client = fake.NewSimpleClientset(f.objects...)
informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client) c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
c.eventRecorder = &record.FakeRecorder{} c.eventRecorder = &record.FakeRecorder{}
c.dListerSynced = alwaysReady c.dListerSynced = alwaysReady
@@ -234,7 +237,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
// issue: https://github.com/kubernetes/kubernetes/issues/23218 // issue: https://github.com/kubernetes/kubernetes/issues/23218
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) { func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
fake := &fake.Clientset{} fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake) controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{} controller.eventRecorder = &record.FakeRecorder{}
controller.dListerSynced = alwaysReady controller.dListerSynced = alwaysReady


@@ -20,8 +20,8 @@ import (
"fmt" "fmt"
"reflect" "reflect"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/deployment/util" "k8s.io/kubernetes/pkg/controller/deployment/util"
) )
@@ -95,14 +95,14 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// Update the deployment conditions with a message for the new replica set that // Update the deployment conditions with a message for the new replica set that
// was successfully deployed. If the condition already exists, we ignore this update. // was successfully deployed. If the condition already exists, we ignore this update.
msg := fmt.Sprintf("Replica set %q has successfully progressed.", newRS.Name) msg := fmt.Sprintf("Replica set %q has successfully progressed.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, util.NewRSAvailableReason, msg) condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
util.SetDeploymentCondition(&newStatus, *condition) util.SetDeploymentCondition(&newStatus, *condition)
case util.DeploymentProgressing(d, &newStatus): case util.DeploymentProgressing(d, &newStatus):
// If there is any progress made, continue by not checking if the deployment failed. This // If there is any progress made, continue by not checking if the deployment failed. This
// behavior emulates the rolling updater progressDeadline check. // behavior emulates the rolling updater progressDeadline check.
msg := fmt.Sprintf("Replica set %q is progressing.", newRS.Name) msg := fmt.Sprintf("Replica set %q is progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, util.ReplicaSetUpdatedReason, msg) condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
// Update the current Progressing condition or add a new one if it doesn't exist. // Update the current Progressing condition or add a new one if it doesn't exist.
// If a Progressing condition with status=true already exists, we should update // If a Progressing condition with status=true already exists, we should update
// everything but lastTransitionTime. SetDeploymentCondition already does that but // everything but lastTransitionTime. SetDeploymentCondition already does that but
@@ -111,7 +111,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// update with the same reason and change just lastUpdateTime iff we notice any // update with the same reason and change just lastUpdateTime iff we notice any
// progress. That's why we handle it here. // progress. That's why we handle it here.
if currentCond != nil { if currentCond != nil {
if currentCond.Status == api.ConditionTrue { if currentCond.Status == v1.ConditionTrue {
condition.LastTransitionTime = currentCond.LastTransitionTime condition.LastTransitionTime = currentCond.LastTransitionTime
} }
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing) util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
@@ -122,7 +122,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// Update the deployment with a timeout condition. If the condition already exists, // Update the deployment with a timeout condition. If the condition already exists,
// we ignore this update. // we ignore this update.
msg := fmt.Sprintf("Replica set %q has timed out progressing.", newRS.Name) msg := fmt.Sprintf("Replica set %q has timed out progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionFalse, util.TimedOutReason, msg) condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
util.SetDeploymentCondition(&newStatus, *condition) util.SetDeploymentCondition(&newStatus, *condition)
} }
} }


@@ -19,7 +19,7 @@ package deployment
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/retry" "k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -82,7 +82,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext
for i := range oldRSs { for i := range oldRSs {
rs := oldRSs[i] rs := oldRSs[i]
// Scaling not required. // Scaling not required.
if rs.Spec.Replicas == 0 { if *(rs.Spec.Replicas) == 0 {
continue continue
} }
scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment) scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)
@@ -104,7 +104,7 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
rs := oldRSs[i] rs := oldRSs[i]
desiredGeneration := rs.Generation desiredGeneration := rs.Generation
observedGeneration := rs.Status.ObservedGeneration observedGeneration := rs.Status.ObservedGeneration
specReplicas := rs.Spec.Replicas specReplicas := *(rs.Spec.Replicas)
statusReplicas := rs.Status.Replicas statusReplicas := rs.Status.Replicas
if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) { if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
@@ -113,13 +113,13 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
return false, err return false, err
} }
specReplicas = replicaSet.Spec.Replicas specReplicas = *(replicaSet.Spec.Replicas)
statusReplicas = replicaSet.Status.Replicas statusReplicas = replicaSet.Status.Replicas
observedGeneration = replicaSet.Status.ObservedGeneration observedGeneration = replicaSet.Status.ObservedGeneration
// TODO: We also need to wait for terminating replicas to actually terminate. // TODO: We also need to wait for terminating replicas to actually terminate.
// See https://github.com/kubernetes/kubernetes/issues/32567 // See https://github.com/kubernetes/kubernetes/issues/32567
return observedGeneration >= desiredGeneration && replicaSet.Spec.Replicas == 0 && replicaSet.Status.Replicas == 0, nil return observedGeneration >= desiredGeneration && *(replicaSet.Spec.Replicas) == 0 && replicaSet.Status.Replicas == 0, nil
}); err != nil { }); err != nil {
if err == wait.ErrWaitTimeout { if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d", err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d",
@@ -133,6 +133,6 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate" // scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, deployment.Spec.Replicas, deployment) scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err return scaled, err
} }
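The recreate path above shows the read/write pattern that recurs throughout this commit once replica counts became pointers; a two-line sketch, where rs and deployment stand for any *extensions.ReplicaSet and *extensions.Deployment:

    desired := *(deployment.Spec.Replicas) // read the desired count through the pointer
    *(rs.Spec.Replicas) = desired          // write in place; the pointer must be non-nil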


@@ -21,8 +21,8 @@ import (
"reflect" "reflect"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
) )
@@ -91,11 +91,11 @@ func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deploy
} }
func (dc *DeploymentController) emitRollbackWarningEvent(deployment *extensions.Deployment, reason, message string) { func (dc *DeploymentController) emitRollbackWarningEvent(deployment *extensions.Deployment, reason, message string) {
dc.eventRecorder.Eventf(deployment, api.EventTypeWarning, reason, message) dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, reason, message)
} }
func (dc *DeploymentController) emitRollbackNormalEvent(deployment *extensions.Deployment, message string) { func (dc *DeploymentController) emitRollbackNormalEvent(deployment *extensions.Deployment, message string) {
dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, deploymentutil.RollbackDone, message) dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
} }
// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and updates the input deployment // updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and updates the input deployment

View File

@@ -21,7 +21,7 @@ import (
"sort" "sort"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/integer" "k8s.io/kubernetes/pkg/util/integer"
@@ -62,13 +62,13 @@ func (dc *DeploymentController) rolloutRolling(deployment *extensions.Deployment
} }
func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) { func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
if newRS.Spec.Replicas == deployment.Spec.Replicas { if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
// Scaling not required. // Scaling not required.
return false, nil return false, nil
} }
if newRS.Spec.Replicas > deployment.Spec.Replicas { if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
// Scale down. // Scale down.
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, deployment.Spec.Replicas, deployment) scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err return scaled, err
} }
newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS) newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
@@ -120,8 +120,8 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
// * The new replica set created must start with 0 replicas because allPodsCount is already at 13. // * The new replica set created must start with 0 replicas because allPodsCount is already at 13.
// * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then // * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then
// allow the new replica set to be scaled up by 5. // allow the new replica set to be scaled up by 5.
minAvailable := deployment.Spec.Replicas - maxUnavailable minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
newRSUnavailablePodCount := newRS.Spec.Replicas - newRS.Status.AvailableReplicas newRSUnavailablePodCount := *(newRS.Spec.Replicas) - newRS.Status.AvailableReplicas
maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount
if maxScaledDown <= 0 { if maxScaledDown <= 0 {
return false, nil return false, nil
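To make the bound above concrete, a worked instance of the arithmetic using the figures from the preceding comment (the constants are illustrative, not taken from real state):

    // maxScaledDownExample reproduces the calculation above: 10 desired replicas,
    // maxUnavailable of 2, 13 pods in total, and no unavailable pods in the new RS.
    func maxScaledDownExample() int32 {
        var (
            desiredReplicas          int32 = 10
            maxUnavailable           int32 = 2
            allPodsCount             int32 = 13
            newRSUnavailablePodCount int32 = 0
        )
        minAvailable := desiredReplicas - maxUnavailable              // 8
        return allPodsCount - minAvailable - newRSUnavailablePodCount // 13 - 8 - 0 = 5
    }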
@@ -158,20 +158,20 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
if totalScaledDown >= maxCleanupCount { if totalScaledDown >= maxCleanupCount {
break break
} }
if targetRS.Spec.Replicas == 0 { if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this replica set. // cannot scale down this replica set.
continue continue
} }
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name) glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if targetRS.Spec.Replicas == targetRS.Status.AvailableReplicas { if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required. // no unhealthy replicas found, no scaling required.
continue continue
} }
scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-targetRS.Status.AvailableReplicas))) scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
newReplicasCount := targetRS.Spec.Replicas - scaledDownCount newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
if newReplicasCount > targetRS.Spec.Replicas { if newReplicasCount > *(targetRS.Spec.Replicas) {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount) return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
} }
_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment) _, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil { if err != nil {
@@ -189,7 +189,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
maxUnavailable := deploymentutil.MaxUnavailable(*deployment) maxUnavailable := deploymentutil.MaxUnavailable(*deployment)
// Check if we can scale down. // Check if we can scale down.
minAvailable := deployment.Spec.Replicas - maxUnavailable minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
// Find the number of available pods. // Find the number of available pods.
availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
if availablePodCount <= minAvailable { if availablePodCount <= minAvailable {
@@ -207,15 +207,15 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
// No further scaling required. // No further scaling required.
break break
} }
if targetRS.Spec.Replicas == 0 { if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this ReplicaSet. // cannot scale down this ReplicaSet.
continue continue
} }
// Scale down. // Scale down.
scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown))) scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := targetRS.Spec.Replicas - scaleDownCount newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
if newReplicasCount > targetRS.Spec.Replicas { if newReplicasCount > *(targetRS.Spec.Replicas) {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount) return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
} }
_, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment) _, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil { if err != nil {


@@ -19,8 +19,8 @@ package deployment
import ( import (
"testing" "testing"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
@@ -110,7 +110,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
continue continue
} }
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet) updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a { if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a) t.Errorf("expected update to %d replicas, got %d", e, a)
} }
} }
@@ -372,7 +372,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
continue continue
} }
updated := updateAction.GetObject().(*extensions.ReplicaSet) updated := updateAction.GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a { if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a) t.Errorf("expected update to %d replicas, got %d", e, a)
} }
} }


@@ -24,9 +24,11 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util" deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -80,11 +82,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)
needsUpdate := false needsUpdate := false
if d.Spec.Paused && !pausedCondExists { if d.Spec.Paused && !pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused") condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
deploymentutil.SetDeploymentCondition(&d.Status, *condition) deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true needsUpdate = true
} else if !d.Spec.Paused && pausedCondExists { } else if !d.Spec.Paused && pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed") condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
deploymentutil.SetDeploymentCondition(&d.Status, *condition) deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true needsUpdate = true
} }
@@ -126,10 +128,14 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *ext
} }
// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced. // rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced.
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *api.PodList, error) { func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) {
rsList, err := deploymentutil.ListReplicaSets(deployment, rsList, err := deploymentutil.ListReplicaSets(deployment,
func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) { func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
return dc.rsLister.ReplicaSets(namespace).List(options.LabelSelector) parsed, err := labels.Parse(options.LabelSelector)
if err != nil {
return nil, err
}
return dc.rsLister.ReplicaSets(namespace).List(parsed)
}) })
if err != nil { if err != nil {
return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err) return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err)
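Because the versioned ListOptions carries the selector as a string, the lister call above first parses it into a labels.Selector; the same conversion appears again in listPods below. A compact sketch, assuming the labels and v1 imports added in this file; selectorFromOptions is an illustrative name:

    // selectorFromOptions turns the string selector in v1.ListOptions into the
    // labels.Selector that the informer-backed listers accept.
    func selectorFromOptions(options v1.ListOptions) (labels.Selector, error) {
        return labels.Parse(options.LabelSelector)
    }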
@@ -201,12 +207,16 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
if err != nil { if err != nil {
return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err) return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err)
} }
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
pods, err := dc.podLister.Pods(namespace).List(options.LabelSelector) parsed, err := labels.Parse(options.LabelSelector)
if err != nil {
return nil, err
}
pods, err := dc.podLister.Pods(namespace).List(parsed)
if err != nil { if err != nil {
return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", namespace, options, err) return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", namespace, options, err)
} }
podList := api.PodList{Items: make([]api.Pod, 0, len(pods))} podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
for i := range pods { for i := range pods {
podList.Items = append(podList.Items, *pods[i]) podList.Items = append(podList.Items, *pods[i])
} }
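The copy from []*v1.Pod into a v1.PodList above is repeated in listPods further down; a reusable sketch of the same conversion, with podListFromPointers as an illustrative name:

    // podListFromPointers copies lister results (pointers) into a PodList value,
    // since callers here still expect a *v1.PodList.
    func podListFromPointers(pods []*v1.Pod) *v1.PodList {
        list := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
        for i := range pods {
            list.Items = append(list.Items, *pods[i])
        }
        return &list
    }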
@@ -253,11 +263,15 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
return updatedRS, nil return updatedRS, nil
} }
func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*api.PodList, error) { func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) {
return deploymentutil.ListPods(deployment, return deploymentutil.ListPods(deployment,
func(namespace string, options api.ListOptions) (*api.PodList, error) { func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
pods, err := dc.podLister.Pods(namespace).List(options.LabelSelector) parsed, err := labels.Parse(options.LabelSelector)
result := api.PodList{Items: make([]api.Pod, 0, len(pods))} if err != nil {
return nil, err
}
pods, err := dc.podLister.Pods(namespace).List(parsed)
result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
for i := range pods { for i := range pods {
result.Items = append(result.Items, *pods[i]) result.Items = append(result.Items, *pods[i])
} }
@@ -307,7 +321,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing) cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing)
if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil { if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil {
msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name) msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, deploymentutil.FoundNewRSReason, msg) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
updateConditions = true updateConditions = true
} }
@@ -333,13 +347,13 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
// Create new ReplicaSet // Create new ReplicaSet
newRS := extensions.ReplicaSet{ newRS := extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
// Make the name deterministic, to ensure idempotence // Make the name deterministic, to ensure idempotence
Name: deployment.Name + "-" + fmt.Sprintf("%d", podTemplateSpecHash), Name: deployment.Name + "-" + fmt.Sprintf("%d", podTemplateSpecHash),
Namespace: namespace, Namespace: namespace,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: 0, Replicas: func(i int32) *int32 { return &i }(0),
MinReadySeconds: deployment.Spec.MinReadySeconds, MinReadySeconds: deployment.Spec.MinReadySeconds,
Selector: newRSSelector, Selector: newRSSelector,
Template: newRSTemplate, Template: newRSTemplate,
@@ -351,7 +365,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
return nil, err return nil, err
} }
newRS.Spec.Replicas = newReplicasCount *(newRS.Spec.Replicas) = newReplicasCount
// Set new replica set's annotation // Set new replica set's annotation
deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false) deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false)
createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS) createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS)
@@ -365,7 +379,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
case err != nil: case err != nil:
msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err) msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
if deployment.Spec.ProgressDeadlineSeconds != nil { if deployment.Spec.ProgressDeadlineSeconds != nil {
cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionFalse, deploymentutil.FailedRSCreateReason, msg) cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
deploymentutil.SetDeploymentCondition(&deployment.Status, *cond) deploymentutil.SetDeploymentCondition(&deployment.Status, *cond)
// We don't really care about this error at this point, since we have a bigger issue to report. // We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Update the rest of the Deployment status, too. We may need to do this every time we // TODO: Update the rest of the Deployment status, too. We may need to do this every time we
@@ -375,17 +389,17 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568 // these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
_, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment) _, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment)
} }
dc.eventRecorder.Eventf(deployment, api.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg) dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err return nil, err
} }
if newReplicasCount > 0 { if newReplicasCount > 0 {
dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount) dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
} }
deploymentutil.SetDeploymentRevision(deployment, newRevision) deploymentutil.SetDeploymentRevision(deployment, newRevision)
if deployment.Spec.ProgressDeadlineSeconds != nil { if deployment.Spec.ProgressDeadlineSeconds != nil {
msg := fmt.Sprintf("Created new replica set %q", createdRS.Name) msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, deploymentutil.NewReplicaSetReason, msg) condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
deploymentutil.SetDeploymentCondition(&deployment.Status, *condition) deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
} }
_, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment) _, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
@@ -401,10 +415,10 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
// If there is only one active replica set then we should scale that up to the full count of the // If there is only one active replica set then we should scale that up to the full count of the
// deployment. If there is no active replica set, then we should scale up the newest replica set. // deployment. If there is no active replica set, then we should scale up the newest replica set.
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil { if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
if activeOrLatest.Spec.Replicas == deployment.Spec.Replicas { if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
return nil return nil
} }
_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, deployment.Spec.Replicas, deployment) _, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
return err return err
} }
@@ -427,8 +441,8 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
allowedSize := int32(0) allowedSize := int32(0)
if deployment.Spec.Replicas > 0 { if *(deployment.Spec.Replicas) > 0 {
allowedSize = deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment) allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
} }
// Number of additional replicas that can be either added or removed from the total // Number of additional replicas that can be either added or removed from the total
@@ -465,10 +479,10 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
if deploymentReplicasToAdd != 0 { if deploymentReplicasToAdd != 0 {
proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded) proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)
nameToSize[rs.Name] = rs.Spec.Replicas + proportion nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion
deploymentReplicasAdded += proportion deploymentReplicasAdded += proportion
} else { } else {
nameToSize[rs.Name] = rs.Spec.Replicas nameToSize[rs.Name] = *(rs.Spec.Replicas)
} }
} }
@@ -497,11 +511,11 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) { func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
// No need to scale // No need to scale
if rs.Spec.Replicas == newScale { if *(rs.Spec.Replicas) == newScale {
return false, rs, nil return false, rs, nil
} }
var scalingOperation string var scalingOperation string
if rs.Spec.Replicas < newScale { if *(rs.Spec.Replicas) < newScale {
scalingOperation = "up" scalingOperation = "up"
} else { } else {
scalingOperation = "down" scalingOperation = "down"
@@ -517,14 +531,14 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
} }
rsCopy := objCopy.(*extensions.ReplicaSet) rsCopy := objCopy.(*extensions.ReplicaSet)
sizeNeedsUpdate := rsCopy.Spec.Replicas != newScale sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale
annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, deployment.Spec.Replicas, deployment.Spec.Replicas+deploymentutil.MaxSurge(*deployment)) annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))
if sizeNeedsUpdate || annotationsNeedUpdate { if sizeNeedsUpdate || annotationsNeedUpdate {
rsCopy.Spec.Replicas = newScale *(rsCopy.Spec.Replicas) = newScale
rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy) rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate { if err == nil && sizeNeedsUpdate {
dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale) dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
} }
} }
return rs, err return rs, err
@@ -549,7 +563,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
for i := int32(0); i < diff; i++ { for i := int32(0); i < diff; i++ {
rs := oldRSs[i] rs := oldRSs[i]
// Avoid deleting replica sets with non-zero replica counts // Avoid deleting replica sets with non-zero replica counts
if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration { if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration {
continue continue
} }
if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) { if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
@@ -579,11 +593,11 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet,
availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs) availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs) totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)
if availableReplicas >= deployment.Spec.Replicas-deploymentutil.MaxUnavailable(*deployment) { if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, api.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.") minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
deploymentutil.SetDeploymentCondition(&deployment.Status, *minAvailability) deploymentutil.SetDeploymentCondition(&deployment.Status, *minAvailability)
} else { } else {
noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, api.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.") noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
deploymentutil.SetDeploymentCondition(&deployment.Status, *noMinAvailability) deploymentutil.SetDeploymentCondition(&deployment.Status, *noMinAvailability)
} }
@@ -611,7 +625,7 @@ func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool,
if !ok { if !ok {
continue continue
} }
if desired != d.Spec.Replicas { if desired != *(d.Spec.Replicas) {
return true, nil return true, nil
} }
} }


@@ -21,8 +21,8 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
testclient "k8s.io/kubernetes/pkg/client/testing/core" testclient "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -261,7 +261,7 @@ func TestScale(t *testing.T) {
} }
if test.newRS != nil { if test.newRS != nil {
desiredReplicas := test.oldDeployment.Spec.Replicas desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok { if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
desiredReplicas = desired desiredReplicas = desired
} }
@@ -272,7 +272,7 @@ func TestScale(t *testing.T) {
if rs == nil { if rs == nil {
continue continue
} }
desiredReplicas := test.oldDeployment.Spec.Replicas desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok { if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
desiredReplicas = desired desiredReplicas = desired
} }
@@ -289,22 +289,22 @@ func TestScale(t *testing.T) {
// no update action for it. // no update action for it.
nameToSize := make(map[string]int32) nameToSize := make(map[string]int32)
if test.newRS != nil { if test.newRS != nil {
nameToSize[test.newRS.Name] = test.newRS.Spec.Replicas nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
} }
for i := range test.oldRSs { for i := range test.oldRSs {
rs := test.oldRSs[i] rs := test.oldRSs[i]
nameToSize[rs.Name] = rs.Spec.Replicas nameToSize[rs.Name] = *(rs.Spec.Replicas)
} }
// Get all the UPDATE actions and update nameToSize with all the updated sizes. // Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() { for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet) rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
if !test.wasntUpdated[rs.Name] { if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = rs.Spec.Replicas nameToSize[rs.Name] = *(rs.Spec.Replicas)
} }
} }
if test.expectedNew != nil && test.newRS != nil && test.expectedNew.Spec.Replicas != nameToSize[test.newRS.Name] { if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, test.expectedNew.Spec.Replicas, nameToSize[test.newRS.Name]) t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
continue continue
} }
if len(test.expectedOld) != len(test.oldRSs) { if len(test.expectedOld) != len(test.oldRSs) {
@@ -314,8 +314,8 @@ func TestScale(t *testing.T) {
for n := range test.oldRSs { for n := range test.oldRSs {
rs := test.oldRSs[n] rs := test.oldRSs[n]
expected := test.expectedOld[n] expected := test.expectedOld[n]
if expected.Spec.Replicas != nameToSize[rs.Name] { if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, expected.Spec.Replicas, nameToSize[rs.Name]) t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
} }
} }
} }
@@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
for i := range tests { for i := range tests {
test := tests[i] test := tests[i]
fake := &fake.Clientset{} fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake) controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
controller.eventRecorder = &record.FakeRecorder{} controller.eventRecorder = &record.FakeRecorder{}

View File

@@ -29,8 +29,10 @@ import (
"k8s.io/kubernetes/pkg/api/annotations" "k8s.io/kubernetes/pkg/api/annotations"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -108,7 +110,7 @@ const (
) )
// NewDeploymentCondition creates a new deployment condition. // NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType extensions.DeploymentConditionType, status api.ConditionStatus, reason, message string) *extensions.DeploymentCondition { func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
return &extensions.DeploymentCondition{ return &extensions.DeploymentCondition{
Type: condType, Type: condType,
Status: status, Status: status,
@@ -266,7 +268,7 @@ func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *exten
} }
} }
// If the new replica set is about to be created, we need to add replica annotations to it. // If the new replica set is about to be created, we need to add replica annotations to it.
if !exists && SetReplicasAnnotations(newRS, deployment.Spec.Replicas, deployment.Spec.Replicas+MaxSurge(*deployment)) { if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
annotationChanged = true annotationChanged = true
} }
return annotationChanged return annotationChanged
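Editor's note: SetReplicasAnnotations above records the deployment's desired size and its surge ceiling on the new replica set. A standalone sketch of that bookkeeping under assumed, illustrative annotation keys (the real controller uses keys in the deployment.kubernetes.io/ namespace).

package main

import (
	"fmt"
	"strconv"
)

// Illustrative annotation keys, not the controller's real ones.
const (
	desiredReplicasAnnotation = "example.io/desired-replicas"
	maxReplicasAnnotation     = "example.io/max-replicas"
)

// setReplicasAnnotations stores the desired size and the surge ceiling on a
// replica set's annotations, returning true only if something changed.
func setReplicasAnnotations(annotations map[string]string, desired, max int32) bool {
	changed := false
	for k, v := range map[string]string{
		desiredReplicasAnnotation: strconv.Itoa(int(desired)),
		maxReplicasAnnotation:     strconv.Itoa(int(max)),
	} {
		if annotations[k] != v {
			annotations[k] = v
			changed = true
		}
	}
	return changed
}

func main() {
	ann := map[string]string{}
	fmt.Println(setReplicasAnnotations(ann, 3, 4)) // true: both keys written
	fmt.Println(setReplicasAnnotations(ann, 3, 4)) // false: already up to date
	fmt.Println(ann[desiredReplicasAnnotation], ann[maxReplicasAnnotation])
}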
@@ -404,7 +406,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
return int32(0) return int32(0)
} }
// Error caught by validation // Error caught by validation
_, maxUnavailable, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas) _, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxUnavailable return maxUnavailable
} }
@@ -413,7 +415,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 {
if !IsRollingUpdate(deployment) { if !IsRollingUpdate(deployment) {
return int32(0) return int32(0)
} }
return deployment.Spec.Replicas - MaxUnavailable(*deployment) return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment)
} }
// MaxSurge returns the maximum surge pods a rolling deployment can take. // MaxSurge returns the maximum surge pods a rolling deployment can take.
@@ -422,7 +424,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
return int32(0) return int32(0)
} }
// Error caught by validation // Error caught by validation
maxSurge, _, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas) maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxSurge return maxSurge
} }
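Editor's note: MaxSurge and MaxUnavailable both lean on ResolveFenceposts, now fed pointer-typed IntOrString fields. A standalone sketch of the fencepost arithmetic as I understand it, using plain strings instead of intstr values: surge rounds up, unavailable rounds down, and if both come out zero, unavailable is bumped to 1 so a rolling update can still make progress.

package main

import (
	"fmt"
	"math"
	"strconv"
	"strings"
)

// resolve turns an int-or-percent value ("25%" or "2") into an absolute
// count against total, rounding up or down as requested.
func resolve(val string, total int32, roundUp bool) (int32, error) {
	if strings.HasSuffix(val, "%") {
		pct, err := strconv.Atoi(strings.TrimSuffix(val, "%"))
		if err != nil {
			return 0, err
		}
		f := float64(total) * float64(pct) / 100.0
		if roundUp {
			return int32(math.Ceil(f)), nil
		}
		return int32(math.Floor(f)), nil
	}
	n, err := strconv.Atoi(val)
	return int32(n), err
}

// fenceposts mirrors the idea behind ResolveFenceposts: surge rounds up,
// unavailable rounds down, and a 0/0 pair is corrected to unavailable=1.
func fenceposts(maxSurge, maxUnavailable string, desired int32) (int32, int32, error) {
	surge, err := resolve(maxSurge, desired, true)
	if err != nil {
		return 0, 0, err
	}
	unavailable, err := resolve(maxUnavailable, desired, false)
	if err != nil {
		return 0, 0, err
	}
	if surge == 0 && unavailable == 0 {
		unavailable = 1
	}
	return surge, unavailable, nil
}

func main() {
	s, u, _ := fenceposts("25%", "25%", 10)
	fmt.Println(s, u) // 3 2
}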
@@ -430,7 +432,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
// of the parent deployment, 2. the replica count that needs to be added to the replica sets of the // of the parent deployment, 2. the replica count that needs to be added to the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far. // deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 { func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || rs.Spec.Replicas == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded { if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0) return int32(0)
} }
@@ -453,11 +455,11 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen
// 1. a scaling event during a rollout or 2. when scaling a paused deployment. // 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 { func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative) // If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if d.Spec.Replicas == int32(0) { if *(d.Spec.Replicas) == int32(0) {
return -rs.Spec.Replicas return -*(rs.Spec.Replicas)
} }
deploymentReplicas := d.Spec.Replicas + MaxSurge(d) deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
annotatedReplicas, ok := getMaxReplicasAnnotation(&rs) annotatedReplicas, ok := getMaxReplicasAnnotation(&rs)
if !ok { if !ok {
// If we cannot find the annotation then fallback to the current deployment size. Note that this // If we cannot find the annotation then fallback to the current deployment size. Note that this
@@ -469,8 +471,8 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in
// We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas // We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas
// will never be zero here. // will never be zero here.
newRSsize := (float64(rs.Spec.Replicas * deploymentReplicas)) / float64(annotatedReplicas) newRSsize := (float64(*(rs.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas)
return integer.RoundToInt32(newRSsize) - rs.Spec.Replicas return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
} }
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface. // GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
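Editor's note: the getReplicaSetFraction hunk above gives each replica set a share of the new total (desired plus surge), measured against the size recorded when the rollout started. A standalone sketch of that proportion, with the annotated size passed in as a plain parameter; names are mine.

package main

import (
	"fmt"
	"math"
)

// replicaSetFraction returns how much a replica set should grow or shrink
// to keep its proportional share of the deployment's new total size.
func replicaSetFraction(rsReplicas, desired, maxSurge, annotatedMax int32) int32 {
	if desired == 0 {
		// Scaling the deployment to zero: shed this set's whole size.
		return -rsReplicas
	}
	newTotal := desired + maxSurge
	newSize := float64(rsReplicas*newTotal) / float64(annotatedMax)
	return int32(math.Round(newSize)) - rsReplicas
}

func main() {
	// A set holding 6 of an annotated 10, deployment scaled to 20 (+2 surge):
	// 6*22/10 = 13.2, rounded to 13, so the set grows by 7.
	fmt.Println(replicaSetFraction(6, 20, 2, 10)) // 7
}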
@@ -523,7 +525,7 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface)
// listReplicaSets lists all RSes the given deployment targets with the given client interface. // listReplicaSets lists all RSes the given deployment targets with the given client interface.
func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) { func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) {
return ListReplicaSets(deployment, return ListReplicaSets(deployment,
func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) { func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
rsList, err := c.Extensions().ReplicaSets(namespace).List(options) rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
if err != nil { if err != nil {
return nil, err return nil, err
@@ -537,16 +539,16 @@ func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) (
} }
// listPods lists all Pods the given deployment targets with the given client interface. // listPods lists all Pods the given deployment targets with the given client interface.
func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.PodList, error) { func listPods(deployment *extensions.Deployment, c clientset.Interface) (*v1.PodList, error) {
return ListPods(deployment, return ListPods(deployment,
func(namespace string, options api.ListOptions) (*api.PodList, error) { func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options) return c.Core().Pods(namespace).List(options)
}) })
} }
// TODO: switch this to full namespacers // TODO: switch this to full namespacers
type rsListFunc func(string, api.ListOptions) ([]*extensions.ReplicaSet, error) type rsListFunc func(string, v1.ListOptions) ([]*extensions.ReplicaSet, error)
type podListFunc func(string, api.ListOptions) (*api.PodList, error) type podListFunc func(string, v1.ListOptions) (*v1.PodList, error)
// ListReplicaSets returns a slice of RSes the given deployment targets. // ListReplicaSets returns a slice of RSes the given deployment targets.
func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) { func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) {
@@ -558,18 +560,18 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([
if err != nil { if err != nil {
return nil, err return nil, err
} }
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
return getRSList(namespace, options) return getRSList(namespace, options)
} }
// ListPods returns a list of pods the given deployment targets. // ListPods returns a list of pods the given deployment targets.
func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.PodList, error) { func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*v1.PodList, error) {
namespace := deployment.Namespace namespace := deployment.Namespace
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector) selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil { if err != nil {
return nil, err return nil, err
} }
options := api.ListOptions{LabelSelector: selector} options := v1.ListOptions{LabelSelector: selector.String()}
return getPodList(namespace, options) return getPodList(namespace, options)
} }
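Editor's note: with the versioned client, ListOptions.LabelSelector is a string, hence the selector.String() calls above. A minimal sketch of rendering equality-based match labels into that "k=v,k2=v2" form; it covers only the equality subset, not set-based requirements.

package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorString renders match labels in the comma-separated "k=v" form
// that a string-typed LabelSelector field expects. Keys are sorted so the
// output is stable.
func selectorString(matchLabels map[string]string) string {
	keys := make([]string, 0, len(matchLabels))
	for k := range matchLabels {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	parts := make([]string, 0, len(keys))
	for _, k := range keys {
		parts = append(parts, k+"="+matchLabels[k])
	}
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(selectorString(map[string]string{"name": "nginx", "tier": "frontend"}))
	// name=nginx,tier=frontend
}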
@@ -577,7 +579,7 @@ func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.P
// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes // We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change) // (e.g. the addition of a new field will cause the hash code to change)
// Note that we assume input podTemplateSpecs contain non-empty labels // Note that we assume input podTemplateSpecs contain non-empty labels
func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) { func equalIgnoreHash(template1, template2 v1.PodTemplateSpec) (bool, error) {
// First, compare template.Labels (ignoring hash) // First, compare template.Labels (ignoring hash)
labels1, labels2 := template1.Labels, template2.Labels labels1, labels2 := template1.Labels, template2.Labels
// The podTemplateSpec must have a non-empty label so that label selectors can find them. // The podTemplateSpec must have a non-empty label so that label selectors can find them.
@@ -597,7 +599,7 @@ func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) {
// Then, compare the templates without comparing their labels // Then, compare the templates without comparing their labels
template1.Labels, template2.Labels = nil, nil template1.Labels, template2.Labels = nil, nil
result := api.Semantic.DeepEqual(template1, template2) result := v1.Semantic.DeepEqual(template1, template2)
return result, nil return result, nil
} }
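Editor's note: equalIgnoreHash compares two pod templates after setting their labels aside, so that only the hash label can never cause a mismatch. A standalone sketch of the same idea applied just to label maps; the hash key constant here is an assumption standing in for the controller's unique-label key.

package main

import (
	"fmt"
	"reflect"
)

// Assumed hash label key, standing in for the label the controller strips
// before comparing templates.
const hashLabelKey = "pod-template-hash"

// equalIgnoringHash compares two label maps after dropping the hash label.
func equalIgnoringHash(labels1, labels2 map[string]string) bool {
	strip := func(in map[string]string) map[string]string {
		out := map[string]string{}
		for k, v := range in {
			if k != hashLabelKey {
				out[k] = v
			}
		}
		return out
	}
	return reflect.DeepEqual(strip(labels1), strip(labels2))
}

func main() {
	a := map[string]string{"app": "nginx", hashLabelKey: "12345"}
	b := map[string]string{"app": "nginx", hashLabelKey: "67890"}
	fmt.Println(equalIgnoringHash(a, b)) // true: only the hash differs
}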
@@ -620,7 +622,7 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R
// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes. // FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets. // Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets includes all old replica sets.
func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) { func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *v1.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
// Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList. // Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList.
// All pods and replica sets are labeled with pod-template-hash to prevent overlapping // All pods and replica sets are labeled with pod-template-hash to prevent overlapping
oldRSs := map[string]*extensions.ReplicaSet{} oldRSs := map[string]*extensions.ReplicaSet{}
@@ -679,19 +681,19 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na
return false, err return false, err
} }
return rs.Status.ObservedGeneration >= desiredGeneration && return rs.Status.ObservedGeneration >= desiredGeneration &&
rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil rs.Status.FullyLabeledReplicas == *(rs.Spec.Replicas), nil
}) })
} }
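Editor's note: WaitForPodsHashPopulated above polls until the replica set's observed generation and fully-labeled replica count catch up with the desired state. A bare-bones sketch of that poll loop using only the standard library; it stands in for the wait helper the controller actually uses.

package main

import (
	"errors"
	"fmt"
	"time"
)

// pollUntil calls cond every interval until it returns true, returns an
// error, or the timeout elapses.
func pollUntil(interval, timeout time.Duration, cond func() (bool, error)) error {
	deadline := time.Now().Add(timeout)
	for {
		done, err := cond()
		if err != nil {
			return err
		}
		if done {
			return nil
		}
		if time.Now().After(deadline) {
			return errors.New("timed out waiting for condition")
		}
		time.Sleep(interval)
	}
}

func main() {
	observedGeneration, fullyLabeled := int64(0), int32(0)
	err := pollUntil(10*time.Millisecond, time.Second, func() (bool, error) {
		// Simulate the replica set catching up with the desired state.
		observedGeneration++
		fullyLabeled++
		return observedGeneration >= 3 && fullyLabeled == 3, nil
	})
	fmt.Println(err) // <nil>
}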
// LabelPodsWithHash labels all pods in the given podList with the new hash label. // LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled. // The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) { func LabelPodsWithHash(podList *v1.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
allPodsLabeled := true allPodsLabeled := true
for _, pod := range podList.Items { for _, pod := range podList.Items {
// Only label the pod that doesn't already have the new hash // Only label the pod that doesn't already have the new hash
if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash { if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod, if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
func(podToUpdate *api.Pod) error { func(podToUpdate *v1.Pod) error {
// Precondition: the pod doesn't contain the new hash in its label. // Precondition: the pod doesn't contain the new hash in its label.
if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash { if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
return errors.ErrPreconditionViolated return errors.ErrPreconditionViolated
@@ -713,9 +715,9 @@ func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c client
} }
// GetNewReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given Deployment. // GetNewReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given Deployment.
func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec { func GetNewReplicaSetTemplate(deployment *extensions.Deployment) v1.PodTemplateSpec {
// newRS will have the same template as in deployment spec, plus a unique label in some cases. // newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api.PodTemplateSpec{ newRSTemplate := v1.PodTemplateSpec{
ObjectMeta: deployment.Spec.Template.ObjectMeta, ObjectMeta: deployment.Spec.Template.ObjectMeta,
Spec: deployment.Spec.Template.Spec, Spec: deployment.Spec.Template.Spec,
} }
@@ -726,8 +728,23 @@ func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplate
return newRSTemplate return newRSTemplate
} }
// TODO: remove the duplicate
// GetNewInternalReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given Deployment.
func GetNewInternalReplicaSetTemplate(deployment *internalextensions.Deployment) api.PodTemplateSpec {
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api.PodTemplateSpec{
ObjectMeta: deployment.Spec.Template.ObjectMeta,
Spec: deployment.Spec.Template.Spec,
}
newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
deployment.Spec.Template.ObjectMeta.Labels,
internalextensions.DefaultDeploymentUniqueLabelKey,
podutil.GetInternalPodTemplateSpecHash(newRSTemplate))
return newRSTemplate
}
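Editor's note: both template builders above copy the template's labels and add a hash-derived unique label via CloneAndAddLabel. A standalone sketch of that clone-and-add step, with an FNV hash as an illustrative stand-in for the controller's pod-template hashing.

package main

import (
	"fmt"
	"hash/fnv"
)

// cloneAndAddLabel copies the label map and adds one extra key, leaving the
// caller's map untouched.
func cloneAndAddLabel(labels map[string]string, key, value string) map[string]string {
	out := make(map[string]string, len(labels)+1)
	for k, v := range labels {
		out[k] = v
	}
	out[key] = value
	return out
}

// templateHash is an illustrative hash of a serialized template, not the
// controller's real hashing function.
func templateHash(template string) string {
	h := fnv.New32a()
	h.Write([]byte(template))
	return fmt.Sprint(h.Sum32())
}

func main() {
	base := map[string]string{"name": "nginx"}
	labeled := cloneAndAddLabel(base, "pod-template-hash", templateHash("nginx:1.11"))
	fmt.Println(len(base), len(labeled)) // 1 2: the original map is unchanged
}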
// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment. // SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment { func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel( deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
@@ -741,7 +758,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
totalReplicas := int32(0) totalReplicas := int32(0)
for _, rs := range replicaSets { for _, rs := range replicaSets {
if rs != nil { if rs != nil {
totalReplicas += rs.Spec.Replicas totalReplicas += *(rs.Spec.Replicas)
} }
} }
return totalReplicas return totalReplicas
@@ -772,7 +789,7 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet
// IsPodAvailable returns true if the pod is available. // IsPodAvailable returns true if the pod is available.
// TODO: Remove this once we start using replica set status for calculating available pods // TODO: Remove this once we start using replica set status for calculating available pods
// for a deployment. // for a deployment.
func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool { func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now time.Time) bool {
if !controller.IsPodActive(pod) { if !controller.IsPodActive(pod) {
return false return false
} }
@@ -780,7 +797,7 @@ func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
// If so, this pod is ready // If so, this pod is ready
for _, c := range pod.Status.Conditions { for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions // we only care about pod ready conditions
if c.Type == api.PodReady && c.Status == api.ConditionTrue { if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
glog.V(4).Infof("Comparing pod %s/%s ready condition last transition time %s + minReadySeconds %d with now %s.", pod.Namespace, pod.Name, c.LastTransitionTime.String(), minReadySeconds, now.String()) glog.V(4).Infof("Comparing pod %s/%s ready condition last transition time %s + minReadySeconds %d with now %s.", pod.Namespace, pod.Name, c.LastTransitionTime.String(), minReadySeconds, now.String())
// 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available): // 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available):
// 1. minReadySeconds == 0, or // 1. minReadySeconds == 0, or
@@ -802,8 +819,8 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool {
// DeploymentComplete considers a deployment to be complete once its desired replicas equals its // DeploymentComplete considers a deployment to be complete once its desired replicas equals its
// updatedReplicas and it doesn't violate minimum availability. // updatedReplicas and it doesn't violate minimum availability.
func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool { func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == deployment.Spec.Replicas && return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas >= deployment.Spec.Replicas-MaxUnavailable(*deployment) newStatus.AvailableReplicas >= *(deployment.Spec.Replicas)-MaxUnavailable(*deployment)
} }
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the // DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
@@ -857,24 +874,24 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
switch deployment.Spec.Strategy.Type { switch deployment.Spec.Strategy.Type {
case extensions.RollingUpdateDeploymentStrategyType: case extensions.RollingUpdateDeploymentStrategyType:
// Check if we can scale up. // Check if we can scale up.
maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true) maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil { if err != nil {
return 0, err return 0, err
} }
// Find the total number of pods // Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets(allRSs) currentPodCount := GetReplicaCountForReplicaSets(allRSs)
maxTotalPods := deployment.Spec.Replicas + int32(maxSurge) maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge)
if currentPodCount >= maxTotalPods { if currentPodCount >= maxTotalPods {
// Cannot scale up. // Cannot scale up.
return newRS.Spec.Replicas, nil return *(newRS.Spec.Replicas), nil
} }
// Scale up. // Scale up.
scaleUpCount := maxTotalPods - currentPodCount scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas. // Do not exceed the number of desired replicas.
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas))) scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
return newRS.Spec.Replicas + scaleUpCount, nil return *(newRS.Spec.Replicas) + scaleUpCount, nil
case extensions.RecreateDeploymentStrategyType: case extensions.RecreateDeploymentStrategyType:
return deployment.Spec.Replicas, nil return *(deployment.Spec.Replicas), nil
default: default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type) return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
} }
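Editor's note: the RollingUpdate branch of NewRSNewReplicas above scales the new replica set up to the surge ceiling without exceeding the deployment's desired size. A standalone sketch of that arithmetic with plain int32 inputs; names are mine.

package main

import "fmt"

func minInt32(a, b int32) int32 {
	if a < b {
		return a
	}
	return b
}

// rollingUpdateNewRSReplicas stays under desired+maxSurge total pods and
// never gives the new replica set more than the deployment ultimately wants.
func rollingUpdateNewRSReplicas(desired, maxSurge, currentTotal, newRSReplicas int32) int32 {
	maxTotal := desired + maxSurge
	if currentTotal >= maxTotal {
		return newRSReplicas // no headroom to scale up
	}
	scaleUp := minInt32(maxTotal-currentTotal, desired-newRSReplicas)
	return newRSReplicas + scaleUp
}

func main() {
	// desired=10, maxSurge=3, 11 pods already running, new RS holds 4:
	// headroom is 13-11=2, so the new RS goes to 6.
	fmt.Println(rollingUpdateNewRSReplicas(10, 3, 11, 4)) // 6
}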
@@ -892,7 +909,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
if err != nil { if err != nil {
return false return false
} }
return rs.Spec.Replicas == deployment.Spec.Replicas && int32(desired) == deployment.Spec.Replicas return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas)
} }
// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration. // WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.

View File

@@ -26,8 +26,9 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
@@ -74,22 +75,22 @@ func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {
func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset { func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(core.UpdateAction).GetObject().(*api.Pod) obj := action.(core.UpdateAction).GetObject().(*v1.Pod)
return true, obj, nil return true, obj, nil
}) })
return fakeClient return fakeClient
} }
func newPod(now time.Time, ready bool, beforeSec int) api.Pod { func newPod(now time.Time, ready bool, beforeSec int) v1.Pod {
conditionStatus := api.ConditionFalse conditionStatus := v1.ConditionFalse
if ready { if ready {
conditionStatus = api.ConditionTrue conditionStatus = v1.ConditionTrue
} }
return api.Pod{ return v1.Pod{
Status: api.PodStatus{ Status: v1.PodStatus{
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: api.PodReady, Type: v1.PodReady,
LastTransitionTime: unversioned.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)), LastTransitionTime: unversioned.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)),
Status: conditionStatus, Status: conditionStatus,
}, },
@@ -99,27 +100,27 @@ func newPod(now time.Time, ready bool, beforeSec int) api.Pod {
} }
// generatePodFromRS creates a pod with the input ReplicaSet's labels and its template's pod spec // generatePodFromRS creates a pod with the input ReplicaSet's labels and its template's pod spec
func generatePodFromRS(rs extensions.ReplicaSet) api.Pod { func generatePodFromRS(rs extensions.ReplicaSet) v1.Pod {
return api.Pod{ return v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: rs.Labels, Labels: rs.Labels,
}, },
Spec: rs.Spec.Template.Spec, Spec: rs.Spec.Template.Spec,
} }
} }
func generatePod(labels map[string]string, image string) api.Pod { func generatePod(labels map[string]string, image string) v1.Pod {
return api.Pod{ return v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: image, Name: image,
Image: image, Image: image,
ImagePullPolicy: api.PullAlways, ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
}, },
}, },
}, },
@@ -128,24 +129,24 @@ func generatePod(labels map[string]string, image string) api.Pod {
func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet { func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
return extensions.ReplicaSet{ return extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName("replicaset"), Name: v1.SimpleNameGenerator.GenerateName("replicaset"),
Labels: labels, Labels: labels,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: 1, Replicas: func(i int32) *int32 { return &i }(1),
Selector: &unversioned.LabelSelector{MatchLabels: labels}, Selector: &unversioned.LabelSelector{MatchLabels: labels},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: image, Name: image,
Image: image, Image: image,
ImagePullPolicy: api.PullAlways, ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
}, },
}, },
}, },
@@ -158,11 +159,12 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet { func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
template := GetNewReplicaSetTemplate(&deployment) template := GetNewReplicaSetTemplate(&deployment)
return extensions.ReplicaSet{ return extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName("replicaset"), Name: v1.SimpleNameGenerator.GenerateName("replicaset"),
Labels: template.Labels, Labels: template.Labels,
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(0); return &i }(),
Template: template, Template: template,
Selector: &unversioned.LabelSelector{MatchLabels: template.Labels}, Selector: &unversioned.LabelSelector{MatchLabels: template.Labels},
}, },
@@ -174,29 +176,29 @@ func generateDeployment(image string) extensions.Deployment {
podLabels := map[string]string{"name": image} podLabels := map[string]string{"name": image}
terminationSec := int64(30) terminationSec := int64(30)
return extensions.Deployment{ return extensions.Deployment{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: image, Name: image,
}, },
Spec: extensions.DeploymentSpec{ Spec: extensions.DeploymentSpec{
Replicas: 1, Replicas: func(i int32) *int32 { return &i }(1),
Selector: &unversioned.LabelSelector{MatchLabels: podLabels}, Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: podLabels, Labels: podLabels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: image, Name: image,
Image: image, Image: image,
ImagePullPolicy: api.PullAlways, ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
}, },
}, },
DNSPolicy: api.DNSClusterFirst, DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &terminationSec, TerminationGracePeriodSeconds: &terminationSec,
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
SecurityContext: &api.PodSecurityContext{}, SecurityContext: &v1.PodSecurityContext{},
}, },
}, },
}, },
@@ -215,7 +217,7 @@ func TestGetNewRC(t *testing.T) {
{ {
"No new ReplicaSet", "No new ReplicaSet",
[]runtime.Object{ []runtime.Object{
&api.PodList{}, &v1.PodList{},
&extensions.ReplicaSetList{ &extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{ Items: []extensions.ReplicaSet{
generateRS(generateDeployment("foo")), generateRS(generateDeployment("foo")),
@@ -228,7 +230,7 @@ func TestGetNewRC(t *testing.T) {
{ {
"Has new ReplicaSet", "Has new ReplicaSet",
[]runtime.Object{ []runtime.Object{
&api.PodList{}, &v1.PodList{},
&extensions.ReplicaSetList{ &extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{ Items: []extensions.ReplicaSet{
generateRS(generateDeployment("foo")), generateRS(generateDeployment("foo")),
@@ -253,7 +255,7 @@ func TestGetNewRC(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.test, err) t.Errorf("In test case %s, got unexpected error %v", test.test, err)
} }
if !api.Semantic.DeepEqual(rs, test.expected) { if !v1.Semantic.DeepEqual(rs, test.expected) {
t.Errorf("In test case %s, expected %#v, got %#v", test.test, test.expected, rs) t.Errorf("In test case %s, expected %#v, got %#v", test.test, test.expected, rs)
} }
} }
@@ -262,25 +264,25 @@ func TestGetNewRC(t *testing.T) {
func TestGetOldRCs(t *testing.T) { func TestGetOldRCs(t *testing.T) {
newDeployment := generateDeployment("nginx") newDeployment := generateDeployment("nginx")
newRS := generateRS(newDeployment) newRS := generateRS(newDeployment)
newRS.Status.FullyLabeledReplicas = newRS.Spec.Replicas newRS.Status.FullyLabeledReplicas = *(newRS.Spec.Replicas)
newPod := generatePodFromRS(newRS) newPod := generatePodFromRS(newRS)
// create 2 old deployments and related replica sets/pods, with the same labels but different template // create 2 old deployments and related replica sets/pods, with the same labels but different template
oldDeployment := generateDeployment("nginx") oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1" oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment) oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
oldPod := generatePodFromRS(oldRS) oldPod := generatePodFromRS(oldRS)
oldDeployment2 := generateDeployment("nginx") oldDeployment2 := generateDeployment("nginx")
oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2" oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2"
oldRS2 := generateRS(oldDeployment2) oldRS2 := generateRS(oldDeployment2)
oldRS2.Status.FullyLabeledReplicas = oldRS2.Spec.Replicas oldRS2.Status.FullyLabeledReplicas = *(oldRS2.Spec.Replicas)
oldPod2 := generatePodFromRS(oldRS2) oldPod2 := generatePodFromRS(oldRS2)
// create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment // create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment
existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo") existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo")
existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo") existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo")
existedRS.Status.FullyLabeledReplicas = existedRS.Spec.Replicas existedRS.Status.FullyLabeledReplicas = *(existedRS.Spec.Replicas)
tests := []struct { tests := []struct {
test string test string
@@ -290,8 +292,8 @@ func TestGetOldRCs(t *testing.T) {
{ {
"No old ReplicaSets", "No old ReplicaSets",
[]runtime.Object{ []runtime.Object{
&api.PodList{ &v1.PodList{
Items: []api.Pod{ Items: []v1.Pod{
generatePod(newDeployment.Spec.Template.Labels, "foo"), generatePod(newDeployment.Spec.Template.Labels, "foo"),
generatePod(newDeployment.Spec.Template.Labels, "bar"), generatePod(newDeployment.Spec.Template.Labels, "bar"),
newPod, newPod,
@@ -310,8 +312,8 @@ func TestGetOldRCs(t *testing.T) {
{ {
"Has old ReplicaSet", "Has old ReplicaSet",
[]runtime.Object{ []runtime.Object{
&api.PodList{ &v1.PodList{
Items: []api.Pod{ Items: []v1.Pod{
oldPod, oldPod,
oldPod2, oldPod2,
generatePod(map[string]string{"name": "bar"}, "bar"), generatePod(map[string]string{"name": "bar"}, "bar"),
@@ -359,14 +361,14 @@ func TestGetOldRCs(t *testing.T) {
} }
} }
func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) api.PodTemplateSpec { func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1.PodTemplateSpec {
return api.PodTemplateSpec{ return v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Annotations: annotations, Annotations: annotations,
Labels: labels, Labels: labels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeName: nodeName, NodeName: nodeName,
}, },
} }
@@ -375,7 +377,7 @@ func generatePodTemplateSpec(name, nodeName string, annotations, labels map[stri
func TestEqualIgnoreHash(t *testing.T) { func TestEqualIgnoreHash(t *testing.T) {
tests := []struct { tests := []struct {
test string test string
former, latter api.PodTemplateSpec former, latter v1.PodTemplateSpec
expected bool expected bool
}{ }{
{ {
@@ -429,7 +431,7 @@ func TestEqualIgnoreHash(t *testing.T) {
} }
for _, test := range tests { for _, test := range tests {
runTest := func(t1, t2 api.PodTemplateSpec, reversed bool) { runTest := func(t1, t2 v1.PodTemplateSpec, reversed bool) {
// Set up // Set up
t1Copy, err := api.Scheme.DeepCopy(t1) t1Copy, err := api.Scheme.DeepCopy(t1)
if err != nil { if err != nil {
@@ -472,7 +474,7 @@ func TestFindNewReplicaSet(t *testing.T) {
oldDeployment := generateDeployment("nginx") oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1" oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment) oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
tests := []struct { tests := []struct {
test string test string
@@ -508,7 +510,7 @@ func TestFindOldReplicaSets(t *testing.T) {
oldDeployment := generateDeployment("nginx") oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1" oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment) oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
newPod := generatePodFromRS(newRS) newPod := generatePodFromRS(newRS)
oldPod := generatePodFromRS(oldRS) oldPod := generatePodFromRS(oldRS)
@@ -516,15 +518,15 @@ func TestFindOldReplicaSets(t *testing.T) {
test string test string
deployment extensions.Deployment deployment extensions.Deployment
rsList []*extensions.ReplicaSet rsList []*extensions.ReplicaSet
podList *api.PodList podList *v1.PodList
expected []*extensions.ReplicaSet expected []*extensions.ReplicaSet
}{ }{
{ {
test: "Get old ReplicaSets", test: "Get old ReplicaSets",
deployment: deployment, deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS, &oldRS}, rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
podList: &api.PodList{ podList: &v1.PodList{
Items: []api.Pod{ Items: []v1.Pod{
newPod, newPod,
oldPod, oldPod,
}, },
@@ -535,8 +537,8 @@ func TestFindOldReplicaSets(t *testing.T) {
test: "Get old ReplicaSets with no new ReplicaSet", test: "Get old ReplicaSets with no new ReplicaSet",
deployment: deployment, deployment: deployment,
rsList: []*extensions.ReplicaSet{&oldRS}, rsList: []*extensions.ReplicaSet{&oldRS},
podList: &api.PodList{ podList: &v1.PodList{
Items: []api.Pod{ Items: []v1.Pod{
oldPod, oldPod,
}, },
}, },
@@ -546,8 +548,8 @@ func TestFindOldReplicaSets(t *testing.T) {
test: "Get empty old ReplicaSets", test: "Get empty old ReplicaSets",
deployment: deployment, deployment: deployment,
rsList: []*extensions.ReplicaSet{&newRS}, rsList: []*extensions.ReplicaSet{&newRS},
podList: &api.PodList{ podList: &v1.PodList{
Items: []api.Pod{ Items: []v1.Pod{
newPod, newPod,
}, },
}, },
@@ -584,10 +586,10 @@ func equal(rss1, rss2 []*extensions.ReplicaSet) bool {
func TestGetReplicaCountForReplicaSets(t *testing.T) { func TestGetReplicaCountForReplicaSets(t *testing.T) {
rs1 := generateRS(generateDeployment("foo")) rs1 := generateRS(generateDeployment("foo"))
rs1.Spec.Replicas = 1 *(rs1.Spec.Replicas) = 1
rs1.Status.Replicas = 2 rs1.Status.Replicas = 2
rs2 := generateRS(generateDeployment("bar")) rs2 := generateRS(generateDeployment("bar"))
rs2.Spec.Replicas = 2 *(rs2.Spec.Replicas) = 2
rs2.Status.Replicas = 3 rs2.Status.Replicas = 3
tests := []struct { tests := []struct {
@@ -715,16 +717,16 @@ func TestNewRSNewReplicas(t *testing.T) {
newDeployment := generateDeployment("nginx") newDeployment := generateDeployment("nginx")
newRC := generateRS(newDeployment) newRC := generateRS(newDeployment)
rs5 := generateRS(newDeployment) rs5 := generateRS(newDeployment)
rs5.Spec.Replicas = 5 *(rs5.Spec.Replicas) = 5
for _, test := range tests { for _, test := range tests {
newDeployment.Spec.Replicas = test.depReplicas *(newDeployment.Spec.Replicas) = test.depReplicas
newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType} newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType}
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{ newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
MaxUnavailable: intstr.FromInt(1), MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
MaxSurge: intstr.FromInt(test.maxSurge), MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(test.maxSurge),
} }
newRC.Spec.Replicas = test.newRSReplicas *(newRC.Spec.Replicas) = test.newRSReplicas
rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC) rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC)
if err != nil { if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.test, err) t.Errorf("In test case %s, got unexpected error %v", test.test, err)
@@ -739,7 +741,7 @@ var (
condProgressing = func() extensions.DeploymentCondition { condProgressing = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{ return extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing, Type: extensions.DeploymentProgressing,
Status: api.ConditionFalse, Status: v1.ConditionFalse,
Reason: "ForSomeReason", Reason: "ForSomeReason",
} }
} }
@@ -747,7 +749,7 @@ var (
condProgressing2 = func() extensions.DeploymentCondition { condProgressing2 = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{ return extensions.DeploymentCondition{
Type: extensions.DeploymentProgressing, Type: extensions.DeploymentProgressing,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "BecauseItIs", Reason: "BecauseItIs",
} }
} }
@@ -755,7 +757,7 @@ var (
condAvailable = func() extensions.DeploymentCondition { condAvailable = func() extensions.DeploymentCondition {
return extensions.DeploymentCondition{ return extensions.DeploymentCondition{
Type: extensions.DeploymentAvailable, Type: extensions.DeploymentAvailable,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "AwesomeController", Reason: "AwesomeController",
} }
} }
@@ -775,7 +777,7 @@ func TestGetCondition(t *testing.T) {
status extensions.DeploymentStatus status extensions.DeploymentStatus
condType extensions.DeploymentConditionType condType extensions.DeploymentConditionType
condStatus api.ConditionStatus condStatus v1.ConditionStatus
condReason string condReason string
expected bool expected bool
@@ -897,10 +899,11 @@ func TestDeploymentComplete(t *testing.T) {
deployment := func(desired, current, updated, available, maxUnavailable int32) *extensions.Deployment { deployment := func(desired, current, updated, available, maxUnavailable int32) *extensions.Deployment {
return &extensions.Deployment{ return &extensions.Deployment{
Spec: extensions.DeploymentSpec{ Spec: extensions.DeploymentSpec{
Replicas: desired, Replicas: &desired,
Strategy: extensions.DeploymentStrategy{ Strategy: extensions.DeploymentStrategy{
RollingUpdate: &extensions.RollingUpdateDeployment{ RollingUpdate: &extensions.RollingUpdateDeployment{
MaxUnavailable: intstr.FromInt(int(maxUnavailable)), MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
MaxSurge: func() *intstr.IntOrString { x := intstr.FromInt(0); return &x }(),
}, },
Type: extensions.RollingUpdateDeploymentStrategyType, Type: extensions.RollingUpdateDeploymentStrategyType,
}, },
@@ -1047,7 +1050,7 @@ func TestDeploymentTimedOut(t *testing.T) {
timeFn := func(min, sec int) time.Time { timeFn := func(min, sec int) time.Time {
return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC) return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC)
} }
deployment := func(condType extensions.DeploymentConditionType, status api.ConditionStatus, pds *int32, from time.Time) extensions.Deployment { deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, pds *int32, from time.Time) extensions.Deployment {
return extensions.Deployment{ return extensions.Deployment{
Spec: extensions.DeploymentSpec{ Spec: extensions.DeploymentSpec{
ProgressDeadlineSeconds: pds, ProgressDeadlineSeconds: pds,
@@ -1075,21 +1078,21 @@ func TestDeploymentTimedOut(t *testing.T) {
{ {
name: "no progressDeadlineSeconds specified - no timeout", name: "no progressDeadlineSeconds specified - no timeout",
d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, null, timeFn(1, 9)), d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, null, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) }, nowFn: func() time.Time { return timeFn(1, 20) },
expected: false, expected: false,
}, },
{ {
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s", name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",
d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, &ten, timeFn(1, 9)), d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, &ten, timeFn(1, 9)),
nowFn: func() time.Time { return timeFn(1, 20) }, nowFn: func() time.Time { return timeFn(1, 20) },
expected: true, expected: true,
}, },
{ {
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s", name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s",
d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, &ten, timeFn(1, 11)), d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, &ten, timeFn(1, 11)),
nowFn: func() time.Time { return timeFn(1, 20) }, nowFn: func() time.Time { return timeFn(1, 20) },
expected: false, expected: false,
}, },
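Editor's note: the TestDeploymentTimedOut cases above boil down to one comparison: the deployment has timed out when more than progressDeadlineSeconds have passed since the Progressing condition last changed. A standalone sketch of that check, reproducing the 11s-vs-9s cases from the table; names are mine.

package main

import (
	"fmt"
	"time"
)

// progressTimedOut reports whether the progress deadline has been exceeded
// since the last recorded progress time.
func progressTimedOut(lastProgress, now time.Time, progressDeadlineSeconds int32) bool {
	deadline := lastProgress.Add(time.Duration(progressDeadlineSeconds) * time.Second)
	return deadline.Before(now)
}

func main() {
	t := func(m, s int) time.Time { return time.Date(2016, 1, 1, 0, m, s, 0, time.UTC) }
	fmt.Println(progressTimedOut(t(1, 9), t(1, 20), 10))  // true: 11s elapsed
	fmt.Println(progressTimedOut(t(1, 11), t(1, 20), 10)) // false: only 9s elapsed
}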

View File

@@ -23,12 +23,13 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/policy" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion" policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -58,7 +59,7 @@ const DeletionTimeout = 2 * 60 * time.Second
type updater func(*policy.PodDisruptionBudget) error type updater func(*policy.PodDisruptionBudget) error
type DisruptionController struct { type DisruptionController struct {
kubeClient internalclientset.Interface kubeClient clientset.Interface
pdbStore cache.Store pdbStore cache.Store
pdbController *cache.Controller pdbController *cache.Controller
@@ -98,9 +99,9 @@ type controllerAndScale struct {
// podControllerFinder is a function type that maps a pod to a list of // podControllerFinder is a function type that maps a pod to a list of
// controllers and their scale. // controllers and their scale.
type podControllerFinder func(*api.Pod) ([]controllerAndScale, error) type podControllerFinder func(*v1.Pod) ([]controllerAndScale, error)
func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface) *DisruptionController { func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *DisruptionController {
dc := &DisruptionController{ dc := &DisruptionController{
kubeClient: kubeClient, kubeClient: kubeClient,
podController: podInformer.GetController(), podController: podInformer.GetController(),
@@ -108,7 +109,7 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"), recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
broadcaster: record.NewBroadcaster(), broadcaster: record.NewBroadcaster(),
} }
dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"}) dc.recorder = dc.broadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
dc.getUpdater = func() updater { return dc.writePdbStatus } dc.getUpdater = func() updater { return dc.writePdbStatus }
@@ -122,11 +123,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
dc.pdbStore, dc.pdbController = cache.NewInformer( dc.pdbStore, dc.pdbController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options) return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).Watch(options) return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).Watch(options)
}, },
}, },
&policy.PodDisruptionBudget{}, &policy.PodDisruptionBudget{},
@@ -141,14 +142,14 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
dc.rcIndexer, dc.rcController = cache.NewIndexerInformer( dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options) return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ReplicationController{}, &v1.ReplicationController{},
30*time.Second, 30*time.Second,
cache.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
@@ -158,11 +159,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
dc.rsLister.Indexer, dc.rsController = cache.NewIndexerInformer( dc.rsLister.Indexer, dc.rsController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options) return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options) return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
}, },
}, },
&extensions.ReplicaSet{}, &extensions.ReplicaSet{},
@@ -174,11 +175,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
dc.dIndexer, dc.dController = cache.NewIndexerInformer( dc.dIndexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options) return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).Watch(options) return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).Watch(options)
}, },
}, },
&extensions.Deployment{}, &extensions.Deployment{},
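Each of the informers above is built from the same ListWatch pair, now typed against the versioned v1.ListOptions. A minimal, self-contained sketch of that shape; all names here (ListOptions, Object, WatchInterface, ListWatch) are local stand-ins for illustration, not the real k8s.io packages.

package main

import (
	"errors"
	"fmt"
)

type ListOptions struct{ LabelSelector string } // placeholder for v1.ListOptions
type Object interface{}                         // placeholder for runtime.Object
type WatchInterface interface{ Stop() }         // placeholder for watch.Interface

// ListWatch pairs a list call with a watch call taking the same options type.
type ListWatch struct {
	ListFunc  func(options ListOptions) (Object, error)
	WatchFunc func(options ListOptions) (WatchInterface, error)
}

func main() {
	lw := ListWatch{
		ListFunc: func(options ListOptions) (Object, error) {
			return []string{"pdb-a", "pdb-b"}, nil // canned "API" result
		},
		WatchFunc: func(options ListOptions) (WatchInterface, error) {
			return nil, errors.New("watch omitted from this sketch")
		},
	}
	objs, _ := lw.ListFunc(ListOptions{})
	fmt.Println(objs)
}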
@@ -204,7 +205,7 @@ func (dc *DisruptionController) finders() []podControllerFinder {
} }
// getPodReplicaSets finds replicasets which have no matching deployments. // getPodReplicaSets finds replicasets which have no matching deployments.
func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAndScale, error) { func (dc *DisruptionController) getPodReplicaSets(pod *v1.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{} cas := []controllerAndScale{}
rss, err := dc.rsLister.GetPodReplicaSets(pod) rss, err := dc.rsLister.GetPodReplicaSets(pod)
// GetPodReplicaSets returns an error only if no ReplicaSets are found. We // GetPodReplicaSets returns an error only if no ReplicaSets are found. We
@@ -220,7 +221,7 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
if err == nil { // A deployment was found, so this finder will not count this RS. if err == nil { // A deployment was found, so this finder will not count this RS.
continue continue
} }
controllerScale[rs.UID] = rs.Spec.Replicas controllerScale[rs.UID] = *(rs.Spec.Replicas)
} }
for uid, scale := range controllerScale { for uid, scale := range controllerScale {
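The dereference *(rs.Spec.Replicas) is needed because the versioned types make Replicas a *int32 rather than an int32, so "unset" can be distinguished from zero. A minimal sketch with a local stand-in struct (not the real API type); the nil guard and the default of 1 are assumptions of this sketch, since the controller above relies on server-side defaulting and simply dereferences.

package main

import "fmt"

// ReplicaSetSpec is a local stand-in: in the versioned API the Replicas
// field is a pointer so that an unset value is distinguishable from zero.
type ReplicaSetSpec struct {
	Replicas *int32
}

// scaleOf returns the desired scale, treating a nil Replicas as 1
// (an assumption for this sketch).
func scaleOf(spec ReplicaSetSpec) int32 {
	if spec.Replicas == nil {
		return 1
	}
	return *spec.Replicas
}

func main() {
	three := int32(3)
	fmt.Println(scaleOf(ReplicaSetSpec{Replicas: &three})) // 3
	fmt.Println(scaleOf(ReplicaSetSpec{}))                 // 1 (defaulted)
}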
@@ -231,7 +232,7 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
} }
// getPodDeployments finds deployments for any replicasets which are being managed by deployments. // getPodDeployments finds deployments for any replicasets which are being managed by deployments.
func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAndScale, error) { func (dc *DisruptionController) getPodDeployments(pod *v1.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{} cas := []controllerAndScale{}
rss, err := dc.rsLister.GetPodReplicaSets(pod) rss, err := dc.rsLister.GetPodReplicaSets(pod)
// GetPodReplicaSets returns an error only if no ReplicaSets are found. We // GetPodReplicaSets returns an error only if no ReplicaSets are found. We
@@ -248,7 +249,7 @@ func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAnd
continue continue
} }
for _, d := range ds { for _, d := range ds {
controllerScale[d.UID] = d.Spec.Replicas controllerScale[d.UID] = *(d.Spec.Replicas)
} }
} }
@@ -259,12 +260,12 @@ func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAnd
return cas, nil return cas, nil
} }
func (dc *DisruptionController) getPodReplicationControllers(pod *api.Pod) ([]controllerAndScale, error) { func (dc *DisruptionController) getPodReplicationControllers(pod *v1.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{} cas := []controllerAndScale{}
rcs, err := dc.rcLister.GetPodControllers(pod) rcs, err := dc.rcLister.GetPodControllers(pod)
if err == nil { if err == nil {
for _, rc := range rcs { for _, rc := range rcs {
cas = append(cas, controllerAndScale{UID: rc.UID, scale: rc.Spec.Replicas}) cas = append(cas, controllerAndScale{UID: rc.UID, scale: *(rc.Spec.Replicas)})
} }
} }
return cas, nil return cas, nil
@@ -274,7 +275,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
glog.V(0).Infof("Starting disruption controller") glog.V(0).Infof("Starting disruption controller")
if dc.kubeClient != nil { if dc.kubeClient != nil {
glog.V(0).Infof("Sending events to api server.") glog.V(0).Infof("Sending events to api server.")
dc.broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")}) dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
} else { } else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.") glog.V(0).Infof("No api server defined - no events will be sent to API server.")
} }
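The broadcaster only records to the API server when a client was supplied; otherwise events stay local. A small sketch of that wiring with local stand-in types (Event, EventSink, broadcaster are placeholders, not the real record package API).

package main

import "fmt"

type Event struct{ Reason, Message string }

type EventSink interface {
	Create(e Event) error
}

type logSink struct{}

func (logSink) Create(e Event) error {
	fmt.Printf("event: %s: %s\n", e.Reason, e.Message)
	return nil
}

// broadcaster fans events out to whatever sinks have been registered; when
// no API client is configured, nothing is registered and events go nowhere.
type broadcaster struct{ sinks []EventSink }

func (b *broadcaster) StartRecordingToSink(s EventSink) { b.sinks = append(b.sinks, s) }
func (b *broadcaster) Event(e Event) {
	for _, s := range b.sinks {
		_ = s.Create(e)
	}
}

func main() {
	b := &broadcaster{}
	haveClient := true // stands in for dc.kubeClient != nil above
	if haveClient {
		b.StartRecordingToSink(logSink{})
	}
	b.Event(Event{Reason: "NoPods", Message: "No matching pods found"})
}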
@@ -310,7 +311,7 @@ func (dc *DisruptionController) removeDb(obj interface{}) {
} }
func (dc *DisruptionController) addPod(obj interface{}) { func (dc *DisruptionController) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
glog.V(4).Infof("addPod called on pod %q", pod.Name) glog.V(4).Infof("addPod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod) pdb := dc.getPdbForPod(pod)
if pdb == nil { if pdb == nil {
@@ -322,7 +323,7 @@ func (dc *DisruptionController) addPod(obj interface{}) {
} }
func (dc *DisruptionController) updatePod(old, cur interface{}) { func (dc *DisruptionController) updatePod(old, cur interface{}) {
pod := cur.(*api.Pod) pod := cur.(*v1.Pod)
glog.V(4).Infof("updatePod called on pod %q", pod.Name) glog.V(4).Infof("updatePod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod) pdb := dc.getPdbForPod(pod)
if pdb == nil { if pdb == nil {
@@ -334,7 +335,7 @@ func (dc *DisruptionController) updatePod(old, cur interface{}) {
} }
func (dc *DisruptionController) deletePod(obj interface{}) { func (dc *DisruptionController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not // When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains // in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod // the deleted key/value. Note that this value might be stale. If the pod
@@ -346,7 +347,7 @@ func (dc *DisruptionController) deletePod(obj interface{}) {
glog.Errorf("Couldn't get object from tombstone %+v", obj) glog.Errorf("Couldn't get object from tombstone %+v", obj)
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
glog.Errorf("Tombstone contained object that is not a pod %+v", obj) glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
return return
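deletePod has to accept either a *v1.Pod or the tombstone the informer delivers when a delete event was missed. A sketch of that two-step type assertion with local stand-ins (Pod and the tombstone struct here are placeholders modeled on the cache package's tombstone, not the real types).

package main

import "fmt"

type Pod struct{ Name string }

// tombstone mirrors the marker object delivered when only the last-known
// state of a deleted object is available.
type tombstone struct {
	Key string
	Obj interface{}
}

// podFromDeleteEvent unwraps either a *Pod or a tombstone wrapping a *Pod.
func podFromDeleteEvent(obj interface{}) (*Pod, bool) {
	if pod, ok := obj.(*Pod); ok {
		return pod, true
	}
	ts, ok := obj.(tombstone)
	if !ok {
		return nil, false // neither a pod nor a tombstone
	}
	pod, ok := ts.Obj.(*Pod)
	return pod, ok
}

func main() {
	p, _ := podFromDeleteEvent(tombstone{Key: "ns/p", Obj: &Pod{Name: "p"}})
	fmt.Println(p.Name)
}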
@@ -380,7 +381,7 @@ func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBu
dc.recheckQueue.AddAfter(key, delay) dc.recheckQueue.AddAfter(key, delay)
} }
func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruptionBudget { func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionBudget {
// GetPodPodDisruptionBudgets returns an error only if no // GetPodPodDisruptionBudgets returns an error only if no
// PodDisruptionBudgets are found. We don't return that as an error to the // PodDisruptionBudgets are found. We don't return that as an error to the
// caller. // caller.
@@ -393,25 +394,25 @@ func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruption
if len(pdbs) > 1 { if len(pdbs) > 1 {
msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name) msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
glog.Warning(msg) glog.Warning(msg)
dc.recorder.Event(pod, api.EventTypeWarning, "MultiplePodDisruptionBudgets", msg) dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
} }
return &pdbs[0] return &pdbs[0]
} }
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*api.Pod, error) { func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
sel, err := unversioned.LabelSelectorAsSelector(pdb.Spec.Selector) sel, err := unversioned.LabelSelectorAsSelector(pdb.Spec.Selector)
if sel.Empty() { if sel.Empty() {
return []*api.Pod{}, nil return []*v1.Pod{}, nil
} }
if err != nil { if err != nil {
return []*api.Pod{}, err return []*v1.Pod{}, err
} }
pods, err := dc.podLister.Pods(pdb.Namespace).List(sel) pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
if err != nil { if err != nil {
return []*api.Pod{}, err return []*v1.Pod{}, err
} }
// TODO: Do we need to copy here? // TODO: Do we need to copy here?
result := make([]*api.Pod, 0, len(pods)) result := make([]*v1.Pod, 0, len(pods))
for i := range pods { for i := range pods {
result = append(result, &(*pods[i])) result = append(result, &(*pods[i]))
} }
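getPodsForPdb converts the PDB's label selector into a selector and lists matching pods, short-circuiting on an empty selector. A self-contained sketch of that behavior using plain map matching; the types and helper names here are stand-ins, not the real lister or selector API.

package main

import "fmt"

type Pod struct {
	Name   string
	Labels map[string]string
}

// matches reports whether every key/value in sel is present on the pod.
func matches(sel, labels map[string]string) bool {
	for k, v := range sel {
		if labels[k] != v {
			return false
		}
	}
	return true
}

func podsForSelector(sel map[string]string, pods []Pod) []Pod {
	if len(sel) == 0 {
		// An empty selector selects nothing for a PDB (mirrors the
		// sel.Empty() short-circuit above).
		return nil
	}
	var out []Pod
	for _, p := range pods {
		if matches(sel, p.Labels) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pods := []Pod{
		{Name: "a", Labels: map[string]string{"foo": "bar"}},
		{Name: "b", Labels: map[string]string{"foo": "baz"}},
	}
	fmt.Println(podsForSelector(map[string]string{"foo": "bar"}, pods)) // only pod "a"
}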
@@ -485,16 +486,16 @@ func (dc *DisruptionController) sync(key string) error {
func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error { func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
pods, err := dc.getPodsForPdb(pdb) pods, err := dc.getPodsForPdb(pdb)
if err != nil { if err != nil {
dc.recorder.Eventf(pdb, api.EventTypeWarning, "NoPods", "Failed to get pods: %v", err) dc.recorder.Eventf(pdb, v1.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
return err return err
} }
if len(pods) == 0 { if len(pods) == 0 {
dc.recorder.Eventf(pdb, api.EventTypeNormal, "NoPods", "No matching pods found") dc.recorder.Eventf(pdb, v1.EventTypeNormal, "NoPods", "No matching pods found")
} }
expectedCount, desiredHealthy, err := dc.getExpectedPodCount(pdb, pods) expectedCount, desiredHealthy, err := dc.getExpectedPodCount(pdb, pods)
if err != nil { if err != nil {
dc.recorder.Eventf(pdb, api.EventTypeNormal, "ExpectedPods", "Failed to calculate the number of expected pods: %v", err) dc.recorder.Eventf(pdb, v1.EventTypeNormal, "ExpectedPods", "Failed to calculate the number of expected pods: %v", err)
return err return err
} }
@@ -512,7 +513,7 @@ func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
return err return err
} }
func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*api.Pod) (expectedCount, desiredHealthy int32, err error) { func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount, desiredHealthy int32, err error) {
err = nil err = nil
// TODO(davidopp): consider making the way expectedCount and rules about // TODO(davidopp): consider making the way expectedCount and rules about
// permitted controller configurations (specifically, considering it an error // permitted controller configurations (specifically, considering it an error
@@ -554,11 +555,11 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
} }
if controllerCount == 0 { if controllerCount == 0 {
err = fmt.Errorf("asked for percentage, but found no controllers for pod %q", pod.Name) err = fmt.Errorf("asked for percentage, but found no controllers for pod %q", pod.Name)
dc.recorder.Event(pdb, api.EventTypeWarning, "NoControllers", err.Error()) dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllers", err.Error())
return return
} else if controllerCount > 1 { } else if controllerCount > 1 {
err = fmt.Errorf("pod %q has %v>1 controllers", pod.Name, controllerCount) err = fmt.Errorf("pod %q has %v>1 controllers", pod.Name, controllerCount)
dc.recorder.Event(pdb, api.EventTypeWarning, "TooManyControllers", err.Error()) dc.recorder.Event(pdb, v1.EventTypeWarning, "TooManyControllers", err.Error())
return return
} }
} }
@@ -581,7 +582,7 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
return return
} }
func countHealthyPods(pods []*api.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) { func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
Pod: Pod:
for _, pod := range pods { for _, pod := range pods {
// Pod is being deleted. // Pod is being deleted.
@@ -592,7 +593,7 @@ Pod:
if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) { if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
continue continue
} }
if api.IsPodReady(pod) { if v1.IsPodReady(pod) {
currentHealthy++ currentHealthy++
continue Pod continue Pod
} }
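countHealthyPods skips pods that are being deleted or that were recently recorded as disrupted (still inside the DeletionTimeout window) and counts the rest that are ready. A compact sketch of that logic with local stand-in types.

package main

import (
	"fmt"
	"time"
)

type Pod struct {
	Name              string
	Ready             bool
	DeletionRequested bool
}

// countHealthy counts ready pods, skipping pods being deleted and pods whose
// recorded disruption is still expected to complete within the timeout.
func countHealthy(pods []Pod, disrupted map[string]time.Time, timeout time.Duration, now time.Time) int32 {
	var healthy int32
	for _, pod := range pods {
		if pod.DeletionRequested {
			continue
		}
		if t, ok := disrupted[pod.Name]; ok && t.Add(timeout).After(now) {
			continue
		}
		if pod.Ready {
			healthy++
		}
	}
	return healthy
}

func main() {
	now := time.Now()
	pods := []Pod{{Name: "a", Ready: true}, {Name: "b", Ready: true}, {Name: "c", Ready: false}}
	disrupted := map[string]time.Time{"b": now.Add(-30 * time.Second)}
	fmt.Println(countHealthy(pods, disrupted, 2*time.Minute, now)) // 1: "b" is still counted as disrupted
}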
@@ -603,7 +604,7 @@ Pod:
// Builds a new disrupted-pods map, possibly removing items that refer to pods which no longer exist, // Builds a new disrupted-pods map, possibly removing items that refer to pods which no longer exist,
// were already deleted, or were not deleted at all. Also returns information about when this check should be repeated. // were already deleted, or were not deleted at all. Also returns information about when this check should be repeated.
func (dc *DisruptionController) buildDisruptedPodMap(pods []*api.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) { func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) {
disruptedPods := pdb.Status.DisruptedPods disruptedPods := pdb.Status.DisruptedPods
result := make(map[string]unversioned.Time) result := make(map[string]unversioned.Time)
var recheckTime *time.Time var recheckTime *time.Time
@@ -625,7 +626,7 @@ func (dc *DisruptionController) buildDisruptedPodMap(pods []*api.Pod, pdb *polic
if expectedDeletion.Before(currentTime) { if expectedDeletion.Before(currentTime) {
glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s", glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name) pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
dc.recorder.Eventf(pod, api.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't", dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
pdb.Namespace, pdb.Name) pdb.Namespace, pdb.Name)
} else { } else {
if recheckTime == nil || expectedDeletion.Before(*recheckTime) { if recheckTime == nil || expectedDeletion.Before(*recheckTime) {

View File

@@ -25,9 +25,10 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/apis/policy" policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -95,7 +96,7 @@ func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
broadcaster: record.NewBroadcaster(), broadcaster: record.NewBroadcaster(),
} }
dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "disruption_test"}) dc.recorder = dc.broadcaster.NewRecorder(v1.EventSource{Component: "disruption_test"})
return dc, ps return dc, ps
} }
@@ -115,11 +116,11 @@ func newSelFooBar() *unversioned.LabelSelector {
func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) { func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {
pdb := &policy.PodDisruptionBudget{ pdb := &policy.PodDisruptionBudget{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
}, },
Spec: policy.PodDisruptionBudgetSpec{ Spec: policy.PodDisruptionBudgetSpec{
@@ -136,21 +137,21 @@ func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*pol
return pdb, pdbName return pdb, pdbName
} }
func newPod(t *testing.T, name string) (*api.Pod, string) { func newPod(t *testing.T, name string) (*v1.Pod, string) {
pod := &api.Pod{ pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Annotations: make(map[string]string), Annotations: make(map[string]string),
Name: name, Name: name,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
Labels: fooBar(), Labels: fooBar(),
}, },
Spec: api.PodSpec{}, Spec: v1.PodSpec{},
Status: api.PodStatus{ Status: v1.PodStatus{
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{Type: api.PodReady, Status: api.ConditionTrue}, {Type: v1.PodReady, Status: v1.ConditionTrue},
}, },
}, },
} }
@@ -163,18 +164,18 @@ func newPod(t *testing.T, name string) (*api.Pod, string) {
return pod, podName return pod, podName
} }
func newReplicationController(t *testing.T, size int32) (*api.ReplicationController, string) { func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
rc := &api.ReplicationController{ rc := &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
Labels: fooBar(), Labels: fooBar(),
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: size, Replicas: &size,
Selector: fooBar(), Selector: fooBar(),
}, },
} }
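Because the versioned specs take *int32 for Replicas, the test constructors pass &size, which is safe in Go even though size is a parameter. A tiny sketch; the int32Ptr helper is only an illustration of a common alternative, not something introduced by this commit.

package main

import "fmt"

// Spec is a local stand-in; in the versioned fixtures above Replicas is *int32.
type Spec struct{ Replicas *int32 }

// int32Ptr is the usual small helper when no named temporary is wanted.
func int32Ptr(i int32) *int32 { return &i }

func newSpec(size int32) Spec {
	return Spec{Replicas: &size} // taking the address of a parameter is safe in Go
}

func main() {
	s := newSpec(3)
	fmt.Println(*s.Replicas, *int32Ptr(5)) // 3 5
}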
@@ -189,16 +190,16 @@ func newReplicationController(t *testing.T, size int32) (*api.ReplicationControl
func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) { func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
d := &extensions.Deployment{ d := &extensions.Deployment{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
Labels: fooBar(), Labels: fooBar(),
}, },
Spec: extensions.DeploymentSpec{ Spec: extensions.DeploymentSpec{
Replicas: size, Replicas: &size,
Selector: newSelFooBar(), Selector: newSelFooBar(),
}, },
} }
@@ -213,16 +214,16 @@ func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) { func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
rs := &extensions.ReplicaSet{ rs := &extensions.ReplicaSet{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
Labels: fooBar(), Labels: fooBar(),
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: size, Replicas: &size,
Selector: newSelFooBar(), Selector: newSelFooBar(),
}, },
} }
@@ -274,7 +275,7 @@ func TestUnavailable(t *testing.T) {
dc.sync(pdbName) dc.sync(pdbName)
// Add three pods, verifying that the counts go up at each step. // Add three pods, verifying that the counts go up at each step.
pods := []*api.Pod{} pods := []*v1.Pod{}
for i := int32(0); i < 4; i++ { for i := int32(0); i < 4; i++ {
ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]unversioned.Time{})
pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i)) pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
@@ -285,7 +286,7 @@ func TestUnavailable(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]unversioned.Time{})
// Now set one pod as unavailable // Now set one pod as unavailable
pods[0].Status.Conditions = []api.PodCondition{} pods[0].Status.Conditions = []v1.PodCondition{}
update(t, dc.podLister.Indexer, pods[0]) update(t, dc.podLister.Indexer, pods[0])
dc.sync(pdbName) dc.sync(pdbName)
@@ -387,7 +388,7 @@ func TestReplicationController(t *testing.T) {
// about the RC. This is a known bug. TODO(mml): file issue // about the RC. This is a known bug. TODO(mml): file issue
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})
pods := []*api.Pod{} pods := []*v1.Pod{}
for i := int32(0); i < 3; i++ { for i := int32(0); i < 3; i++ {
pod, _ := newPod(t, fmt.Sprintf("foobar %d", i)) pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
@@ -439,7 +440,7 @@ func TestTwoControllers(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})
pods := []*api.Pod{} pods := []*v1.Pod{}
unavailablePods := collectionSize - minimumOne - 1 unavailablePods := collectionSize - minimumOne - 1
for i := int32(1); i <= collectionSize; i++ { for i := int32(1); i <= collectionSize; i++ {
@@ -447,7 +448,7 @@ func TestTwoControllers(t *testing.T) {
pods = append(pods, pod) pods = append(pods, pod)
pod.Labels = rcLabels pod.Labels = rcLabels
if i <= unavailablePods { if i <= unavailablePods {
pod.Status.Conditions = []api.PodCondition{} pod.Status.Conditions = []v1.PodCondition{}
} }
add(t, dc.podLister.Indexer, pod) add(t, dc.podLister.Indexer, pod)
dc.sync(pdbName) dc.sync(pdbName)
@@ -480,7 +481,7 @@ func TestTwoControllers(t *testing.T) {
pods = append(pods, pod) pods = append(pods, pod)
pod.Labels = dLabels pod.Labels = dLabels
if i <= unavailablePods { if i <= unavailablePods {
pod.Status.Conditions = []api.PodCondition{} pod.Status.Conditions = []v1.PodCondition{}
} }
add(t, dc.podLister.Indexer, pod) add(t, dc.podLister.Indexer, pod)
dc.sync(pdbName) dc.sync(pdbName)
@@ -498,17 +499,17 @@ func TestTwoControllers(t *testing.T) {
// but if we bring down two, it's not. Then we make the pod ready again and // but if we bring down two, it's not. Then we make the pod ready again and
// verify that a disruption is permitted again. // verify that a disruption is permitted again.
ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
pods[collectionSize-1].Status.Conditions = []api.PodCondition{} pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
update(t, dc.podLister.Indexer, pods[collectionSize-1]) update(t, dc.podLister.Indexer, pods[collectionSize-1])
dc.sync(pdbName) dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
pods[collectionSize-2].Status.Conditions = []api.PodCondition{} pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
update(t, dc.podLister.Indexer, pods[collectionSize-2]) update(t, dc.podLister.Indexer, pods[collectionSize-2])
dc.sync(pdbName) dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}} pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
update(t, dc.podLister.Indexer, pods[collectionSize-1]) update(t, dc.podLister.Indexer, pods[collectionSize-1])
dc.sync(pdbName) dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{}) ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

View File

@@ -24,13 +24,13 @@ import (
"encoding/json" "encoding/json"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/endpoints"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
podutil "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/api/v1"
utilpod "k8s.io/kubernetes/pkg/api/pod" "k8s.io/kubernetes/pkg/api/v1/endpoints"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@@ -80,14 +80,14 @@ func NewEndpointController(podInformer cache.SharedIndexInformer, client clients
e.serviceStore.Indexer, e.serviceController = cache.NewIndexerInformer( e.serviceStore.Indexer, e.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(api.NamespaceAll).List(options) return e.client.Core().Services(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return e.client.Core().Services(api.NamespaceAll).Watch(options) return e.client.Core().Services(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.Service{}, &v1.Service{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
FullServiceResyncPeriod, FullServiceResyncPeriod,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
@@ -180,7 +180,7 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
<-stopCh <-stopCh
} }
func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) { func (e *EndpointController) getPodServiceMemberships(pod *v1.Pod) (sets.String, error) {
set := sets.String{} set := sets.String{}
services, err := e.serviceStore.GetPodServices(pod) services, err := e.serviceStore.GetPodServices(pod)
if err != nil { if err != nil {
@@ -199,9 +199,9 @@ func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (sets.String
} }
// When a pod is added, figure out what services it will be a member of and // When a pod is added, figure out what services it will be a member of and
// enqueue them. obj must have *api.Pod type. // enqueue them. obj must have *v1.Pod type.
func (e *EndpointController) addPod(obj interface{}) { func (e *EndpointController) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
services, err := e.getPodServiceMemberships(pod) services, err := e.getPodServiceMemberships(pod)
if err != nil { if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err)) utilruntime.HandleError(fmt.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err))
@@ -214,10 +214,10 @@ func (e *EndpointController) addPod(obj interface{}) {
// When a pod is updated, figure out what services it used to be a member of // When a pod is updated, figure out what services it used to be a member of
// and what services it will be a member of, and enqueue the union of these. // and what services it will be a member of, and enqueue the union of these.
// old and cur must be *api.Pod types. // old and cur must be *v1.Pod types.
func (e *EndpointController) updatePod(old, cur interface{}) { func (e *EndpointController) updatePod(old, cur interface{}) {
newPod := cur.(*api.Pod) newPod := cur.(*v1.Pod)
oldPod := old.(*api.Pod) oldPod := old.(*v1.Pod)
if newPod.ResourceVersion == oldPod.ResourceVersion { if newPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods. // Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs. // Two different versions of the same pod will always have different RVs.
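The ResourceVersion comparison is what lets updatePod ignore the no-op updates that periodic resyncs generate. A minimal sketch of that short-circuit with a local stand-in Pod type.

package main

import "fmt"

// Pod is a local stand-in; only the ResourceVersion comparison matters here.
type Pod struct {
	Name            string
	ResourceVersion string
}

func changed(old, cur Pod) bool {
	return old.ResourceVersion != cur.ResourceVersion
}

func main() {
	old := Pod{Name: "p", ResourceVersion: "18"}
	cur := Pod{Name: "p", ResourceVersion: "18"}
	if !changed(old, cur) {
		fmt.Println("resync-only update, skipping") // what the controller does
	}
}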
@@ -244,12 +244,12 @@ func (e *EndpointController) updatePod(old, cur interface{}) {
} }
} }
func hostNameAndDomainAreEqual(pod1, pod2 *api.Pod) bool { func hostNameAndDomainAreEqual(pod1, pod2 *v1.Pod) bool {
return getHostname(pod1) == getHostname(pod2) && return getHostname(pod1) == getHostname(pod2) &&
getSubdomain(pod1) == getSubdomain(pod2) getSubdomain(pod1) == getSubdomain(pod2)
} }
func getHostname(pod *api.Pod) string { func getHostname(pod *v1.Pod) string {
if len(pod.Spec.Hostname) > 0 { if len(pod.Spec.Hostname) > 0 {
return pod.Spec.Hostname return pod.Spec.Hostname
} }
@@ -259,7 +259,7 @@ func getHostname(pod *api.Pod) string {
return "" return ""
} }
func getSubdomain(pod *api.Pod) string { func getSubdomain(pod *v1.Pod) string {
if len(pod.Spec.Subdomain) > 0 { if len(pod.Spec.Subdomain) > 0 {
return pod.Spec.Subdomain return pod.Spec.Subdomain
} }
@@ -270,9 +270,9 @@ func getSubdomain(pod *api.Pod) string {
} }
// When a pod is deleted, enqueue the services the pod used to be a member of. // When a pod is deleted, enqueue the services the pod used to be a member of.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. // obj could be a *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) deletePod(obj interface{}) { func (e *EndpointController) deletePod(obj interface{}) {
if _, ok := obj.(*api.Pod); ok { if _, ok := obj.(*v1.Pod); ok {
// Enqueue all the services that the pod used to be a member // Enqueue all the services that the pod used to be a member
// of. This happens to be exactly the same thing we do when a // of. This happens to be exactly the same thing we do when a
// pod is added. // pod is added.
@@ -289,7 +289,7 @@ func (e *EndpointController) deletePod(obj interface{}) {
// TODO: keep a map of pods to services to handle this condition. // TODO: keep a map of pods to services to handle this condition.
} }
// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item. // obj could be a *v1.Service, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) enqueueService(obj interface{}) { func (e *EndpointController) enqueueService(obj interface{}) {
key, err := keyFunc(obj) key, err := keyFunc(obj)
if err != nil { if err != nil {
@@ -354,7 +354,7 @@ func (e *EndpointController) syncService(key string) error {
return nil return nil
} }
service := obj.(*api.Service) service := obj.(*v1.Service)
if service.Spec.Selector == nil { if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller; // services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API. // these services will receive the endpoints that are created out-of-band via the REST API.
@@ -369,7 +369,7 @@ func (e *EndpointController) syncService(key string) error {
return err return err
} }
subsets := []api.EndpointSubset{} subsets := []v1.EndpointSubset{}
podHostNames := map[string]endpoints.HostRecord{} podHostNames := map[string]endpoints.HostRecord{}
var tolerateUnreadyEndpoints bool var tolerateUnreadyEndpoints bool
@@ -407,11 +407,11 @@ func (e *EndpointController) syncService(key string) error {
continue continue
} }
epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto} epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epa := api.EndpointAddress{ epa := v1.EndpointAddress{
IP: pod.Status.PodIP, IP: pod.Status.PodIP,
NodeName: &pod.Spec.NodeName, NodeName: &pod.Spec.NodeName,
TargetRef: &api.ObjectReference{ TargetRef: &v1.ObjectReference{
Kind: "Pod", Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace, Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name, Name: pod.ObjectMeta.Name,
@@ -431,17 +431,17 @@ func (e *EndpointController) syncService(key string) error {
epa.Hostname = hostname epa.Hostname = hostname
} }
if tolerateUnreadyEndpoints || api.IsPodReady(pod) { if tolerateUnreadyEndpoints || v1.IsPodReady(pod) {
subsets = append(subsets, api.EndpointSubset{ subsets = append(subsets, v1.EndpointSubset{
Addresses: []api.EndpointAddress{epa}, Addresses: []v1.EndpointAddress{epa},
Ports: []api.EndpointPort{epp}, Ports: []v1.EndpointPort{epp},
}) })
readyEps++ readyEps++
} else { } else {
glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name) glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
subsets = append(subsets, api.EndpointSubset{ subsets = append(subsets, v1.EndpointSubset{
NotReadyAddresses: []api.EndpointAddress{epa}, NotReadyAddresses: []v1.EndpointAddress{epa},
Ports: []api.EndpointPort{epp}, Ports: []v1.EndpointPort{epp},
}) })
notReadyEps++ notReadyEps++
} }
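The branch above splits each pod's address into ready or not-ready subsets, with tolerateUnreadyEndpoints forcing everything into the ready list. A sketch of that split using local stand-in types (Address and Subset here are placeholders, not the real v1 endpoint types).

package main

import "fmt"

type Address struct{ IP string }

type Subset struct {
	Addresses         []Address
	NotReadyAddresses []Address
	Port              int
}

// appendAddress mirrors the readiness branch in syncService: a ready pod (or
// any pod when unready endpoints are tolerated) goes into Addresses,
// otherwise into NotReadyAddresses.
func appendAddress(subsets []Subset, addr Address, port int, ready, tolerateUnready bool) []Subset {
	if ready || tolerateUnready {
		return append(subsets, Subset{Addresses: []Address{addr}, Port: port})
	}
	return append(subsets, Subset{NotReadyAddresses: []Address{addr}, Port: port})
}

func main() {
	var subsets []Subset
	subsets = appendAddress(subsets, Address{IP: "1.2.3.4"}, 8080, true, false)
	subsets = appendAddress(subsets, Address{IP: "1.2.3.5"}, 8080, false, false)
	fmt.Printf("%+v\n", subsets)
}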
@@ -453,8 +453,8 @@ func (e *EndpointController) syncService(key string) error {
currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name) currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name)
if err != nil { if err != nil {
if errors.IsNotFound(err) { if errors.IsNotFound(err) {
currentEndpoints = &api.Endpoints{ currentEndpoints = &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: service.Name, Name: service.Name,
Labels: service.Labels, Labels: service.Labels,
}, },
@@ -521,7 +521,7 @@ func (e *EndpointController) syncService(key string) error {
// some stragglers could have been left behind if the endpoint controller // some stragglers could have been left behind if the endpoint controller
// reboots). // reboots).
func (e *EndpointController) checkLeftoverEndpoints() { func (e *EndpointController) checkLeftoverEndpoints() {
list, err := e.client.Core().Endpoints(api.NamespaceAll).List(api.ListOptions{}) list, err := e.client.Core().Endpoints(v1.NamespaceAll).List(v1.ListOptions{})
if err != nil { if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err)) utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err))
return return

View File

@@ -22,14 +22,14 @@ import (
"net/http/httptest" "net/http/httptest"
"testing" "testing"
"k8s.io/kubernetes/pkg/api"
endptspkg "k8s.io/kubernetes/pkg/api/endpoints"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
_ "k8s.io/kubernetes/pkg/apimachinery/registered" _ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -42,32 +42,32 @@ var emptyNodeName string
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) { func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
for i := 0; i < nPods+nNotReady; i++ { for i := 0; i < nPods+nNotReady; i++ {
p := &api.Pod{ p := &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: namespace, Namespace: namespace,
Name: fmt.Sprintf("pod%d", i), Name: fmt.Sprintf("pod%d", i),
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{{Ports: []api.ContainerPort{}}}, Containers: []v1.Container{{Ports: []v1.ContainerPort{}}},
}, },
Status: api.PodStatus{ Status: v1.PodStatus{
PodIP: fmt.Sprintf("1.2.3.%d", 4+i), PodIP: fmt.Sprintf("1.2.3.%d", 4+i),
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: api.PodReady, Type: v1.PodReady,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
}, },
}, },
}, },
} }
if i >= nPods { if i >= nPods {
p.Status.Conditions[0].Status = api.ConditionFalse p.Status.Conditions[0].Status = v1.ConditionFalse
} }
for j := 0; j < nPorts; j++ { for j := 0; j < nPorts; j++ {
p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports, p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)}) v1.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)})
} }
store.Add(p) store.Add(p)
} }
@@ -94,54 +94,54 @@ func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResp
} }
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) { func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
ns := api.NamespaceDefault ns := v1.NamespaceDefault
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []api.EndpointPort{{Port: 1000}}, Ports: []v1.EndpointPort{{Port: 1000}},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}}, Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}},
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequestCount(t, 0) endpointsHandler.ValidateRequestCount(t, 0)
} }
func TestCheckLeftoverEndpoints(t *testing.T) { func TestCheckLeftoverEndpoints(t *testing.T) {
ns := api.NamespaceDefault ns := v1.NamespaceDefault
// Note that this requests *all* endpoints, therefore the NamespaceAll // Note that this requests *all* endpoints, therefore the NamespaceAll
// below. // below.
testServer, _ := makeTestServer(t, api.NamespaceAll, testServer, _ := makeTestServer(t, v1.NamespaceAll,
serverResponse{http.StatusOK, &api.EndpointsList{ serverResponse{http.StatusOK, &v1.EndpointsList{
ListMeta: unversioned.ListMeta{ ListMeta: unversioned.ListMeta{
ResourceVersion: "1", ResourceVersion: "1",
}, },
Items: []api.Endpoints{{ Items: []v1.Endpoints{{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []api.EndpointPort{{Port: 1000}}, Ports: []v1.EndpointPort{{Port: 1000}},
}}, }},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
endpoints.checkLeftoverEndpoints() endpoints.checkLeftoverEndpoints()
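These tests point a generated clientset at an httptest server via restclient.Config{Host: testServer.URL}. A stripped-down sketch of just the httptest side, using a plain http.Get and a canned JSON body; the handler and request path here are illustrative assumptions, not the makeTestServer helper itself.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Serve a fixed response for any request, standing in for the fake API server.
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		fmt.Fprint(w, `{"kind":"Endpoints","metadata":{"name":"foo","resourceVersion":"1"}}`)
	}))
	defer ts.Close()

	resp, err := http.Get(ts.URL + "/api/v1/namespaces/default/endpoints/foo")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}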
@@ -158,41 +158,41 @@ func TestCheckLeftoverEndpoints(t *testing.T) {
func TestSyncEndpointsProtocolTCP(t *testing.T) { func TestSyncEndpointsProtocolTCP(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []api.EndpointPort{{Port: 1000, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 1000, Protocol: "TCP"}},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{}, Selector: map[string]string{},
Ports: []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}}, Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequestCount(t, 2) endpointsHandler.ValidateRequestCount(t, 2)
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -201,40 +201,40 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
func TestSyncEndpointsProtocolUDP(t *testing.T) { func TestSyncEndpointsProtocolUDP(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []api.EndpointPort{{Port: 1000, Protocol: "UDP"}}, Ports: []v1.EndpointPort{{Port: 1000, Protocol: "UDP"}},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{}, Selector: map[string]string{},
Ports: []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}}, Ports: []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequestCount(t, 2) endpointsHandler.ValidateRequestCount(t, 2)
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "UDP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "UDP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -243,36 +243,36 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) { func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{}, Subsets: []v1.EndpointSubset{},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{}, Selector: map[string]string{},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -281,36 +281,36 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) { func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{}, Subsets: []v1.EndpointSubset{},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 0, 1, 1) addPods(endpoints.podStore.Indexer, ns, 0, 1, 1)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{}, Selector: map[string]string{},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -319,37 +319,37 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) { func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{}, Subsets: []v1.EndpointSubset{},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 1, 1, 1) addPods(endpoints.podStore.Indexer, ns, 1, 1, 1)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{}, Selector: map[string]string{},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}}, NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -358,108 +358,108 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
func TestSyncEndpointsItemsPreexisting(t *testing.T) { func TestSyncEndpointsItemsPreexisting(t *testing.T) {
ns := "bar" ns := "bar"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []api.EndpointPort{{Port: 1000}}, Ports: []v1.EndpointPort{{Port: 1000}},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
} }
func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) { func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
ns := api.NamespaceDefault ns := v1.NamespaceDefault
testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault, testServer, endpointsHandler := makeTestServer(t, v1.NamespaceDefault,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
ResourceVersion: "1", ResourceVersion: "1",
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, api.NamespaceDefault, 1, 1, 0) addPods(endpoints.podStore.Indexer, v1.NamespaceDefault, 1, 1, 0)
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: v1.NamespaceDefault},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", v1.NamespaceDefault, "foo"), "GET", nil)
} }
func TestSyncEndpointsItems(t *testing.T) { func TestSyncEndpointsItems(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{}}) serverResponse{http.StatusOK, &v1.Endpoints{}})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 3, 2, 0) addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
addPods(endpoints.podStore.Indexer, "blah", 5, 2, 0) // make sure these aren't found! addPods(endpoints.podStore.Indexer, "blah", 5, 2, 0) // make sure these aren't found!
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Ports: []api.ServicePort{ Ports: []v1.ServicePort{
{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)}, {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)},
}, },
}, },
}) })
endpoints.syncService("other/foo") endpoints.syncService("other/foo")
expectedSubsets := []api.EndpointSubset{{ expectedSubsets := []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{ Addresses: []v1.EndpointAddress{
{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}, {IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}, {IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
{IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}}, {IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
}, },
Ports: []api.EndpointPort{ Ports: []v1.EndpointPort{
{Name: "port0", Port: 8080, Protocol: "TCP"}, {Name: "port0", Port: 8080, Protocol: "TCP"},
{Name: "port1", Port: 8088, Protocol: "TCP"}, {Name: "port1", Port: 8088, Protocol: "TCP"},
}, },
}} }}
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
ResourceVersion: "", ResourceVersion: "",
}, },
Subsets: endptspkg.SortSubsets(expectedSubsets), Subsets: endptspkg.SortSubsets(expectedSubsets),
@@ -472,41 +472,41 @@ func TestSyncEndpointsItems(t *testing.T) {
func TestSyncEndpointsItemsWithLabels(t *testing.T) { func TestSyncEndpointsItemsWithLabels(t *testing.T) {
ns := "other" ns := "other"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{}}) serverResponse{http.StatusOK, &v1.Endpoints{}})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 3, 2, 0) addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
serviceLabels := map[string]string{"foo": "bar"} serviceLabels := map[string]string{"foo": "bar"}
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
Labels: serviceLabels, Labels: serviceLabels,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Ports: []api.ServicePort{ Ports: []v1.ServicePort{
{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}, {Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)}, {Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)},
}, },
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
expectedSubsets := []api.EndpointSubset{{ expectedSubsets := []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{ Addresses: []v1.EndpointAddress{
{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}, {IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}, {IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
{IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}}, {IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
}, },
Ports: []api.EndpointPort{ Ports: []v1.EndpointPort{
{Name: "port0", Port: 8080, Protocol: "TCP"}, {Name: "port0", Port: 8080, Protocol: "TCP"},
{Name: "port1", Port: 8088, Protocol: "TCP"}, {Name: "port1", Port: 8088, Protocol: "TCP"},
}, },
}} }}
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
ResourceVersion: "", ResourceVersion: "",
Labels: serviceLabels, Labels: serviceLabels,
}, },
@@ -520,8 +520,8 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) { func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
ns := "bar" ns := "bar"
testServer, endpointsHandler := makeTestServer(t, ns, testServer, endpointsHandler := makeTestServer(t, ns,
serverResponse{http.StatusOK, &api.Endpoints{ serverResponse{http.StatusOK, &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
@@ -529,39 +529,39 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
"foo": "bar", "foo": "bar",
}, },
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}}, Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
Ports: []api.EndpointPort{{Port: 1000}}, Ports: []v1.EndpointPort{{Port: 1000}},
}}, }},
}}) }})
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc) endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
endpoints.podStoreSynced = alwaysReady endpoints.podStoreSynced = alwaysReady
addPods(endpoints.podStore.Indexer, ns, 1, 1, 0) addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
serviceLabels := map[string]string{"baz": "blah"} serviceLabels := map[string]string{"baz": "blah"}
endpoints.serviceStore.Indexer.Add(&api.Service{ endpoints.serviceStore.Indexer.Add(&v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
Labels: serviceLabels, Labels: serviceLabels,
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Ports: []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}}, Ports: []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
}, },
}) })
endpoints.syncService(ns + "/foo") endpoints.syncService(ns + "/foo")
data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{ data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo", Name: "foo",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
Labels: serviceLabels, Labels: serviceLabels,
}, },
Subsets: []api.EndpointSubset{{ Subsets: []v1.EndpointSubset{{
Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}}, Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
Ports: []api.EndpointPort{{Port: 8080, Protocol: "TCP"}}, Ports: []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
}}, }},
}) })
endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data) endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)

View File

@@ -232,7 +232,7 @@ func shouldOrphanDependents(e *event, accessor meta.Object) bool {
} }
finalizers := accessor.GetFinalizers() finalizers := accessor.GetFinalizers()
for _, finalizer := range finalizers { for _, finalizer := range finalizers {
if finalizer == api.FinalizerOrphan { if finalizer == v1.FinalizerOrphan {
return true return true
} }
} }
@@ -277,7 +277,7 @@ func (gc *GarbageCollector) removeOrphanFinalizer(owner *node) error {
var newFinalizers []string var newFinalizers []string
found := false found := false
for _, f := range finalizers { for _, f := range finalizers {
if f == api.FinalizerOrphan { if f == v1.FinalizerOrphan {
found = true found = true
break break
} else { } else {
@@ -450,24 +450,24 @@ type GarbageCollector struct {
func gcListWatcher(client *dynamic.Client, resource unversioned.GroupVersionResource) *cache.ListWatch { func gcListWatcher(client *dynamic.Client, resource unversioned.GroupVersionResource) *cache.ListWatch {
return &cache.ListWatch{ return &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
// APIResource.Kind is not used by the dynamic client, so // APIResource.Kind is not used by the dynamic client, so
// leave it empty. We want to list this resource in all // leave it empty. We want to list this resource in all
// namespaces if it's namespace scoped, so leaving // namespaces if it's namespace scoped, so leaving
// APIResource.Namespaced as false is all right. // APIResource.Namespaced as false is all right.
apiResource := unversioned.APIResource{Name: resource.Resource} apiResource := unversioned.APIResource{Name: resource.Resource}
return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback). return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
Resource(&apiResource, api.NamespaceAll). Resource(&apiResource, v1.NamespaceAll).
List(&options) List(&options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
// APIResource.Kind is not used by the dynamic client, so // APIResource.Kind is not used by the dynamic client, so
// leave it empty. We want to list this resource in all // leave it empty. We want to list this resource in all
// namespaces if it's namespace scoped, so leaving // namespaces if it's namespace scoped, so leaving
// APIResource.Namespaced as false is all right. // APIResource.Namespaced as false is all right.
apiResource := unversioned.APIResource{Name: resource.Resource} apiResource := unversioned.APIResource{Name: resource.Resource}
return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback). return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
Resource(&apiResource, api.NamespaceAll). Resource(&apiResource, v1.NamespaceAll).
Watch(&options) Watch(&options)
}, },
} }
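The comments above explain the two quirks of this ListWatch: the dynamic client ignores APIResource.Kind, and leaving APIResource.Namespaced false is fine because the empty NamespaceAll string lists across every namespace. A condensed usage sketch, mirroring the unit test further below; dynamicClient and the GroupVersionResource values are placeholders, not part of the commit:

	podResource := unversioned.GroupVersionResource{Version: "v1", Resource: "pods"} // placeholder GVR
	lw := gcListWatcher(dynamicClient, podResource)                                  // dynamicClient: a *dynamic.Client built elsewhere
	list, err := lw.List(v1.ListOptions{ResourceVersion: "1"})     // LIST across all namespaces (NamespaceAll is "")
	watcher, err := lw.Watch(v1.ListOptions{ResourceVersion: "1"}) // WATCH from the same resource version
	_, _, _ = list, watcher, err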

View File

@@ -27,7 +27,6 @@ import (
_ "k8s.io/kubernetes/pkg/api/install" _ "k8s.io/kubernetes/pkg/api/install"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta/metatypes" "k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
@@ -231,14 +230,14 @@ func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *te
} }
func createEvent(eventType eventType, selfUID string, owners []string) event { func createEvent(eventType eventType, selfUID string, owners []string) event {
var ownerReferences []api.OwnerReference var ownerReferences []v1.OwnerReference
for i := 0; i < len(owners); i++ { for i := 0; i < len(owners); i++ {
ownerReferences = append(ownerReferences, api.OwnerReference{UID: types.UID(owners[i])}) ownerReferences = append(ownerReferences, v1.OwnerReference{UID: types.UID(owners[i])})
} }
return event{ return event{
eventType: eventType, eventType: eventType,
obj: &api.Pod{ obj: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: types.UID(selfUID), UID: types.UID(selfUID),
OwnerReferences: ownerReferences, OwnerReferences: ownerReferences,
}, },
@@ -350,8 +349,8 @@ func TestGCListWatcher(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
lw := gcListWatcher(client, podResource) lw := gcListWatcher(client, podResource)
lw.Watch(api.ListOptions{ResourceVersion: "1"}) lw.Watch(v1.ListOptions{ResourceVersion: "1"})
lw.List(api.ListOptions{ResourceVersion: "1"}) lw.List(v1.ListOptions{ResourceVersion: "1"})
if e, a := 2, len(testHandler.actions); e != a { if e, a := 2, len(testHandler.actions); e != a {
t.Errorf("expect %d requests, got %d", e, a) t.Errorf("expect %d requests, got %d", e, a)
} }

View File

@@ -24,13 +24,14 @@ package metaonly
import ( import (
"errors" "errors"
"fmt" "fmt"
"reflect"
"runtime"
time "time"
codec1978 "github.com/ugorji/go/codec" codec1978 "github.com/ugorji/go/codec"
pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned" pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
pkg2_v1 "k8s.io/kubernetes/pkg/api/v1" pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
pkg3_types "k8s.io/kubernetes/pkg/types" pkg3_types "k8s.io/kubernetes/pkg/types"
"reflect"
"runtime"
time "time"
) )
const ( const (

View File

@@ -20,11 +20,11 @@ import (
"reflect" "reflect"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch" batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
batchinternallisters "k8s.io/kubernetes/pkg/client/listers/batch/internalversion" batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
) )
@@ -33,7 +33,7 @@ import (
// Interface provides constructor for informer and lister for jobs // Interface provides constructor for informer and lister for jobs
type JobInformer interface { type JobInformer interface {
Informer() cache.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() batchinternallisters.JobLister Lister() batchv1listers.JobLister
} }
type jobInformer struct { type jobInformer struct {
@@ -61,11 +61,11 @@ func (f *jobInformer) Informer() cache.SharedIndexInformer {
func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Batch().Jobs(api.NamespaceAll).List(options) return client.Batch().Jobs(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Batch().Jobs(api.NamespaceAll).Watch(options) return client.Batch().Jobs(v1.NamespaceAll).Watch(options)
}, },
}, },
&batch.Job{}, &batch.Job{},
@@ -77,7 +77,7 @@ func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cach
} }
// Lister returns lister for jobInformer // Lister returns lister for jobInformer
func (f *jobInformer) Lister() batchinternallisters.JobLister { func (f *jobInformer) Lister() batchv1listers.JobLister {
informer := f.Informer() informer := f.Informer()
return batchinternallisters.NewJobLister(informer.GetIndexer()) return batchv1listers.NewJobLister(informer.GetIndexer())
} }
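A hypothetical consumer of the rewritten job informer, using only the constructors and lister wiring visible in this file; the clientset value and the resync period are assumptions:

	jobSharedInformer := informers.NewJobInformer(client, 30*time.Second) // client: a release_1_5 clientset.Interface
	stop := make(chan struct{})
	defer close(stop)
	go jobSharedInformer.Run(stop)
	// The generated v1 lister reads straight from the informer's indexer,
	// exactly as jobInformer.Lister() does above.
	jobLister := batchv1listers.NewJobLister(jobSharedInformer.GetIndexer())
	_ = jobLister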

View File

@@ -21,8 +21,10 @@ import (
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
coreinternallisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion" coreinternallisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -45,7 +47,7 @@ func (f *podInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.Pod{}) informerType := reflect.TypeOf(&v1.Pod{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -81,7 +83,7 @@ func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.Namespace{}) informerType := reflect.TypeOf(&v1.Namespace{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -100,6 +102,42 @@ func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {
//***************************************************************************** //*****************************************************************************
// InternalNamespaceInformer is a type of SharedIndexInformer which watches and lists all namespaces.
// Interface provides constructor for informer and lister for namespaces
type InternalNamespaceInformer interface {
Informer() cache.SharedIndexInformer
Lister() coreinternallisters.NamespaceLister
}
type internalNamespaceInformer struct {
*sharedInformerFactory
}
// Informer checks whether internalNamespaceInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// internalNamespaceInformer and connects it to sharedInformerFactory
func (f *internalNamespaceInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.Namespace{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = NewInternalNamespaceInformer(f.internalclient, f.defaultResync)
f.informers[informerType] = informer
return informer
}
// Lister returns lister for internalNamespaceInformer
func (f *internalNamespaceInformer) Lister() coreinternallisters.NamespaceLister {
informer := f.Informer()
return coreinternallisters.NewNamespaceLister(informer.GetIndexer())
}
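Every Informer() method in this file, including the new internal variants, repeats the same lazy, type-keyed caching dance under the factory lock. A self-contained sketch of just that pattern; the types below are simplified stand-ins, not the real cache.SharedIndexInformer:

package main

import (
	"fmt"
	"reflect"
	"sync"
)

// informer and fakeInformer stand in for cache.SharedIndexInformer.
type informer interface{ HasSynced() bool }

type fakeInformer struct{ kind string }

func (f *fakeInformer) HasSynced() bool { return true }

type factory struct {
	lock      sync.Mutex
	informers map[reflect.Type]informer
}

// informerFor returns the cached informer for the example object's type,
// creating it on first use: the same check/create/store steps as above.
func (f *factory) informerFor(exampleObj interface{}, newFn func() informer) informer {
	f.lock.Lock()
	defer f.lock.Unlock()
	t := reflect.TypeOf(exampleObj)
	if inf, ok := f.informers[t]; ok {
		return inf
	}
	inf := newFn()
	f.informers[t] = inf
	return inf
}

type Namespace struct{}

func main() {
	f := &factory{informers: map[reflect.Type]informer{}}
	a := f.informerFor(&Namespace{}, func() informer { return &fakeInformer{kind: "Namespace"} })
	b := f.informerFor(&Namespace{}, func() informer { return &fakeInformer{kind: "Namespace"} })
	fmt.Println(a == b) // true: the second caller reuses the first informer
}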
//*****************************************************************************
// NodeInformer is type of SharedIndexInformer which watches and lists all nodes. // NodeInformer is type of SharedIndexInformer which watches and lists all nodes.
// Interface provides constructor for informer and lister for nodes // Interface provides constructor for informer and lister for nodes
type NodeInformer interface { type NodeInformer interface {
@@ -117,7 +155,7 @@ func (f *nodeInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.Node{}) informerType := reflect.TypeOf(&v1.Node{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -153,7 +191,7 @@ func (f *pvcInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.PersistentVolumeClaim{}) informerType := reflect.TypeOf(&v1.PersistentVolumeClaim{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -189,7 +227,7 @@ func (f *pvInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.PersistentVolume{}) informerType := reflect.TypeOf(&v1.PersistentVolume{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -212,7 +250,7 @@ func (f *pvInformer) Lister() *cache.StoreToPVFetcher {
// Interface provides constructor for informer and lister for limit ranges. // Interface provides constructor for informer and lister for limit ranges.
type LimitRangeInformer interface { type LimitRangeInformer interface {
Informer() cache.SharedIndexInformer Informer() cache.SharedIndexInformer
Lister() coreinternallisters.LimitRangeLister Lister() *cache.StoreToLimitRangeLister
} }
type limitRangeInformer struct { type limitRangeInformer struct {
@@ -225,7 +263,7 @@ func (f *limitRangeInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.LimitRange{}) informerType := reflect.TypeOf(&v1.LimitRange{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -237,23 +275,61 @@ func (f *limitRangeInformer) Informer() cache.SharedIndexInformer {
} }
// Lister returns lister for limitRangeInformer // Lister returns lister for limitRangeInformer
func (f *limitRangeInformer) Lister() coreinternallisters.LimitRangeLister { func (f *limitRangeInformer) Lister() *cache.StoreToLimitRangeLister {
informer := f.Informer()
return &cache.StoreToLimitRangeLister{Indexer: informer.GetIndexer()}
}
//*****************************************************************************
// InternalLimitRangeInformer is a type of SharedIndexInformer which watches and lists all limit ranges.
// Interface provides constructor for informer and lister for limit ranges.
type InternalLimitRangeInformer interface {
Informer() cache.SharedIndexInformer
Lister() coreinternallisters.LimitRangeLister
}
type internalLimitRangeInformer struct {
*sharedInformerFactory
}
// Informer checks whether internalLimitRangeInformer exists in sharedInformerFactory and if not, it creates a new informer of type
// internalLimitRangeInformer and connects it to sharedInformerFactory
func (f *internalLimitRangeInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.LimitRange{})
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = NewInternalLimitRangeInformer(f.internalclient, f.defaultResync)
f.informers[informerType] = informer
return informer
}
// Lister returns lister for internalLimitRangeInformer
func (f *internalLimitRangeInformer) Lister() coreinternallisters.LimitRangeLister {
informer := f.Informer() informer := f.Informer()
return coreinternallisters.NewLimitRangeLister(informer.GetIndexer()) return coreinternallisters.NewLimitRangeLister(informer.GetIndexer())
} }
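With both flavors registered, a caller picks whichever lister matches the API types it still consumes; the struct comment in factory.go later in this diff suggests the internal variants exist for admission plugins and similar code that has not moved to v1 yet. A hypothetical pair of lookups, with method names as declared in factory.go:

	versionedLimitRanges := factory.LimitRanges().Lister()        // *cache.StoreToLimitRangeLister over v1 objects
	internalLimitRanges := factory.InternalLimitRanges().Lister() // coreinternallisters.LimitRangeLister over api objects
	_, _ = versionedLimitRanges, internalLimitRanges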
//*****************************************************************************
// NewPodInformer returns a SharedIndexInformer that lists and watches all pods // NewPodInformer returns a SharedIndexInformer that lists and watches all pods
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().Pods(api.NamespaceAll).List(options) return client.Core().Pods(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().Pods(api.NamespaceAll).Watch(options) return client.Core().Pods(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.Pod{}, &v1.Pod{},
resyncPeriod, resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
@@ -265,14 +341,14 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cach
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().Nodes().List(options) return client.Core().Nodes().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().Nodes().Watch(options) return client.Core().Nodes().Watch(options)
}, },
}, },
&api.Node{}, &v1.Node{},
resyncPeriod, resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})
@@ -283,14 +359,14 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cac
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) return client.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) return client.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.PersistentVolumeClaim{}, &v1.PersistentVolumeClaim{},
resyncPeriod, resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
) )
@@ -302,14 +378,14 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cach
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().PersistentVolumes().List(options) return client.Core().PersistentVolumes().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().PersistentVolumes().Watch(options) return client.Core().PersistentVolumes().Watch(options)
}, },
}, },
&api.PersistentVolume{}, &v1.PersistentVolume{},
resyncPeriod, resyncPeriod,
cache.Indexers{}) cache.Indexers{})
@@ -320,13 +396,35 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().Namespaces().List(options) return client.Core().Namespaces().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().Namespaces().Watch(options) return client.Core().Namespaces().Watch(options)
}, },
}, },
&v1.Namespace{},
resyncPeriod,
cache.Indexers{})
return sharedIndexInformer
}
// NewInternalNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces
func NewInternalNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return client.Core().Namespaces().List(internalOptions)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return client.Core().Namespaces().Watch(internalOptions)
},
},
&api.Namespace{}, &api.Namespace{},
resyncPeriod, resyncPeriod,
cache.Indexers{}) cache.Indexers{})
@@ -338,11 +436,33 @@ func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration
func NewLimitRangeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewLimitRangeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().LimitRanges(api.NamespaceAll).List(options) return client.Core().LimitRanges(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().LimitRanges(api.NamespaceAll).Watch(options) return client.Core().LimitRanges(v1.NamespaceAll).Watch(options)
},
},
&v1.LimitRange{},
resyncPeriod,
cache.Indexers{})
return sharedIndexInformer
}
// NewInternalLimitRangeInformer returns a SharedIndexInformer that lists and watches all LimitRanges
func NewInternalLimitRangeInformer(internalclient internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return internalclient.Core().LimitRanges(v1.NamespaceAll).List(internalOptions)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
internalOptions := api.ListOptions{}
v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
return internalclient.Core().LimitRanges(v1.NamespaceAll).Watch(internalOptions)
}, },
}, },
&api.LimitRange{}, &api.LimitRange{},
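The two internal constructors above (NewInternalNamespaceInformer and NewInternalLimitRangeInformer) share one adapter idea: the cache.ListWatch funcs receive versioned v1.ListOptions, convert them, and only then call the internal clientset. A self-contained sketch of that shape; the types here are stand-ins, not the real Kubernetes ones:

package main

import "fmt"

// Stand-ins for v1.ListOptions and api.ListOptions.
type v1ListOptions struct{ LabelSelector string }
type apiListOptions struct{ LabelSelector string }

// Stand-in for the generated Convert_v1_ListOptions_To_api_ListOptions.
func convertV1ToAPI(in *v1ListOptions, out *apiListOptions) {
	out.LabelSelector = in.LabelSelector
}

// Stand-in for an internal clientset call that only understands api options.
func listNamespacesInternal(opts apiListOptions) []string {
	fmt.Println("listing with selector:", opts.LabelSelector)
	return []string{"default", "kube-system"}
}

// versionedListFunc has the shape of a ListFunc after this commit: it accepts
// versioned options and adapts them before calling the internal client.
func versionedListFunc(options v1ListOptions) []string {
	internal := apiListOptions{}
	convertV1ToAPI(&options, &internal)
	return listNamespacesInternal(internal)
}

func main() {
	fmt.Println(versionedListFunc(v1ListOptions{LabelSelector: "app=demo"}))
}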
@@ -371,7 +491,7 @@ func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer {
f.lock.Lock() f.lock.Lock()
defer f.lock.Unlock() defer f.lock.Unlock()
informerType := reflect.TypeOf(&api.ServiceAccount{}) informerType := reflect.TypeOf(&v1.ServiceAccount{})
informer, exists := f.informers[informerType] informer, exists := f.informers[informerType]
if exists { if exists {
return informer return informer
@@ -392,14 +512,14 @@ func (f *serviceAccountInformer) Lister() *cache.StoreToServiceAccountLister {
func NewServiceAccountInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { func NewServiceAccountInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer( sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return client.Core().ServiceAccounts(api.NamespaceAll).List(options) return client.Core().ServiceAccounts(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return client.Core().ServiceAccounts(api.NamespaceAll).Watch(options) return client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ServiceAccount{}, &v1.ServiceAccount{},
resyncPeriod, resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}) cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

View File

@@ -20,8 +20,8 @@ import (
"reflect" "reflect"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -49,11 +49,11 @@ func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Extensions().DaemonSets(api.NamespaceAll).List(options) return f.client.Extensions().DaemonSets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Extensions().DaemonSets(api.NamespaceAll).Watch(options) return f.client.Extensions().DaemonSets(v1.NamespaceAll).Watch(options)
}, },
}, },
&extensions.DaemonSet{}, &extensions.DaemonSet{},
@@ -91,11 +91,11 @@ func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Extensions().Deployments(api.NamespaceAll).List(options) return f.client.Extensions().Deployments(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Extensions().Deployments(api.NamespaceAll).Watch(options) return f.client.Extensions().Deployments(v1.NamespaceAll).Watch(options)
}, },
}, },
&extensions.Deployment{}, &extensions.Deployment{},
@@ -135,11 +135,11 @@ func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Extensions().ReplicaSets(api.NamespaceAll).List(options) return f.client.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Extensions().ReplicaSets(api.NamespaceAll).Watch(options) return f.client.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
}, },
}, },
&extensions.ReplicaSet{}, &extensions.ReplicaSet{},

View File

@@ -23,7 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
) )
// SharedInformerFactory provides interface which holds unique informers for pods, nodes, namespaces, persistent volume // SharedInformerFactory provides interface which holds unique informers for pods, nodes, namespaces, persistent volume
@@ -38,7 +39,9 @@ type SharedInformerFactory interface {
Pods() PodInformer Pods() PodInformer
LimitRanges() LimitRangeInformer LimitRanges() LimitRangeInformer
InternalLimitRanges() InternalLimitRangeInformer
Namespaces() NamespaceInformer Namespaces() NamespaceInformer
InternalNamespaces() InternalNamespaceInformer
Nodes() NodeInformer Nodes() NodeInformer
PersistentVolumeClaims() PVCInformer PersistentVolumeClaims() PVCInformer
PersistentVolumes() PVInformer PersistentVolumes() PVInformer
@@ -59,9 +62,11 @@ type SharedInformerFactory interface {
} }
type sharedInformerFactory struct { type sharedInformerFactory struct {
client clientset.Interface client clientset.Interface
lock sync.Mutex // for admission plugins etc.
defaultResync time.Duration internalclient internalclientset.Interface
lock sync.Mutex
defaultResync time.Duration
informers map[reflect.Type]cache.SharedIndexInformer informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started // startedInformers is used for tracking which informers have been started
@@ -70,9 +75,10 @@ type sharedInformerFactory struct {
} }
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory // NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory { func NewSharedInformerFactory(client clientset.Interface, internalclient internalclientset.Interface, defaultResync time.Duration) SharedInformerFactory {
return &sharedInformerFactory{ return &sharedInformerFactory{
client: client, client: client,
internalclient: internalclient,
defaultResync: defaultResync, defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer), informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool), startedInformers: make(map[reflect.Type]bool),
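A hypothetical call site for the widened constructor; the client names and resync period are placeholders, and a caller that never requests the internal informers could presumably pass nil for the second argument:

	factory := informers.NewSharedInformerFactory(versionedClient, internalClient, 12*time.Hour)
	podInformer := factory.Pods().Informer()          // served by the versioned (release_1_5) client
	nsLister := factory.InternalNamespaces().Lister() // served by the internal client
	_, _ = podInformer, nsLister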
@@ -107,6 +113,11 @@ func (f *sharedInformerFactory) Namespaces() NamespaceInformer {
return &namespaceInformer{sharedInformerFactory: f} return &namespaceInformer{sharedInformerFactory: f}
} }
// InternalNamespaces returns a SharedIndexInformer that lists and watches all namespaces
func (f *sharedInformerFactory) InternalNamespaces() InternalNamespaceInformer {
return &internalNamespaceInformer{sharedInformerFactory: f}
}
// PersistentVolumeClaims returns a SharedIndexInformer that lists and watches all persistent volume claims // PersistentVolumeClaims returns a SharedIndexInformer that lists and watches all persistent volume claims
func (f *sharedInformerFactory) PersistentVolumeClaims() PVCInformer { func (f *sharedInformerFactory) PersistentVolumeClaims() PVCInformer {
return &pvcInformer{sharedInformerFactory: f} return &pvcInformer{sharedInformerFactory: f}
@@ -156,6 +167,11 @@ func (f *sharedInformerFactory) LimitRanges() LimitRangeInformer {
return &limitRangeInformer{sharedInformerFactory: f} return &limitRangeInformer{sharedInformerFactory: f}
} }
// InternalLimitRanges returns a SharedIndexInformer that lists and watches all limit ranges.
func (f *sharedInformerFactory) InternalLimitRanges() InternalLimitRangeInformer {
return &internalLimitRangeInformer{sharedInformerFactory: f}
}
// StorageClasses returns a SharedIndexInformer that lists and watches all storage classes // StorageClasses returns a SharedIndexInformer that lists and watches all storage classes
func (f *sharedInformerFactory) StorageClasses() StorageClassInformer { func (f *sharedInformerFactory) StorageClasses() StorageClassInformer {
return &storageClassInformer{sharedInformerFactory: f} return &storageClassInformer{sharedInformerFactory: f}

View File

@@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac" rbacinternal "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
) )
@@ -53,20 +53,20 @@ func (f *sharedInformerFactory) ForResource(resource unversioned.GroupResource)
case api.Resource("serviceaccounts"): case api.Resource("serviceaccounts"):
return &genericInformer{resource: resource, informer: f.ServiceAccounts().Informer()}, nil return &genericInformer{resource: resource, informer: f.ServiceAccounts().Informer()}, nil
case extensions.Resource("daemonsets"): case extensionsinternal.Resource("daemonsets"):
return &genericInformer{resource: resource, informer: f.DaemonSets().Informer()}, nil return &genericInformer{resource: resource, informer: f.DaemonSets().Informer()}, nil
case extensions.Resource("deployments"): case extensionsinternal.Resource("deployments"):
return &genericInformer{resource: resource, informer: f.Deployments().Informer()}, nil return &genericInformer{resource: resource, informer: f.Deployments().Informer()}, nil
case extensions.Resource("replicasets"): case extensionsinternal.Resource("replicasets"):
return &genericInformer{resource: resource, informer: f.ReplicaSets().Informer()}, nil return &genericInformer{resource: resource, informer: f.ReplicaSets().Informer()}, nil
case rbac.Resource("clusterrolebindings"): case rbacinternal.Resource("clusterrolebindings"):
return &genericInformer{resource: resource, informer: f.ClusterRoleBindings().Informer()}, nil return &genericInformer{resource: resource, informer: f.ClusterRoleBindings().Informer()}, nil
case rbac.Resource("clusterroles"): case rbacinternal.Resource("clusterroles"):
return &genericInformer{resource: resource, informer: f.ClusterRoles().Informer()}, nil return &genericInformer{resource: resource, informer: f.ClusterRoles().Informer()}, nil
case rbac.Resource("rolebindings"): case rbacinternal.Resource("rolebindings"):
return &genericInformer{resource: resource, informer: f.RoleBindings().Informer()}, nil return &genericInformer{resource: resource, informer: f.RoleBindings().Informer()}, nil
case rbac.Resource("roles"): case rbacinternal.Resource("roles"):
return &genericInformer{resource: resource, informer: f.Roles().Informer()}, nil return &genericInformer{resource: resource, informer: f.Roles().Informer()}, nil
case batch.Resource("jobs"): case batch.Resource("jobs"):
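For completeness, a sketch of resolving an informer generically by GroupResource, assuming a factory built as above and that the returned generic informer exposes an Informer() accessor:

	generic, err := factory.ForResource(extensionsinternal.Resource("deployments"))
	if err != nil {
		return err // the resource has no shared informer registered in the switch above
	}
	deploymentInformer := generic.Informer()
	_ = deploymentInformer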

View File

@@ -19,8 +19,8 @@ package informers
import ( import (
"reflect" "reflect"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/rbac" rbac "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -46,10 +46,10 @@ func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().ClusterRoles().List(options) return f.client.Rbac().ClusterRoles().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().ClusterRoles().Watch(options) return f.client.Rbac().ClusterRoles().Watch(options)
}, },
}, },
@@ -86,10 +86,10 @@ func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().ClusterRoleBindings().List(options) return f.client.Rbac().ClusterRoleBindings().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().ClusterRoleBindings().Watch(options) return f.client.Rbac().ClusterRoleBindings().Watch(options)
}, },
}, },
@@ -126,11 +126,11 @@ func (f *roleInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().Roles(api.NamespaceAll).List(options) return f.client.Rbac().Roles(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().Roles(api.NamespaceAll).Watch(options) return f.client.Rbac().Roles(v1.NamespaceAll).Watch(options)
}, },
}, },
&rbac.Role{}, &rbac.Role{},
@@ -166,11 +166,11 @@ func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().RoleBindings(api.NamespaceAll).List(options) return f.client.Rbac().RoleBindings(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().RoleBindings(api.NamespaceAll).Watch(options) return f.client.Rbac().RoleBindings(v1.NamespaceAll).Watch(options)
}, },
}, },
&rbac.RoleBinding{}, &rbac.RoleBinding{},
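Every informer in this file builds the same ListWatch shape; the change above only swaps the option type from the internal api.ListOptions to the versioned v1.ListOptions. A stand-in sketch of that shape, with an invented fakeRolesClient in place of the generated RBAC client and simplified local types instead of the real cache/runtime/watch packages:

package main

import "fmt"

// listOptions, object and watchInterface are simplified stand-ins for
// v1.ListOptions, runtime.Object and watch.Interface.
type listOptions struct{ LabelSelector string }
type object interface{}
type watchInterface interface{ Stop() }

// listWatch mirrors cache.ListWatch: two closures that capture a typed client.
type listWatch struct {
	ListFunc  func(options listOptions) (object, error)
	WatchFunc func(options listOptions) (watchInterface, error)
}

type fakeWatch struct{}

func (fakeWatch) Stop() {}

// fakeRolesClient stands in for the generated f.client.Rbac().Roles(ns) client.
type fakeRolesClient struct{}

func (fakeRolesClient) List(o listOptions) (object, error)          { return []string{"admin"}, nil }
func (fakeRolesClient) Watch(o listOptions) (watchInterface, error) { return fakeWatch{}, nil }

func main() {
	client := fakeRolesClient{}
	lw := &listWatch{
		ListFunc:  func(o listOptions) (object, error) { return client.List(o) },
		WatchFunc: func(o listOptions) (watchInterface, error) { return client.Watch(o) },
	}
	roles, _ := lw.ListFunc(listOptions{})
	fmt.Println(roles) // [admin]
}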


@@ -19,8 +19,8 @@ package informers
import ( import (
"reflect" "reflect"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/storage" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch" "k8s.io/kubernetes/pkg/watch"
@@ -48,10 +48,10 @@ func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
} }
informer = cache.NewSharedIndexInformer( informer = cache.NewSharedIndexInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Storage().StorageClasses().List(options) return f.client.Storage().StorageClasses().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Storage().StorageClasses().Watch(options) return f.client.Storage().StorageClasses().Watch(options)
}, },
}, },


@@ -23,14 +23,14 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch" "k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
batchinternallisters "k8s.io/kubernetes/pkg/client/listers/batch/internalversion" batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -60,7 +60,7 @@ type JobController struct {
expectations controller.ControllerExpectationsInterface expectations controller.ControllerExpectationsInterface
// A store of jobs // A store of jobs
jobLister batchinternallisters.JobLister jobLister batchv1listers.JobLister
// A store of pods, populated by the podController // A store of pods, populated by the podController
podStore cache.StoreToPodLister podStore cache.StoreToPodLister
@@ -75,7 +75,7 @@ func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informe
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset. // TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().RESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().RESTClient().GetRateLimiter())
@@ -85,11 +85,11 @@ func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informe
kubeClient: kubeClient, kubeClient: kubeClient,
podControl: controller.RealPodControl{ podControl: controller.RealPodControl{
KubeClient: kubeClient, KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}), Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "job-controller"}),
}, },
expectations: controller.NewControllerExpectations(), expectations: controller.NewControllerExpectations(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"),
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}), recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "job-controller"}),
} }
jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -135,7 +135,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
} }
// getPodJob returns the job managing the given pod. // getPodJob returns the job managing the given pod.
func (jm *JobController) getPodJob(pod *api.Pod) *batch.Job { func (jm *JobController) getPodJob(pod *v1.Pod) *batch.Job {
jobs, err := jm.jobLister.GetPodJobs(pod) jobs, err := jm.jobLister.GetPodJobs(pod)
if err != nil { if err != nil {
glog.V(4).Infof("No jobs found for pod %v, job controller will avoid syncing", pod.Name) glog.V(4).Infof("No jobs found for pod %v, job controller will avoid syncing", pod.Name)
@@ -150,7 +150,7 @@ func (jm *JobController) getPodJob(pod *api.Pod) *batch.Job {
// When a pod is created, enqueue the controller that manages it and update its expectations. // When a pod is created, enqueue the controller that manages it and update its expectations.
func (jm *JobController) addPod(obj interface{}) { func (jm *JobController) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
if pod.DeletionTimestamp != nil { if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that // on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation. // is already pending deletion. Prevent the pod from being a creation observation.
@@ -170,10 +170,10 @@ func (jm *JobController) addPod(obj interface{}) {
// When a pod is updated, figure out what job/s manage it and wake them up. // When a pod is updated, figure out what job/s manage it and wake them up.
// If the labels of the pod have changed we need to awaken both the old // If the labels of the pod have changed we need to awaken both the old
// and new job. old and cur must be *api.Pod types. // and new job. old and cur must be *v1.Pod types.
func (jm *JobController) updatePod(old, cur interface{}) { func (jm *JobController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod) curPod := cur.(*v1.Pod)
oldPod := old.(*api.Pod) oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion { if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods. // Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs. // Two different versions of the same pod will always have different RVs.
@@ -201,9 +201,9 @@ func (jm *JobController) updatePod(old, cur interface{}) {
} }
// When a pod is deleted, enqueue the job that manages the pod and update its expectations. // When a pod is deleted, enqueue the job that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) deletePod(obj interface{}) { func (jm *JobController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not // When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains // in the list, leading to the insertion of a tombstone object which contains
@@ -215,7 +215,7 @@ func (jm *JobController) deletePod(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj)) utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj))
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %+v", obj)) utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %+v", obj))
return return
@@ -346,7 +346,7 @@ func (jm *JobController) syncJob(key string) error {
failed += active failed += active
active = 0 active = 0
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline")) job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
jm.recorder.Event(&job, api.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline") jm.recorder.Event(&job, v1.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline")
} else { } else {
if jobNeedsSync && job.DeletionTimestamp == nil { if jobNeedsSync && job.DeletionTimestamp == nil {
active = jm.manageJob(activePods, succeeded, &job) active = jm.manageJob(activePods, succeeded, &job)
@@ -371,10 +371,10 @@ func (jm *JobController) syncJob(key string) error {
if completions >= *job.Spec.Completions { if completions >= *job.Spec.Completions {
complete = true complete = true
if active > 0 { if active > 0 {
jm.recorder.Event(&job, api.EventTypeWarning, "TooManyActivePods", "Too many active pods running after completion count reached") jm.recorder.Event(&job, v1.EventTypeWarning, "TooManyActivePods", "Too many active pods running after completion count reached")
} }
if completions > *job.Spec.Completions { if completions > *job.Spec.Completions {
jm.recorder.Event(&job, api.EventTypeWarning, "TooManySucceededPods", "Too many succeeded pods running after completion count reached") jm.recorder.Event(&job, v1.EventTypeWarning, "TooManySucceededPods", "Too many succeeded pods running after completion count reached")
} }
} }
} }
@@ -413,7 +413,7 @@ func pastActiveDeadline(job *batch.Job) bool {
func newCondition(conditionType batch.JobConditionType, reason, message string) batch.JobCondition { func newCondition(conditionType batch.JobConditionType, reason, message string) batch.JobCondition {
return batch.JobCondition{ return batch.JobCondition{
Type: conditionType, Type: conditionType,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
LastProbeTime: unversioned.Now(), LastProbeTime: unversioned.Now(),
LastTransitionTime: unversioned.Now(), LastTransitionTime: unversioned.Now(),
Reason: reason, Reason: reason,
@@ -422,16 +422,16 @@ func newCondition(conditionType batch.JobConditionType, reason, message string)
} }
// getStatus returns the number of succeeded and failed pods running a job // getStatus returns the number of succeeded and failed pods running a job
func getStatus(pods []*api.Pod) (succeeded, failed int32) { func getStatus(pods []*v1.Pod) (succeeded, failed int32) {
succeeded = int32(filterPods(pods, api.PodSucceeded)) succeeded = int32(filterPods(pods, v1.PodSucceeded))
failed = int32(filterPods(pods, api.PodFailed)) failed = int32(filterPods(pods, v1.PodFailed))
return return
} }
// manageJob is the core method responsible for managing the number of running // manageJob is the core method responsible for managing the number of running
// pods according to what is specified in the job.Spec. // pods according to what is specified in the job.Spec.
// Does NOT modify <activePods>. // Does NOT modify <activePods>.
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int32, job *batch.Job) int32 { func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) int32 {
var activeLock sync.Mutex var activeLock sync.Mutex
active := int32(len(activePods)) active := int32(len(activePods))
parallelism := *job.Spec.Parallelism parallelism := *job.Spec.Parallelism
@@ -523,7 +523,7 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error {
} }
// filterPods returns pods based on their phase. // filterPods returns pods based on their phase.
func filterPods(pods []*api.Pod, phase api.PodPhase) int { func filterPods(pods []*v1.Pod, phase v1.PodPhase) int {
result := 0 result := 0
for i := range pods { for i := range pods {
if phase == pods[i].Status.Phase { if phase == pods[i].Status.Phase {
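The getStatus and filterPods helpers above produce the counters the sync loop writes into job.Status. A self-contained sketch of that counting logic, using simplified stand-in types (pod, podPhase) rather than the real v1 types:

package main

import "fmt"

// podPhase and pod are simplified stand-ins for v1.PodPhase and v1.Pod.
type podPhase string

const (
	podSucceeded podPhase = "Succeeded"
	podFailed    podPhase = "Failed"
	podRunning   podPhase = "Running"
)

type pod struct{ phase podPhase }

// filterPods counts the pods whose phase matches, as the controller's
// filterPods helper does.
func filterPods(pods []*pod, phase podPhase) int {
	n := 0
	for _, p := range pods {
		if p.phase == phase {
			n++
		}
	}
	return n
}

// getStatus derives the succeeded/failed counters the sync loop records.
func getStatus(pods []*pod) (succeeded, failed int32) {
	succeeded = int32(filterPods(pods, podSucceeded))
	failed = int32(filterPods(pods, podFailed))
	return
}

func main() {
	pods := []*pod{{podSucceeded}, {podFailed}, {podRunning}, {podSucceeded}}
	s, f := getStatus(pods)
	fmt.Println(s, f) // 2 1
}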


@@ -21,13 +21,13 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch" batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -41,22 +41,22 @@ var alwaysReady = func() bool { return true }
func newJob(parallelism, completions int32) *batch.Job { func newJob(parallelism, completions int32) *batch.Job {
j := &batch.Job{ j := &batch.Job{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
}, },
Spec: batch.JobSpec{ Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"}, MatchLabels: map[string]string{"foo": "bar"},
}, },
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"foo": "bar", "foo": "bar",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{Image: "foo/bar"}, {Image: "foo/bar"},
}, },
}, },
@@ -88,23 +88,23 @@ func getKey(job *batch.Job, t *testing.T) string {
} }
func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) { func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod()) sharedInformers := informers.NewSharedInformerFactory(kubeClient, nil, resyncPeriod())
jm := NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), kubeClient) jm := NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), kubeClient)
return jm, sharedInformers return jm, sharedInformers
} }
// create count pods with the given phase for the given job // create count pods with the given phase for the given job
func newPodList(count int32, status api.PodPhase, job *batch.Job) []api.Pod { func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod {
pods := []api.Pod{} pods := []v1.Pod{}
for i := int32(0); i < count; i++ { for i := int32(0); i < count; i++ {
newPod := api.Pod{ newPod := v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod-%v", rand.String(10)), Name: fmt.Sprintf("pod-%v", rand.String(10)),
Labels: job.Spec.Selector.MatchLabels, Labels: job.Spec.Selector.MatchLabels,
Namespace: job.Namespace, Namespace: job.Namespace,
}, },
Status: api.PodStatus{Phase: status}, Status: v1.PodStatus{Phase: status},
} }
pods = append(pods, newPod) pods = append(pods, newPod)
} }
@@ -227,7 +227,7 @@ func TestControllerSyncJob(t *testing.T) {
for name, tc := range testCases { for name, tc := range testCases {
// job manager setup // job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{Err: tc.podControllerError} fakePodControl := controller.FakePodControl{Err: tc.podControllerError}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -247,16 +247,16 @@ func TestControllerSyncJob(t *testing.T) {
} }
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer() podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
for _, pod := range newPodList(tc.pendingPods, api.PodPending, job) { for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
for _, pod := range newPodList(tc.activePods, api.PodRunning, job) { for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) { for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) { for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
@@ -331,7 +331,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
for name, tc := range testCases { for name, tc := range testCases {
// job manager setup // job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -350,13 +350,13 @@ func TestSyncJobPastDeadline(t *testing.T) {
job.Status.StartTime = &start job.Status.StartTime = &start
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer() podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
for _, pod := range newPodList(tc.activePods, api.PodRunning, job) { for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) { for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) { for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod) podIndexer.Add(&pod)
} }
@@ -395,7 +395,7 @@ func TestSyncJobPastDeadline(t *testing.T) {
func getCondition(job *batch.Job, condition batch.JobConditionType) bool { func getCondition(job *batch.Job, condition batch.JobConditionType) bool {
for _, v := range job.Status.Conditions { for _, v := range job.Status.Conditions {
if v.Type == condition && v.Status == api.ConditionTrue { if v.Type == condition && v.Status == v1.ConditionTrue {
return true return true
} }
} }
@@ -403,7 +403,7 @@ func getCondition(job *batch.Job, condition batch.JobConditionType) bool {
} }
func TestSyncPastDeadlineJobFinished(t *testing.T) { func TestSyncPastDeadlineJobFinished(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -438,7 +438,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
} }
func TestSyncJobComplete(t *testing.T) { func TestSyncJobComplete(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -463,7 +463,7 @@ func TestSyncJobComplete(t *testing.T) {
} }
func TestSyncJobDeleted(t *testing.T) { func TestSyncJobDeleted(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, _ := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, _ := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -484,7 +484,7 @@ func TestSyncJobDeleted(t *testing.T) {
} }
func TestSyncJobUpdateRequeue(t *testing.T) { func TestSyncJobUpdateRequeue(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -510,38 +510,38 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
} }
func TestJobPodLookup(t *testing.T) { func TestJobPodLookup(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady manager.jobStoreSynced = alwaysReady
testCases := []struct { testCases := []struct {
job *batch.Job job *batch.Job
pod *api.Pod pod *v1.Pod
expectedName string expectedName string
}{ }{
// pods without labels don't match any job // pods without labels don't match any job
{ {
job: &batch.Job{ job: &batch.Job{
ObjectMeta: api.ObjectMeta{Name: "basic"}, ObjectMeta: v1.ObjectMeta{Name: "basic"},
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}, ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll},
}, },
expectedName: "", expectedName: "",
}, },
// matching labels, different namespace // matching labels, different namespace
{ {
job: &batch.Job{ job: &batch.Job{
ObjectMeta: api.ObjectMeta{Name: "foo"}, ObjectMeta: v1.ObjectMeta{Name: "foo"},
Spec: batch.JobSpec{ Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"}, MatchLabels: map[string]string{"foo": "bar"},
}, },
}, },
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo2", Name: "foo2",
Namespace: "ns", Namespace: "ns",
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
@@ -552,7 +552,7 @@ func TestJobPodLookup(t *testing.T) {
// matching ns and labels returns the job // matching ns and labels returns the job
{ {
job: &batch.Job{ job: &batch.Job{
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: batch.JobSpec{ Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchExpressions: []unversioned.LabelSelectorRequirement{ MatchExpressions: []unversioned.LabelSelectorRequirement{
@@ -565,8 +565,8 @@ func TestJobPodLookup(t *testing.T) {
}, },
}, },
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo3", Name: "foo3",
Namespace: "ns", Namespace: "ns",
Labels: map[string]string{"foo": "bar"}, Labels: map[string]string{"foo": "bar"},
@@ -601,7 +601,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods // TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations. // and checking expectations.
func TestSyncJobExpectations(t *testing.T) { func TestSyncJobExpectations(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc) manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -611,7 +611,7 @@ func TestSyncJobExpectations(t *testing.T) {
job := newJob(2, 2) job := newJob(2, 2)
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job) sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
pods := newPodList(2, api.PodPending, job) pods := newPodList(2, v1.PodPending, job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer() podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
podIndexer.Add(&pods[0]) podIndexer.Add(&pods[0])
@@ -656,7 +656,7 @@ func TestWatchJobs(t *testing.T) {
t.Errorf("Expected to find job under key %v: %v", key, err) t.Errorf("Expected to find job under key %v: %v", key, err)
return nil return nil
} }
if !api.Semantic.DeepDerivative(*job, testJob) { if !v1.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job) t.Errorf("Expected %#v, but got %#v", testJob, *job)
} }
return nil return nil
@@ -699,7 +699,7 @@ func TestWatchPods(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Expected to find job under key %v: %v", key, err) t.Errorf("Expected to find job under key %v: %v", key, err)
} }
if !api.Semantic.DeepDerivative(job, testJob) { if !v1.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job) t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received) close(received)
return nil return nil
@@ -714,9 +714,9 @@ func TestWatchPods(t *testing.T) {
go sharedInformerFactory.Pods().Informer().Run(stopCh) go sharedInformerFactory.Pods().Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh) go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(1, api.PodRunning, testJob) pods := newPodList(1, v1.PodRunning, testJob)
testPod := pods[0] testPod := pods[0]
testPod.Status.Phase = api.PodFailed testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod) fakeWatch.Add(&testPod)
t.Log("Waiting for pod to reach syncHandler") t.Log("Waiting for pod to reach syncHandler")
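The table-driven tests earlier in this file (TestControllerSyncJob, TestSyncJobPastDeadline) share one structure: a map from scenario name to expected counts, iterated with a shared fake sync. A minimal stand-in of that structure, placed in a _test.go file; the names, counts and fakeSync below are invented for illustration only:

package main

import "testing"

// syncResult is a stand-in for what one sync pass reports.
type syncResult struct{ active int32 }

// fakeSync stands in for manager.syncJob; it just echoes the active count.
func fakeSync(active, succeeded int32) syncResult {
	return syncResult{active: active}
}

func TestTableDrivenSync(t *testing.T) {
	testCases := map[string]struct {
		activePods, succeededPods int32
		expectedActive            int32
	}{
		"all pods active":    {activePods: 2, succeededPods: 0, expectedActive: 2},
		"one pod succeeded":  {activePods: 1, succeededPods: 1, expectedActive: 1},
		"job fully finished": {activePods: 0, succeededPods: 2, expectedActive: 0},
	}
	for name, tc := range testCases {
		got := fakeSync(tc.activePods, tc.succeededPods)
		if got.active != tc.expectedActive {
			t.Errorf("%s: expected %d active pods, got %d", name, tc.expectedActive, got.active)
		}
	}
}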


@@ -17,13 +17,13 @@ limitations under the License.
package job package job
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch" batch "k8s.io/kubernetes/pkg/apis/batch/v1"
) )
func IsJobFinished(j *batch.Job) bool { func IsJobFinished(j *batch.Job) bool {
for _, c := range j.Status.Conditions { for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == api.ConditionTrue { if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true return true
} }
} }
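IsJobFinished above scans the job's conditions for a Complete or Failed condition whose status is True. The same check with simplified stand-in types in place of the versioned batch/v1 types:

package main

import "fmt"

// Simplified stand-ins for the batch condition types used by IsJobFinished.
type conditionType string
type conditionStatus string

const (
	jobComplete   conditionType   = "Complete"
	jobFailed     conditionType   = "Failed"
	conditionTrue conditionStatus = "True"
)

type jobCondition struct {
	Type   conditionType
	Status conditionStatus
}

type job struct{ conditions []jobCondition }

// isJobFinished mirrors the helper above: a job is finished once it carries
// a Complete or Failed condition with status True.
func isJobFinished(j *job) bool {
	for _, c := range j.conditions {
		if (c.Type == jobComplete || c.Type == jobFailed) && c.Status == conditionTrue {
			return true
		}
	}
	return false
}

func main() {
	j := &job{conditions: []jobCondition{{Type: jobComplete, Status: conditionTrue}}}
	fmt.Println(isJobFinished(j)) // true
}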


@@ -19,8 +19,8 @@ package job
import ( import (
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/batch" batch "k8s.io/kubernetes/pkg/apis/batch/v1"
) )
func TestIsJobFinished(t *testing.T) { func TestIsJobFinished(t *testing.T) {
@@ -28,7 +28,7 @@ func TestIsJobFinished(t *testing.T) {
Status: batch.JobStatus{ Status: batch.JobStatus{
Conditions: []batch.JobCondition{{ Conditions: []batch.JobCondition{{
Type: batch.JobComplete, Type: batch.JobComplete,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
}}, }},
}, },
} }
@@ -37,12 +37,12 @@ func TestIsJobFinished(t *testing.T) {
t.Error("Job was expected to be finished") t.Error("Job was expected to be finished")
} }
job.Status.Conditions[0].Status = api.ConditionFalse job.Status.Conditions[0].Status = v1.ConditionFalse
if IsJobFinished(job) { if IsJobFinished(job) {
t.Error("Job was not expected to be finished") t.Error("Job was not expected to be finished")
} }
job.Status.Conditions[0].Status = api.ConditionUnknown job.Status.Conditions[0].Status = v1.ConditionUnknown
if IsJobFinished(job) { if IsJobFinished(job) {
t.Error("Job was not expected to be finished") t.Error("Job was not expected to be finished")
} }


@@ -19,10 +19,10 @@ package namespace
import ( import (
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -52,7 +52,7 @@ type NamespaceController struct {
// opCache is a cache to remember if a particular operation is not supported to aid dynamic client. // opCache is a cache to remember if a particular operation is not supported to aid dynamic client.
opCache *operationNotSupportedCache opCache *operationNotSupportedCache
// finalizerToken is the finalizer token managed by this controller // finalizerToken is the finalizer token managed by this controller
finalizerToken api.FinalizerName finalizerToken v1.FinalizerName
} }
// NewNamespaceController creates a new NamespaceController // NewNamespaceController creates a new NamespaceController
@@ -61,7 +61,7 @@ func NewNamespaceController(
clientPool dynamic.ClientPool, clientPool dynamic.ClientPool,
groupVersionResourcesFn func() ([]unversioned.GroupVersionResource, error), groupVersionResourcesFn func() ([]unversioned.GroupVersionResource, error),
resyncPeriod time.Duration, resyncPeriod time.Duration,
finalizerToken api.FinalizerName) *NamespaceController { finalizerToken v1.FinalizerName) *NamespaceController {
// the namespace deletion code looks at the discovery document to enumerate the set of resources on the server. // the namespace deletion code looks at the discovery document to enumerate the set of resources on the server.
// it then finds all namespaced resources, and in response to namespace deletion, will call delete on all of them. // it then finds all namespaced resources, and in response to namespace deletion, will call delete on all of them.
@@ -98,22 +98,22 @@ func NewNamespaceController(
// configure the backing store/controller // configure the backing store/controller
store, controller := cache.NewInformer( store, controller := cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return kubeClient.Core().Namespaces().List(options) return kubeClient.Core().Namespaces().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return kubeClient.Core().Namespaces().Watch(options) return kubeClient.Core().Namespaces().Watch(options)
}, },
}, },
&api.Namespace{}, &v1.Namespace{},
resyncPeriod, resyncPeriod,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { AddFunc: func(obj interface{}) {
namespace := obj.(*api.Namespace) namespace := obj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace) namespaceController.enqueueNamespace(namespace)
}, },
UpdateFunc: func(oldObj, newObj interface{}) { UpdateFunc: func(oldObj, newObj interface{}) {
namespace := newObj.(*api.Namespace) namespace := newObj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace) namespaceController.enqueueNamespace(namespace)
}, },
}, },
@@ -125,7 +125,7 @@ func NewNamespaceController(
} }
// enqueueNamespace adds an object to the controller work queue // enqueueNamespace adds an object to the controller work queue
// obj could be an *api.Namespace, or a DeletionFinalStateUnknown item. // obj could be an *v1.Namespace, or a DeletionFinalStateUnknown item.
func (nm *NamespaceController) enqueueNamespace(obj interface{}) { func (nm *NamespaceController) enqueueNamespace(obj interface{}) {
key, err := controller.KeyFunc(obj) key, err := controller.KeyFunc(obj)
if err != nil { if err != nil {
@@ -190,7 +190,7 @@ func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
nm.queue.Add(key) nm.queue.Add(key)
return err return err
} }
namespace := obj.(*api.Namespace) namespace := obj.(*v1.Namespace)
return syncNamespace(nm.kubeClient, nm.clientPool, nm.opCache, nm.groupVersionResourcesFn, namespace, nm.finalizerToken) return syncNamespace(nm.kubeClient, nm.clientPool, nm.opCache, nm.groupVersionResourcesFn, namespace, nm.finalizerToken)
} }
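enqueueNamespace above derives a key via controller.KeyFunc and adds it to the work queue, so syncNamespaceFromKey can later look the namespace back up in the store. A stand-in sketch of that enqueue path; keyFunc and workQueue are simplified placeholders, not the real cache/workqueue APIs:

package main

import (
	"errors"
	"fmt"
)

// namespace is a simplified stand-in for *v1.Namespace.
type namespace struct{ Name string }

// keyFunc mirrors controller.KeyFunc for cluster-scoped objects: the key is
// just the object name (namespaced objects would use "namespace/name").
func keyFunc(obj interface{}) (string, error) {
	ns, ok := obj.(*namespace)
	if !ok || ns.Name == "" {
		return "", errors.New("object has no usable key")
	}
	return ns.Name, nil
}

// workQueue is a stand-in for the controller's rate-limited queue.
type workQueue struct{ keys []string }

func (q *workQueue) Add(key string) { q.keys = append(q.keys, key) }

// enqueueNamespace mirrors the controller method: derive a key, then queue it
// so the sync handler can fetch the object from the store by that key.
func enqueueNamespace(q *workQueue, obj interface{}) {
	key, err := keyFunc(obj)
	if err != nil {
		fmt.Println("couldn't get key:", err)
		return
	}
	q.Add(key)
}

func main() {
	q := &workQueue{}
	enqueueNamespace(q, &namespace{Name: "kube-system"})
	fmt.Println(q.keys) // [kube-system]
}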


@@ -28,9 +28,10 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
@@ -39,15 +40,15 @@ import (
) )
func TestFinalized(t *testing.T) { func TestFinalized(t *testing.T) {
testNamespace := &api.Namespace{ testNamespace := &v1.Namespace{
Spec: api.NamespaceSpec{ Spec: v1.NamespaceSpec{
Finalizers: []api.FinalizerName{"a", "b"}, Finalizers: []v1.FinalizerName{"a", "b"},
}, },
} }
if finalized(testNamespace) { if finalized(testNamespace) {
t.Errorf("Unexpected result, namespace is not finalized") t.Errorf("Unexpected result, namespace is not finalized")
} }
testNamespace.Spec.Finalizers = []api.FinalizerName{} testNamespace.Spec.Finalizers = []v1.FinalizerName{}
if !finalized(testNamespace) { if !finalized(testNamespace) {
t.Errorf("Expected object to be finalized") t.Errorf("Expected object to be finalized")
} }
@@ -55,16 +56,16 @@ func TestFinalized(t *testing.T) {
func TestFinalizeNamespaceFunc(t *testing.T) { func TestFinalizeNamespaceFunc(t *testing.T) {
mockClient := &fake.Clientset{} mockClient := &fake.Clientset{}
testNamespace := &api.Namespace{ testNamespace := &v1.Namespace{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "test", Name: "test",
ResourceVersion: "1", ResourceVersion: "1",
}, },
Spec: api.NamespaceSpec{ Spec: v1.NamespaceSpec{
Finalizers: []api.FinalizerName{"kubernetes", "other"}, Finalizers: []v1.FinalizerName{"kubernetes", "other"},
}, },
} }
finalizeNamespace(mockClient, testNamespace, api.FinalizerKubernetes) finalizeNamespace(mockClient, testNamespace, v1.FinalizerKubernetes)
actions := mockClient.Actions() actions := mockClient.Actions()
if len(actions) != 1 { if len(actions) != 1 {
t.Errorf("Expected 1 mock client action, but got %v", len(actions)) t.Errorf("Expected 1 mock client action, but got %v", len(actions))
@@ -72,7 +73,7 @@ func TestFinalizeNamespaceFunc(t *testing.T) {
if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" { if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" {
t.Errorf("Expected finalize-namespace action %v", actions[0]) t.Errorf("Expected finalize-namespace action %v", actions[0])
} }
finalizers := actions[0].(core.CreateAction).GetObject().(*api.Namespace).Spec.Finalizers finalizers := actions[0].(core.CreateAction).GetObject().(*v1.Namespace).Spec.Finalizers
if len(finalizers) != 1 { if len(finalizers) != 1 {
t.Errorf("There should be a single finalizer remaining") t.Errorf("There should be a single finalizer remaining")
} }
@@ -84,28 +85,28 @@ func TestFinalizeNamespaceFunc(t *testing.T) {
func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIVersions) { func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIVersions) {
now := unversioned.Now() now := unversioned.Now()
namespaceName := "test" namespaceName := "test"
testNamespacePendingFinalize := &api.Namespace{ testNamespacePendingFinalize := &v1.Namespace{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: namespaceName, Name: namespaceName,
ResourceVersion: "1", ResourceVersion: "1",
DeletionTimestamp: &now, DeletionTimestamp: &now,
}, },
Spec: api.NamespaceSpec{ Spec: v1.NamespaceSpec{
Finalizers: []api.FinalizerName{"kubernetes"}, Finalizers: []v1.FinalizerName{"kubernetes"},
}, },
Status: api.NamespaceStatus{ Status: v1.NamespaceStatus{
Phase: api.NamespaceTerminating, Phase: v1.NamespaceTerminating,
}, },
} }
testNamespaceFinalizeComplete := &api.Namespace{ testNamespaceFinalizeComplete := &v1.Namespace{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: namespaceName, Name: namespaceName,
ResourceVersion: "1", ResourceVersion: "1",
DeletionTimestamp: &now, DeletionTimestamp: &now,
}, },
Spec: api.NamespaceSpec{}, Spec: v1.NamespaceSpec{},
Status: api.NamespaceStatus{ Status: v1.NamespaceStatus{
Phase: api.NamespaceTerminating, Phase: v1.NamespaceTerminating,
}, },
} }
@@ -126,7 +127,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
} }
scenarios := map[string]struct { scenarios := map[string]struct {
testNamespace *api.Namespace testNamespace *v1.Namespace
kubeClientActionSet sets.String kubeClientActionSet sets.String
dynamicClientActionSet sets.String dynamicClientActionSet sets.String
gvrError error gvrError error
@@ -172,7 +173,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
return groupVersionResources, nil return groupVersionResources, nil
} }
err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testInput.testNamespace, api.FinalizerKubernetes) err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testInput.testNamespace, v1.FinalizerKubernetes)
if err != nil { if err != nil {
t.Errorf("scenario %s - Unexpected error when syncing namespace %v", scenario, err) t.Errorf("scenario %s - Unexpected error when syncing namespace %v", scenario, err)
} }
@@ -202,14 +203,14 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
func TestRetryOnConflictError(t *testing.T) { func TestRetryOnConflictError(t *testing.T) {
mockClient := &fake.Clientset{} mockClient := &fake.Clientset{}
numTries := 0 numTries := 0
retryOnce := func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) { retryOnce := func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
numTries++ numTries++
if numTries <= 1 { if numTries <= 1 {
return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!")) return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!"))
} }
return namespace, nil return namespace, nil
} }
namespace := &api.Namespace{} namespace := &v1.Namespace{}
_, err := retryOnConflictError(mockClient, namespace, retryOnce) _, err := retryOnConflictError(mockClient, namespace, retryOnce)
if err != nil { if err != nil {
t.Errorf("Unexpected error %v", err) t.Errorf("Unexpected error %v", err)
@@ -229,22 +230,22 @@ func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) {
func TestSyncNamespaceThatIsActive(t *testing.T) { func TestSyncNamespaceThatIsActive(t *testing.T) {
mockClient := &fake.Clientset{} mockClient := &fake.Clientset{}
testNamespace := &api.Namespace{ testNamespace := &v1.Namespace{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "test", Name: "test",
ResourceVersion: "1", ResourceVersion: "1",
}, },
Spec: api.NamespaceSpec{ Spec: v1.NamespaceSpec{
Finalizers: []api.FinalizerName{"kubernetes"}, Finalizers: []v1.FinalizerName{"kubernetes"},
}, },
Status: api.NamespaceStatus{ Status: v1.NamespaceStatus{
Phase: api.NamespaceActive, Phase: v1.NamespaceActive,
}, },
} }
fn := func() ([]unversioned.GroupVersionResource, error) { fn := func() ([]unversioned.GroupVersionResource, error) {
return testGroupVersionResources(), nil return testGroupVersionResources(), nil
} }
err := syncNamespace(mockClient, nil, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testNamespace, api.FinalizerKubernetes) err := syncNamespace(mockClient, nil, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testNamespace, v1.FinalizerKubernetes)
if err != nil { if err != nil {
t.Errorf("Unexpected error when syncing namespace %v", err) t.Errorf("Unexpected error when syncing namespace %v", err)
} }


@@ -22,11 +22,10 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/typed/dynamic" "k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@@ -80,12 +79,12 @@ func (o *operationNotSupportedCache) setNotSupported(key operationKey) {
} }
// updateNamespaceFunc is a function that makes an update to a namespace // updateNamespaceFunc is a function that makes an update to a namespace
type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error)
// retryOnConflictError retries the specified fn if there was a conflict error // retryOnConflictError retries the specified fn if there was a conflict error
// it will return an error if the UID for an object changes across retry operations. // it will return an error if the UID for an object changes across retry operations.
// TODO RetryOnConflict should be a generic concept in client code // TODO RetryOnConflict should be a generic concept in client code
func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) { func retryOnConflictError(kubeClient clientset.Interface, namespace *v1.Namespace, fn updateNamespaceFunc) (result *v1.Namespace, err error) {
latestNamespace := namespace latestNamespace := namespace
for { for {
result, err = fn(kubeClient, latestNamespace) result, err = fn(kubeClient, latestNamespace)
@@ -107,32 +106,32 @@ func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespa
} }
// updateNamespaceStatusFunc will verify that the status of the namespace is correct // updateNamespaceStatusFunc will verify that the status of the namespace is correct
func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) { func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == api.NamespaceTerminating { if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == v1.NamespaceTerminating {
return namespace, nil return namespace, nil
} }
newNamespace := api.Namespace{} newNamespace := v1.Namespace{}
newNamespace.ObjectMeta = namespace.ObjectMeta newNamespace.ObjectMeta = namespace.ObjectMeta
newNamespace.Status = namespace.Status newNamespace.Status = namespace.Status
newNamespace.Status.Phase = api.NamespaceTerminating newNamespace.Status.Phase = v1.NamespaceTerminating
return kubeClient.Core().Namespaces().UpdateStatus(&newNamespace) return kubeClient.Core().Namespaces().UpdateStatus(&newNamespace)
} }
// finalized returns true if the namespace.Spec.Finalizers is an empty list // finalized returns true if the namespace.Spec.Finalizers is an empty list
func finalized(namespace *api.Namespace) bool { func finalized(namespace *v1.Namespace) bool {
return len(namespace.Spec.Finalizers) == 0 return len(namespace.Spec.Finalizers) == 0
} }
// finalizeNamespaceFunc returns a function that knows how to finalize a namespace for specified token. // finalizeNamespaceFunc returns a function that knows how to finalize a namespace for specified token.
func finalizeNamespaceFunc(finalizerToken api.FinalizerName) updateNamespaceFunc { func finalizeNamespaceFunc(finalizerToken v1.FinalizerName) updateNamespaceFunc {
return func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) { return func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
return finalizeNamespace(kubeClient, namespace, finalizerToken) return finalizeNamespace(kubeClient, namespace, finalizerToken)
} }
} }
// finalizeNamespace removes the specified finalizerToken and finalizes the namespace // finalizeNamespace removes the specified finalizerToken and finalizes the namespace
func finalizeNamespace(kubeClient clientset.Interface, namespace *api.Namespace, finalizerToken api.FinalizerName) (*api.Namespace, error) { func finalizeNamespace(kubeClient clientset.Interface, namespace *v1.Namespace, finalizerToken v1.FinalizerName) (*v1.Namespace, error) {
namespaceFinalize := api.Namespace{} namespaceFinalize := v1.Namespace{}
namespaceFinalize.ObjectMeta = namespace.ObjectMeta namespaceFinalize.ObjectMeta = namespace.ObjectMeta
namespaceFinalize.Spec = namespace.Spec namespaceFinalize.Spec = namespace.Spec
finalizerSet := sets.NewString() finalizerSet := sets.NewString()
@@ -141,9 +140,9 @@ func finalizeNamespace(kubeClient clientset.Interface, namespace *api.Namespace,
finalizerSet.Insert(string(namespace.Spec.Finalizers[i])) finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
} }
} }
namespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet)) namespaceFinalize.Spec.Finalizers = make([]v1.FinalizerName, 0, len(finalizerSet))
for _, value := range finalizerSet.List() { for _, value := range finalizerSet.List() {
namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value)) namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, v1.FinalizerName(value))
} }
namespace, err := kubeClient.Core().Namespaces().Finalize(&namespaceFinalize) namespace, err := kubeClient.Core().Namespaces().Finalize(&namespaceFinalize)
if err != nil { if err != nil {
@@ -372,8 +371,8 @@ func syncNamespace(
clientPool dynamic.ClientPool, clientPool dynamic.ClientPool,
opCache *operationNotSupportedCache, opCache *operationNotSupportedCache,
groupVersionResourcesFn func() ([]unversioned.GroupVersionResource, error), groupVersionResourcesFn func() ([]unversioned.GroupVersionResource, error),
namespace *api.Namespace, namespace *v1.Namespace,
finalizerToken api.FinalizerName, finalizerToken v1.FinalizerName,
) error { ) error {
if namespace.DeletionTimestamp == nil { if namespace.DeletionTimestamp == nil {
return nil return nil
@@ -409,10 +408,10 @@ func syncNamespace(
// if the namespace is already finalized, delete it // if the namespace is already finalized, delete it
if finalized(namespace) { if finalized(namespace) {
var opts *api.DeleteOptions var opts *v1.DeleteOptions
uid := namespace.UID uid := namespace.UID
if len(uid) > 0 { if len(uid) > 0 {
opts = &api.DeleteOptions{Preconditions: &api.Preconditions{UID: &uid}} opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
} }
err = kubeClient.Core().Namespaces().Delete(namespace.Name, opts) err = kubeClient.Core().Namespaces().Delete(namespace.Name, opts)
if err != nil && !errors.IsNotFound(err) { if err != nil && !errors.IsNotFound(err) {
@@ -483,14 +482,14 @@ func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionRes
func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) { func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) {
glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns) glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns)
estimate := int64(0) estimate := int64(0)
items, err := kubeClient.Core().Pods(ns).List(api.ListOptions{}) items, err := kubeClient.Core().Pods(ns).List(v1.ListOptions{})
if err != nil { if err != nil {
return estimate, err return estimate, err
} }
for i := range items.Items { for i := range items.Items {
// filter out terminal pods // filter out terminal pods
phase := items.Items[i].Status.Phase phase := items.Items[i].Status.Phase
if api.PodSucceeded == phase || api.PodFailed == phase { if v1.PodSucceeded == phase || v1.PodFailed == phase {
continue continue
} }
if items.Items[i].Spec.TerminationGracePeriodSeconds != nil { if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {
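The finalizeNamespace helper shown earlier in this file rebuilds Spec.Finalizers without the controller's own token before calling the finalize subresource. A self-contained sketch of just that bookkeeping, using a plain map plus sort in place of sets.String; finalizerName is a stand-in for v1.FinalizerName:

package main

import (
	"fmt"
	"sort"
)

// finalizerName stands in for v1.FinalizerName.
type finalizerName string

// removeFinalizer mirrors the bookkeeping in finalizeNamespace: copy every
// finalizer except the token being removed into a set, then rebuild the
// slice in the sorted order a string set's List() would give.
func removeFinalizer(finalizers []finalizerName, token finalizerName) []finalizerName {
	set := map[string]bool{}
	for _, f := range finalizers {
		if f != token {
			set[string(f)] = true
		}
	}
	names := make([]string, 0, len(set))
	for n := range set {
		names = append(names, n)
	}
	sort.Strings(names)
	out := make([]finalizerName, 0, len(names))
	for _, n := range names {
		out = append(out, finalizerName(n))
	}
	return out
}

func main() {
	fmt.Println(removeFinalizer([]finalizerName{"kubernetes", "other"}, "kubernetes")) // [other]
}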


@@ -22,9 +22,9 @@ import (
"net" "net"
"sync" "sync"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors" apierrors "k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -50,8 +50,8 @@ type nodeAndCIDR struct {
// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes. // CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface { type CIDRAllocator interface {
AllocateOrOccupyCIDR(node *api.Node) error AllocateOrOccupyCIDR(node *v1.Node) error
ReleaseCIDR(node *api.Node) error ReleaseCIDR(node *v1.Node) error
} }
type rangeAllocator struct { type rangeAllocator struct {
@@ -72,9 +72,9 @@ type rangeAllocator struct {
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size. // Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator // Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing. // can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *api.NodeList) (CIDRAllocator, error) { func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "cidrAllocator"}) recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
ra := &rangeAllocator{ ra := &rangeAllocator{
@@ -145,7 +145,7 @@ func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.nodesInProcessing.Delete(nodeName) r.nodesInProcessing.Delete(nodeName)
} }
func (r *rangeAllocator) occupyCIDR(node *api.Node) error { func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name) defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" { if node.Spec.PodCIDR == "" {
return nil return nil
@@ -164,7 +164,7 @@ func (r *rangeAllocator) occupyCIDR(node *api.Node) error {
// if it doesn't currently have one or mark the CIDR as used if the node already have one. // if it doesn't currently have one or mark the CIDR as used if the node already have one.
// WARNING: If you're adding any return calls or defer any more work from this function // WARNING: If you're adding any return calls or defer any more work from this function
// you have to handle correctly nodesInProcessing. // you have to handle correctly nodesInProcessing.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error { func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil { if node == nil {
return nil return nil
} }
@@ -191,7 +191,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error {
} }
// ReleaseCIDR releases the CIDR of the removed node // ReleaseCIDR releases the CIDR of the removed node
func (r *rangeAllocator) ReleaseCIDR(node *api.Node) error { func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" { if node == nil || node.Spec.PodCIDR == "" {
return nil return nil
} }
@@ -225,7 +225,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Assigns CIDR to Node and sends an update to the API server. // Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error { func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error var err error
var node *api.Node var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName) defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ { for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates. // TODO: change it to using PATCH instead of full Node updates.
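The last hunk above stops inside the update retry loop. The sketch below shows the same shape with local stand-ins: drop the node from the in-progress set on exit via defer, re-read the node on each attempt, and stop after the first successful write. The fetch and update hooks are hypothetical placeholders for the real client calls.

package main

import (
	"errors"
	"fmt"
)

const podCIDRUpdateRetry = 5 // mirrors the retry constant referenced in the hunk

// processing is a stand-in for the allocator's nodesInProcessing set.
type processing map[string]bool

// updateCIDRAllocation sketches the retry shape of the hunk above: always
// remove the node from the in-progress set on exit, re-read the node each
// attempt, and stop after the first successful update.
func updateCIDRAllocation(inProgress processing, nodeName, cidr string,
	fetch func(string) (string, error), update func(string, string) error) error {

	defer delete(inProgress, nodeName) // matches `defer r.removeNodeFromProcessing(...)`

	var err error
	for rep := 0; rep < podCIDRUpdateRetry; rep++ {
		current, ferr := fetch(nodeName)
		if ferr != nil {
			err = ferr
			continue
		}
		if current == cidr {
			return nil // already set; nothing to do
		}
		if err = update(nodeName, cidr); err == nil {
			return nil
		}
	}
	return fmt.Errorf("giving up after %d attempts: %w", podCIDRUpdateRetry, err)
}

func main() {
	inProgress := processing{"node0": true}
	attempts := 0
	err := updateCIDRAllocation(inProgress, "node0", "10.0.0.0/24",
		func(string) (string, error) { return "", nil },
		func(string, string) error {
			attempts++
			if attempts < 3 {
				return errors.New("conflict") // simulate transient update failures
			}
			return nil
		})
	fmt.Println(err, len(inProgress)) // <nil> 0
}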


@@ -21,8 +21,8 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
) )
@@ -52,9 +52,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{ {
description: "When there's no ServiceCIDR return first CIDR in range", description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{ Existing: []*v1.Node{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
}, },
@@ -72,9 +72,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{ {
description: "Correctly filter out ServiceCIDR", description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{ Existing: []*v1.Node{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
}, },
@@ -96,9 +96,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{ {
description: "Correctly ignore already allocated CIDRs", description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{ Existing: []*v1.Node{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
}, },
@@ -182,9 +182,9 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
{ {
description: "When there's no ServiceCIDR return first CIDR in range", description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{ Existing: []*v1.Node{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
}, },
@@ -265,9 +265,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
{ {
description: "Correctly release preallocated CIDR", description: "Correctly release preallocated CIDR",
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{ Existing: []*v1.Node{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
}, },
@@ -288,9 +288,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
{ {
description: "Correctly recycle CIDR", description: "Correctly recycle CIDR",
fakeNodeHandler: &FakeNodeHandler{ fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{ Existing: []*v1.Node{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
}, },
@@ -357,8 +357,8 @@ func TestReleaseCIDRSuccess(t *testing.T) {
} }
for _, cidrToRelease := range tc.cidrsToRelease { for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := api.Node{ nodeToRelease := v1.Node{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "node0", Name: "node0",
}, },
} }
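The test hunks above are table-driven cases built around a fake node handler. Below is a stand-alone sketch of that testing style with a toy allocator in place of the real rangeAllocator; the pool, the /30 blocks, and the case names are invented purely for illustration.

package cidr_test

import "testing"

// toyAllocate is a stand-in for AllocateOrOccupyCIDR: it hands out /30 blocks
// from a tiny fixed pool and skips blocks already marked as used.
func toyAllocate(used map[string]bool) (string, bool) {
	for _, c := range []string{"10.0.0.0/30", "10.0.0.4/30", "10.0.0.8/30"} {
		if !used[c] {
			used[c] = true
			return c, true
		}
	}
	return "", false
}

// TestToyAllocate mirrors the table-driven layout of the hunks above:
// each case carries a description, a pre-populated fixture, and the
// CIDR the case expects to receive.
func TestToyAllocate(t *testing.T) {
	testCases := []struct {
		description  string
		alreadyUsed  []string
		expectedCIDR string
	}{
		{
			description:  "empty pool returns the first CIDR in range",
			alreadyUsed:  nil,
			expectedCIDR: "10.0.0.0/30",
		},
		{
			description:  "already allocated CIDRs are skipped",
			alreadyUsed:  []string{"10.0.0.0/30", "10.0.0.4/30"},
			expectedCIDR: "10.0.0.8/30",
		},
	}
	for _, tc := range testCases {
		used := map[string]bool{}
		for _, c := range tc.alreadyUsed {
			used[c] = true
		}
		got, ok := toyAllocate(used)
		if !ok || got != tc.expectedCIDR {
			t.Errorf("%s: got %q (ok=%v), want %q", tc.description, got, ok, tc.expectedCIDR)
		}
	}
}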


@@ -22,8 +22,9 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
@@ -46,9 +47,9 @@ const (
// if any pods were deleted, or were found pending deletion. // if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) { func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
remaining := false remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName) selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := api.ListOptions{FieldSelector: selector} options := v1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(api.NamespaceAll).List(options) pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
var updateErrList []error var updateErrList []error
if err != nil { if err != nil {
@@ -56,7 +57,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
} }
if len(pods.Items) > 0 { if len(pods.Items) > 0 {
recordNodeEvent(recorder, nodeName, nodeUID, api.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName)) recordNodeEvent(recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
} }
for _, pod := range pods.Items { for _, pod := range pods.Items {
@@ -85,7 +86,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
} }
glog.V(2).Infof("Starting deletion of pod %v", pod.Name) glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName) recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil { if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
return false, err return false, err
} }
@@ -100,7 +101,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver, // setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
// and returns an error if it encounters one. // and returns an error if it encounters one.
func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeName string) (*api.Pod, error) { func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
if pod.Status.Reason == node.NodeUnreachablePodReason { if pod.Status.Reason == node.NodeUnreachablePodReason {
return pod, nil return pod, nil
} }
@@ -108,7 +109,7 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeN
pod.Status.Reason = node.NodeUnreachablePodReason pod.Status.Reason = node.NodeUnreachablePodReason
pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name) pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name)
var updatedPod *api.Pod var updatedPod *v1.Pod
var err error var err error
if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil { if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
return nil, err return nil, err
@@ -116,10 +117,10 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeN
return updatedPod, nil return updatedPod, nil
} }
func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) error { func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
var zero int64 var zero int64
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name) glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero}) err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
if err == nil { if err == nil {
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name) glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
} }
@@ -138,14 +139,14 @@ func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error
// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating // maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
// that should not be gracefully terminated. // that should not be gracefully terminated.
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) { func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
if !ok { if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown) tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok { if !ok {
glog.Errorf("Couldn't get object from tombstone %#v", obj) glog.Errorf("Couldn't get object from tombstone %#v", obj)
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
glog.Errorf("Tombstone contained object that is not a Pod %#v", obj) glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
return return
@@ -176,7 +177,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
// TODO(mikedanese): this can be removed when we no longer // TODO(mikedanese): this can be removed when we no longer
// guarantee backwards compatibility of master API to kubelets with // guarantee backwards compatibility of master API to kubelets with
// versions less than 1.1.0 // versions less than 1.1.0
node := nodeObj.(*api.Node) node := nodeObj.(*v1.Node)
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion) v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil { if err != nil {
glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err) glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
@@ -191,7 +192,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
// update ready status of all pods running on given node from master // update ready status of all pods running on given node from master
// return true if success // return true if success
func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error { func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
// Don't set pods to NotReady if the kubelet is running a version that // Don't set pods to NotReady if the kubelet is running a version that
// doesn't understand how to correct readiness. // doesn't understand how to correct readiness.
// TODO: Remove this check when we no longer guarantee backward compatibility // TODO: Remove this check when we no longer guarantee backward compatibility
@@ -201,8 +202,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
} }
nodeName := node.Name nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName) glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName)} opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(api.NamespaceAll).List(opts) pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil { if err != nil {
return err return err
} }
@@ -215,8 +216,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
} }
for i, cond := range pod.Status.Conditions { for i, cond := range pod.Status.Conditions {
if cond.Type == api.PodReady { if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = api.ConditionFalse pod.Status.Conditions[i].Status = v1.ConditionFalse
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name) glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod) _, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil { if err != nil {
@@ -237,7 +238,7 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
// in the nodeInfo of the given node is "outdated", meaning < 1.2.0. // in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
// Older versions were inflexible and modifying pod.Status directly through // Older versions were inflexible and modifying pod.Status directly through
// the apiserver would result in unexpected outcomes. // the apiserver would result in unexpected outcomes.
func nodeRunningOutdatedKubelet(node *api.Node) bool { func nodeRunningOutdatedKubelet(node *v1.Node) bool {
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion) v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil { if err != nil {
glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err) glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
@@ -265,7 +266,7 @@ func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.Nod
} }
func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) { func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
ref := &api.ObjectReference{ ref := &v1.ObjectReference{
Kind: "Node", Kind: "Node",
Name: nodeName, Name: nodeName,
UID: types.UID(nodeUID), UID: types.UID(nodeUID),
@@ -275,8 +276,8 @@ func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event) recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
} }
func recordNodeStatusChange(recorder record.EventRecorder, node *api.Node, new_status string) { func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
ref := &api.ObjectReference{ ref := &v1.ObjectReference{
Kind: "Node", Kind: "Node",
Name: node.Name, Name: node.Name,
UID: node.UID, UID: node.UID,
@@ -285,5 +286,5 @@ func recordNodeStatusChange(recorder record.EventRecorder, node *api.Node, new_s
glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name) glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
// TODO: This requires a transaction, either both node status is updated // TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055. // and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status) recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
} }
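markAllPodsNotReady above lists the pods bound to a node (via a spec.nodeName field selector, now passed as a string) and flips their Ready condition to False. A minimal in-memory version of that loop, with stand-in pod types and no API round trip, looks like this:

package main

import "fmt"

// Stand-ins for the v1 pod condition types referenced in the hunk above.
type ConditionStatus string

const (
	ConditionTrue  ConditionStatus = "True"
	ConditionFalse ConditionStatus = "False"
)

type PodCondition struct {
	Type   string
	Status ConditionStatus
}

type Pod struct {
	Name       string
	NodeName   string
	Conditions []PodCondition
}

// markAllPodsNotReady walks the pods scheduled to nodeName and forces their
// Ready condition to False, mirroring the loop in the diff; in the real
// controller each mutated pod is written back with UpdateStatus.
func markAllPodsNotReady(pods []Pod, nodeName string) {
	for p := range pods {
		if pods[p].NodeName != nodeName {
			continue // the real code narrows this with a spec.nodeName field selector
		}
		for i, cond := range pods[p].Conditions {
			if cond.Type == "Ready" {
				pods[p].Conditions[i].Status = ConditionFalse
			}
		}
	}
}

func main() {
	pods := []Pod{
		{Name: "a", NodeName: "node-1", Conditions: []PodCondition{{Type: "Ready", Status: ConditionTrue}}},
		{Name: "b", NodeName: "node-2", Conditions: []PodCondition{{Type: "Ready", Status: ConditionTrue}}},
	}
	markAllPodsNotReady(pods, "node-1")
	fmt.Println(pods[0].Conditions[0].Status, pods[1].Conditions[0].Status) // False True
}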


@@ -26,9 +26,10 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -83,7 +84,7 @@ const (
type nodeStatusData struct { type nodeStatusData struct {
probeTimestamp unversioned.Time probeTimestamp unversioned.Time
readyTransitionTimestamp unversioned.Time readyTransitionTimestamp unversioned.Time
status api.NodeStatus status v1.NodeStatus
} }
type NodeController struct { type NodeController struct {
@@ -91,7 +92,7 @@ type NodeController struct {
cloud cloudprovider.Interface cloud cloudprovider.Interface
clusterCIDR *net.IPNet clusterCIDR *net.IPNet
serviceCIDR *net.IPNet serviceCIDR *net.IPNet
knownNodeSet map[string]*api.Node knownNodeSet map[string]*v1.Node
kubeClient clientset.Interface kubeClient clientset.Interface
// Method for easy mocking in unittest. // Method for easy mocking in unittest.
lookupIP func(host string) ([]net.IP, error) lookupIP func(host string) ([]net.IP, error)
@@ -140,9 +141,9 @@ type NodeController struct {
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true // allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator cidrAllocator CIDRAllocator
forcefullyDeletePod func(*api.Pod) error forcefullyDeletePod func(*v1.Pod) error
nodeExistsInCloudProvider func(types.NodeName) (bool, error) nodeExistsInCloudProvider func(types.NodeName) (bool, error)
computeZoneStateFunc func(nodeConditions []*api.NodeCondition) (int, zoneState) computeZoneStateFunc func(nodeConditions []*v1.NodeCondition) (int, zoneState)
enterPartialDisruptionFunc func(nodeNum int) float32 enterPartialDisruptionFunc func(nodeNum int) float32
enterFullDisruptionFunc func(nodeNum int) float32 enterFullDisruptionFunc func(nodeNum int) float32
@@ -183,11 +184,11 @@ func NewNodeController(
nodeCIDRMaskSize int, nodeCIDRMaskSize int,
allocateNodeCIDRs bool) (*NodeController, error) { allocateNodeCIDRs bool) (*NodeController, error) {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"}) recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil { if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.") glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
} else { } else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.") glog.V(0).Infof("No api server defined - no events will be sent to API server.")
} }
@@ -208,7 +209,7 @@ func NewNodeController(
nc := &NodeController{ nc := &NodeController{
cloud: cloud, cloud: cloud,
knownNodeSet: make(map[string]*api.Node), knownNodeSet: make(map[string]*v1.Node),
kubeClient: kubeClient, kubeClient: kubeClient,
recorder: recorder, recorder: recorder,
podEvictionTimeout: podEvictionTimeout, podEvictionTimeout: podEvictionTimeout,
@@ -223,7 +224,7 @@ func NewNodeController(
clusterCIDR: clusterCIDR, clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR, serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs, allocateNodeCIDRs: allocateNodeCIDRs,
forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) }, forcefullyDeletePod: func(p *v1.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) }, nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
evictionLimiterQPS: evictionLimiterQPS, evictionLimiterQPS: evictionLimiterQPS,
secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS, secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
@@ -246,14 +247,14 @@ func NewNodeController(
nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{} nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs { if nc.allocateNodeCIDRs {
var nodeList *api.NodeList var nodeList *v1.NodeList
var err error var err error
// We must poll because apiserver might not be up. This error causes // We must poll because apiserver might not be up. This error causes
// controller manager to restart. // controller manager to restart.
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) { if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
nodeList, err = kubeClient.Core().Nodes().List(api.ListOptions{ nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{
FieldSelector: fields.Everything(), FieldSelector: fields.Everything().String(),
LabelSelector: labels.Everything(), LabelSelector: labels.Everything().String(),
}) })
if err != nil { if err != nil {
glog.Errorf("Failed to list all nodes: %v", err) glog.Errorf("Failed to list all nodes: %v", err)
@@ -275,14 +276,14 @@ func NewNodeController(
utilruntime.HandleError(err) utilruntime.HandleError(err)
return return
} }
node := obj.(*api.Node) node := obj.(*v1.Node)
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil { if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err)) utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
} }
}, },
UpdateFunc: func(_, obj interface{}) { UpdateFunc: func(_, obj interface{}) {
node := obj.(*api.Node) node := obj.(*v1.Node)
// If the PodCIDR is not empty we either: // If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted // - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used), // (cidr is marked as used),
@@ -309,7 +310,7 @@ func NewNodeController(
return return
} }
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*api.Node)); err != nil { if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*v1.Node)); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err)) utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
} }
} }
@@ -321,15 +322,15 @@ func NewNodeController(
return return
} }
node, isNode := obj.(*api.Node) node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly. #34692 // We can get DeletedFinalStateUnknown instead of *v1.Node here and we need to handle that correctly. #34692
if !isNode { if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown) deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok { if !ok {
glog.Errorf("Received unexpected object: %v", obj) glog.Errorf("Received unexpected object: %v", obj)
return return
} }
node, ok = deletedState.Obj.(*api.Node) node, ok = deletedState.Obj.(*v1.Node)
if !ok { if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj) glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return return
@@ -381,7 +382,7 @@ func (nc *NodeController) Run() {
} else if !exists { } else if !exists {
glog.Warningf("Node %v no longer present in nodeStore!", value.Value) glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
} else { } else {
node, _ := obj.(*api.Node) node, _ := obj.(*v1.Node)
zone := utilnode.GetZoneKey(node) zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc() EvictionsNumber.WithLabelValues(zone).Inc()
} }
@@ -410,14 +411,14 @@ func (nc *NodeController) monitorNodeStatus() error {
// It is enough to list Nodes from apiserver, since we can tolerate some small // It is enough to list Nodes from apiserver, since we can tolerate some small
// delays comparing to state from etcd and there is eventual consistency anyway. // delays comparing to state from etcd and there is eventual consistency anyway.
// TODO: We should list them from local cache: nodeStore. // TODO: We should list them from local cache: nodeStore.
nodes, err := nc.kubeClient.Core().Nodes().List(api.ListOptions{ResourceVersion: "0"}) nodes, err := nc.kubeClient.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
if err != nil { if err != nil {
return err return err
} }
added, deleted := nc.checkForNodeAddedDeleted(nodes) added, deleted := nc.checkForNodeAddedDeleted(nodes)
for i := range added { for i := range added {
glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name) glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name)
recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name)) recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name))
nc.knownNodeSet[added[i].Name] = added[i] nc.knownNodeSet[added[i].Name] = added[i]
// When adding new Nodes we need to check if new zone appeared, and if so add new evictor. // When adding new Nodes we need to check if new zone appeared, and if so add new evictor.
zone := utilnode.GetZoneKey(added[i]) zone := utilnode.GetZoneKey(added[i])
@@ -434,15 +435,15 @@ func (nc *NodeController) monitorNodeStatus() error {
for i := range deleted { for i := range deleted {
glog.V(1).Infof("NodeController observed a Node deletion: %v", deleted[i].Name) glog.V(1).Infof("NodeController observed a Node deletion: %v", deleted[i].Name)
recordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", deleted[i].Name)) recordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", deleted[i].Name))
delete(nc.knownNodeSet, deleted[i].Name) delete(nc.knownNodeSet, deleted[i].Name)
} }
zoneToNodeConditions := map[string][]*api.NodeCondition{} zoneToNodeConditions := map[string][]*v1.NodeCondition{}
for i := range nodes.Items { for i := range nodes.Items {
var gracePeriod time.Duration var gracePeriod time.Duration
var observedReadyCondition api.NodeCondition var observedReadyCondition v1.NodeCondition
var currentReadyCondition *api.NodeCondition var currentReadyCondition *v1.NodeCondition
node := &nodes.Items[i] node := &nodes.Items[i]
for rep := 0; rep < nodeStatusUpdateRetry; rep++ { for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node) gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node)
@@ -463,33 +464,33 @@ func (nc *NodeController) monitorNodeStatus() error {
continue continue
} }
// We do not treat a master node as a part of the cluster for network disruption checking. // We do not treat a master node as a part of the cluster for network disruption checking.
if !system.IsMasterNode(node) { if !system.IsMasterNode(node.Name) {
zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition) zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition)
} }
decisionTimestamp := nc.now() decisionTimestamp := nc.now()
if currentReadyCondition != nil { if currentReadyCondition != nil {
// Check eviction timeout against decisionTimestamp // Check eviction timeout against decisionTimestamp
if observedReadyCondition.Status == api.ConditionFalse && if observedReadyCondition.Status == v1.ConditionFalse &&
decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) { decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) { if nc.evictPods(node) {
glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout) glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
} }
} }
if observedReadyCondition.Status == api.ConditionUnknown && if observedReadyCondition.Status == v1.ConditionUnknown &&
decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) { decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) { if nc.evictPods(node) {
glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod) glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
} }
} }
if observedReadyCondition.Status == api.ConditionTrue { if observedReadyCondition.Status == v1.ConditionTrue {
if nc.cancelPodEviction(node) { if nc.cancelPodEviction(node) {
glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name) glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
} }
} }
// Report node event. // Report node event.
if currentReadyCondition.Status != api.ConditionTrue && observedReadyCondition.Status == api.ConditionTrue { if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue {
recordNodeStatusChange(nc.recorder, node, "NodeNotReady") recordNodeStatusChange(nc.recorder, node, "NodeNotReady")
if err = markAllPodsNotReady(nc.kubeClient, node); err != nil { if err = markAllPodsNotReady(nc.kubeClient, node); err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err)) utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err))
@@ -498,7 +499,7 @@ func (nc *NodeController) monitorNodeStatus() error {
// Check with the cloud provider to see if the node still exists. If it // Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately. // doesn't, delete the node immediately.
if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil { if currentReadyCondition.Status != v1.ConditionTrue && nc.cloud != nil {
exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name)) exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil { if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err) glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
@@ -506,7 +507,7 @@ func (nc *NodeController) monitorNodeStatus() error {
} }
if !exists { if !exists {
glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name) glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
recordNodeEvent(nc.recorder, node.Name, string(node.UID), api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name)) recordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
go func(nodeName string) { go func(nodeName string) {
defer utilruntime.HandleCrash() defer utilruntime.HandleCrash()
// Kubelet is not reporting and Cloud Provider says node // Kubelet is not reporting and Cloud Provider says node
@@ -526,7 +527,7 @@ func (nc *NodeController) monitorNodeStatus() error {
return nil return nil
} }
func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*api.NodeCondition, nodes *api.NodeList) { func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes *v1.NodeList) {
newZoneStates := map[string]zoneState{} newZoneStates := map[string]zoneState{}
allAreFullyDisrupted := true allAreFullyDisrupted := true
for k, v := range zoneToNodeConditions { for k, v := range zoneToNodeConditions {
@@ -627,18 +628,18 @@ func (nc *NodeController) setLimiterInZone(zone string, zoneSize int, state zone
// For a given node checks its conditions and tries to update it. Returns grace period to which given node // For a given node checks its conditions and tries to update it. Returns grace period to which given node
// is entitled, state of current and last observed Ready Condition, and an error if it occurred. // is entitled, state of current and last observed Ready Condition, and an error if it occurred.
func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, api.NodeCondition, *api.NodeCondition, error) { func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {
var err error var err error
var gracePeriod time.Duration var gracePeriod time.Duration
var observedReadyCondition api.NodeCondition var observedReadyCondition v1.NodeCondition
_, currentReadyCondition := api.GetNodeCondition(&node.Status, api.NodeReady) _, currentReadyCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if currentReadyCondition == nil { if currentReadyCondition == nil {
// If ready condition is nil, then kubelet (or nodecontroller) never posted node status. // If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
// A fake ready condition is created, where LastProbeTime and LastTransitionTime is set // A fake ready condition is created, where LastProbeTime and LastTransitionTime is set
// to node.CreationTimestamp to avoid handle the corner case. // to node.CreationTimestamp to avoid handle the corner case.
observedReadyCondition = api.NodeCondition{ observedReadyCondition = v1.NodeCondition{
Type: api.NodeReady, Type: v1.NodeReady,
Status: api.ConditionUnknown, Status: v1.ConditionUnknown,
LastHeartbeatTime: node.CreationTimestamp, LastHeartbeatTime: node.CreationTimestamp,
LastTransitionTime: node.CreationTimestamp, LastTransitionTime: node.CreationTimestamp,
} }
@@ -669,11 +670,11 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// - if 'LastProbeTime' have gone back in time its probably an error, currently we ignore it, // - if 'LastProbeTime' have gone back in time its probably an error, currently we ignore it,
// - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check // - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check
// if that's the case, but it does not seem necessary. // if that's the case, but it does not seem necessary.
var savedCondition *api.NodeCondition var savedCondition *v1.NodeCondition
if found { if found {
_, savedCondition = api.GetNodeCondition(&savedNodeStatus.status, api.NodeReady) _, savedCondition = v1.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady)
} }
_, observedCondition := api.GetNodeCondition(&node.Status, api.NodeReady) _, observedCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if !found { if !found {
glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name) glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
savedNodeStatus = nodeStatusData{ savedNodeStatus = nodeStatusData{
@@ -725,9 +726,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// (regardless of its current value) in the master. // (regardless of its current value) in the master.
if currentReadyCondition == nil { if currentReadyCondition == nil {
glog.V(2).Infof("node %v is never updated by kubelet", node.Name) glog.V(2).Infof("node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{ node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: api.NodeReady, Type: v1.NodeReady,
Status: api.ConditionUnknown, Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated", Reason: "NodeStatusNeverUpdated",
Message: fmt.Sprintf("Kubelet never posted node status."), Message: fmt.Sprintf("Kubelet never posted node status."),
LastHeartbeatTime: node.CreationTimestamp, LastHeartbeatTime: node.CreationTimestamp,
@@ -736,8 +737,8 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
} else { } else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v", glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), observedReadyCondition) node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), observedReadyCondition)
if observedReadyCondition.Status != api.ConditionUnknown { if observedReadyCondition.Status != v1.ConditionUnknown {
currentReadyCondition.Status = api.ConditionUnknown currentReadyCondition.Status = v1.ConditionUnknown
currentReadyCondition.Reason = "NodeStatusUnknown" currentReadyCondition.Reason = "NodeStatusUnknown"
currentReadyCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.") currentReadyCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.")
// LastProbeTime is the last time we heard from kubelet. // LastProbeTime is the last time we heard from kubelet.
@@ -749,12 +750,12 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// Like NodeReady condition, NodeOutOfDisk was last set longer ago than gracePeriod, so update // Like NodeReady condition, NodeOutOfDisk was last set longer ago than gracePeriod, so update
// it to Unknown (regardless of its current value) in the master. // it to Unknown (regardless of its current value) in the master.
// TODO(madhusudancs): Refactor this with readyCondition to remove duplicated code. // TODO(madhusudancs): Refactor this with readyCondition to remove duplicated code.
_, oodCondition := api.GetNodeCondition(&node.Status, api.NodeOutOfDisk) _, oodCondition := v1.GetNodeCondition(&node.Status, v1.NodeOutOfDisk)
if oodCondition == nil { if oodCondition == nil {
glog.V(2).Infof("Out of disk condition of node %v is never updated by kubelet", node.Name) glog.V(2).Infof("Out of disk condition of node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{ node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: api.NodeOutOfDisk, Type: v1.NodeOutOfDisk,
Status: api.ConditionUnknown, Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated", Reason: "NodeStatusNeverUpdated",
Message: fmt.Sprintf("Kubelet never posted node status."), Message: fmt.Sprintf("Kubelet never posted node status."),
LastHeartbeatTime: node.CreationTimestamp, LastHeartbeatTime: node.CreationTimestamp,
@@ -763,16 +764,16 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
} else { } else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last out of disk condition is: %+v", glog.V(4).Infof("node %v hasn't been updated for %+v. Last out of disk condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), oodCondition) node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), oodCondition)
if oodCondition.Status != api.ConditionUnknown { if oodCondition.Status != v1.ConditionUnknown {
oodCondition.Status = api.ConditionUnknown oodCondition.Status = v1.ConditionUnknown
oodCondition.Reason = "NodeStatusUnknown" oodCondition.Reason = "NodeStatusUnknown"
oodCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.") oodCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.")
oodCondition.LastTransitionTime = nc.now() oodCondition.LastTransitionTime = nc.now()
} }
} }
_, currentCondition := api.GetNodeCondition(&node.Status, api.NodeReady) _, currentCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if !api.Semantic.DeepEqual(currentCondition, &observedReadyCondition) { if !v1.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil { if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
glog.Errorf("Error updating node %s: %v", node.Name, err) glog.Errorf("Error updating node %s: %v", node.Name, err)
return gracePeriod, observedReadyCondition, currentReadyCondition, err return gracePeriod, observedReadyCondition, currentReadyCondition, err
@@ -790,7 +791,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
return gracePeriod, observedReadyCondition, currentReadyCondition, err return gracePeriod, observedReadyCondition, currentReadyCondition, err
} }
func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added, deleted []*api.Node) { func (nc *NodeController) checkForNodeAddedDeleted(nodes *v1.NodeList) (added, deleted []*v1.Node) {
for i := range nodes.Items { for i := range nodes.Items {
if _, has := nc.knownNodeSet[nodes.Items[i].Name]; !has { if _, has := nc.knownNodeSet[nodes.Items[i].Name]; !has {
added = append(added, &nodes.Items[i]) added = append(added, &nodes.Items[i])
@@ -799,7 +800,7 @@ func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added,
// If there's a difference between lengths of known Nodes and observed nodes // If there's a difference between lengths of known Nodes and observed nodes
// we must have removed some Node. // we must have removed some Node.
if len(nc.knownNodeSet)+len(added) != len(nodes.Items) { if len(nc.knownNodeSet)+len(added) != len(nodes.Items) {
knowSetCopy := map[string]*api.Node{} knowSetCopy := map[string]*v1.Node{}
for k, v := range nc.knownNodeSet { for k, v := range nc.knownNodeSet {
knowSetCopy[k] = v knowSetCopy[k] = v
} }
@@ -815,7 +816,7 @@ func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added,
// cancelPodEviction removes any queued evictions, typically because the node is available again. It // cancelPodEviction removes any queued evictions, typically because the node is available again. It
// returns true if an eviction was queued. // returns true if an eviction was queued.
func (nc *NodeController) cancelPodEviction(node *api.Node) bool { func (nc *NodeController) cancelPodEviction(node *v1.Node) bool {
zone := utilnode.GetZoneKey(node) zone := utilnode.GetZoneKey(node)
nc.evictorLock.Lock() nc.evictorLock.Lock()
defer nc.evictorLock.Unlock() defer nc.evictorLock.Unlock()
@@ -829,7 +830,7 @@ func (nc *NodeController) cancelPodEviction(node *api.Node) bool {
// evictPods queues an eviction for the provided node name, and returns false if the node is already // evictPods queues an eviction for the provided node name, and returns false if the node is already
// queued for eviction. // queued for eviction.
func (nc *NodeController) evictPods(node *api.Node) bool { func (nc *NodeController) evictPods(node *v1.Node) bool {
nc.evictorLock.Lock() nc.evictorLock.Lock()
defer nc.evictorLock.Unlock() defer nc.evictorLock.Unlock()
return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID)) return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
@@ -853,11 +854,11 @@ func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32 {
// - fullyDisrupted if there're no Ready Nodes, // - fullyDisrupted if there're no Ready Nodes,
// - partiallyDisrupted if at least than nc.unhealthyZoneThreshold percent of Nodes are not Ready, // - partiallyDisrupted if at least than nc.unhealthyZoneThreshold percent of Nodes are not Ready,
// - normal otherwise // - normal otherwise
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*api.NodeCondition) (int, zoneState) { func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, zoneState) {
readyNodes := 0 readyNodes := 0
notReadyNodes := 0 notReadyNodes := 0
for i := range nodeReadyConditions { for i := range nodeReadyConditions {
if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == api.ConditionTrue { if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == v1.ConditionTrue {
readyNodes++ readyNodes++
} else { } else {
notReadyNodes++ notReadyNodes++
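ComputeZoneState above counts Ready versus not-Ready conditions and classifies the zone as described in the hunk's comment. A compact stand-alone version, treating "at least the unhealthy threshold fraction not Ready" as the partial-disruption test (an assumption based on that comment), might read:

package main

import "fmt"

type zoneState string

const (
	stateNormal             zoneState = "Normal"
	statePartiallyDisrupted zoneState = "PartialDisruption"
	stateFullyDisrupted     zoneState = "FullDisruption"
)

// computeZoneState classifies a zone from its nodes' Ready statuses, following
// the comment in the hunk above: fully disrupted when nothing is Ready,
// partially disrupted when the not-ready fraction reaches the unhealthy
// threshold, normal otherwise. It also returns the not-ready count, as the
// real method does.
func computeZoneState(ready []bool, unhealthyThreshold float64) (int, zoneState) {
	readyNodes, notReadyNodes := 0, 0
	for _, r := range ready {
		if r {
			readyNodes++
		} else {
			notReadyNodes++
		}
	}
	switch {
	case readyNodes == 0 && notReadyNodes > 0:
		return notReadyNodes, stateFullyDisrupted
	case notReadyNodes > 0 && float64(notReadyNodes)/float64(readyNodes+notReadyNodes) >= unhealthyThreshold:
		return notReadyNodes, statePartiallyDisrupted
	default:
		return notReadyNodes, stateNormal
	}
}

func main() {
	n, s := computeZoneState([]bool{true, false, false}, 0.55)
	fmt.Println(n, s) // 2 PartialDisruption
}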

File diff suppressed because it is too large.


@@ -26,8 +26,9 @@ import (
apierrors "k8s.io/kubernetes/pkg/api/errors" apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/api/v1"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock" "k8s.io/kubernetes/pkg/util/clock"
utilnode "k8s.io/kubernetes/pkg/util/node" utilnode "k8s.io/kubernetes/pkg/util/node"
@@ -43,14 +44,14 @@ type FakeNodeHandler struct {
*fake.Clientset *fake.Clientset
// Input: Hooks determine if request is valid or not // Input: Hooks determine if request is valid or not
CreateHook func(*FakeNodeHandler, *api.Node) bool CreateHook func(*FakeNodeHandler, *v1.Node) bool
Existing []*api.Node Existing []*v1.Node
// Output // Output
CreatedNodes []*api.Node CreatedNodes []*v1.Node
DeletedNodes []*api.Node DeletedNodes []*v1.Node
UpdatedNodes []*api.Node UpdatedNodes []*v1.Node
UpdatedNodeStatuses []*api.Node UpdatedNodeStatuses []*v1.Node
RequestCount int RequestCount int
// Synchronization // Synchronization
@@ -59,29 +60,29 @@ type FakeNodeHandler struct {
} }
type FakeLegacyHandler struct { type FakeLegacyHandler struct {
unversionedcore.CoreInterface v1core.CoreV1Interface
n *FakeNodeHandler n *FakeNodeHandler
} }
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*api.Node { func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
c.lock.Lock() c.lock.Lock()
defer c.lock.Unlock() defer c.lock.Unlock()
updatedNodesCopy := make([]*api.Node, len(c.UpdatedNodes), len(c.UpdatedNodes)) updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
for i, ptr := range c.UpdatedNodes { for i, ptr := range c.UpdatedNodes {
updatedNodesCopy[i] = ptr updatedNodesCopy[i] = ptr
} }
return updatedNodesCopy return updatedNodesCopy
} }
func (c *FakeNodeHandler) Core() unversionedcore.CoreInterface { func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c} return &FakeLegacyHandler{c.Clientset.Core(), c}
} }
func (m *FakeLegacyHandler) Nodes() unversionedcore.NodeInterface { func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
return m.n return m.n
} }
func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) { func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
@@ -101,7 +102,7 @@ func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
} }
} }
func (m *FakeNodeHandler) Get(name string) (*api.Node, error) { func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
@@ -122,13 +123,13 @@ func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
return nil, nil return nil, nil
} }
func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) { func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
m.lock.Unlock() m.lock.Unlock()
}() }()
var nodes []*api.Node var nodes []*v1.Node
for i := 0; i < len(m.UpdatedNodes); i++ { for i := 0; i < len(m.UpdatedNodes); i++ {
if !contains(m.UpdatedNodes[i], m.DeletedNodes) { if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
nodes = append(nodes, m.UpdatedNodes[i]) nodes = append(nodes, m.UpdatedNodes[i])
@@ -144,14 +145,14 @@ func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
nodes = append(nodes, m.CreatedNodes[i]) nodes = append(nodes, m.CreatedNodes[i])
} }
} }
nodeList := &api.NodeList{} nodeList := &v1.NodeList{}
for _, node := range nodes { for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node) nodeList.Items = append(nodeList.Items, *node)
} }
return nodeList, nil return nodeList, nil
} }
func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error { func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
@@ -164,11 +165,11 @@ func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
return nil return nil
} }
func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error { func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
return nil return nil
} }
func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) { func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
@@ -185,7 +186,7 @@ func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
return node, nil return node, nil
} }
func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) { func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.lock.Lock() m.lock.Lock()
defer func() { defer func() {
m.RequestCount++ m.RequestCount++
@@ -196,23 +197,23 @@ func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
return node, nil return node, nil
} }
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*api.Node, error) { func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
m.RequestCount++ m.RequestCount++
return &api.Node{}, nil return &v1.Node{}, nil
} }
func (m *FakeNodeHandler) Watch(opts api.ListOptions) (watch.Interface, error) { func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil return watch.NewFake(), nil
} }
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Node, error) { func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil return nil, nil
} }
// FakeRecorder is used as a fake during testing. // FakeRecorder is used as a fake during testing.
type FakeRecorder struct { type FakeRecorder struct {
source api.EventSource source v1.EventSource
events []*api.Event events []*v1.Event
clock clock.Clock clock clock.Clock
} }
@@ -228,7 +229,7 @@ func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp unversioned.Time
} }
func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) { func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
ref, err := api.GetReference(obj) ref, err := v1.GetReference(obj)
if err != nil { if err != nil {
return return
} }
@@ -240,15 +241,15 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.T
} }
} }
func (f *FakeRecorder) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event { func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
fmt.Println("make event") fmt.Println("make event")
t := unversioned.Time{Time: f.clock.Now()} t := unversioned.Time{Time: f.clock.Now()}
namespace := ref.Namespace namespace := ref.Namespace
if namespace == "" { if namespace == "" {
namespace = api.NamespaceDefault namespace = v1.NamespaceDefault
} }
return &api.Event{ return &v1.Event{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()), Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace, Namespace: namespace,
}, },
@@ -264,41 +265,41 @@ func (f *FakeRecorder) makeEvent(ref *api.ObjectReference, eventtype, reason, me
func NewFakeRecorder() *FakeRecorder { func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{ return &FakeRecorder{
source: api.EventSource{Component: "nodeControllerTest"}, source: v1.EventSource{Component: "nodeControllerTest"},
events: []*api.Event{}, events: []*v1.Event{},
clock: clock.NewFakeClock(time.Now()), clock: clock.NewFakeClock(time.Now()),
} }
} }
func newNode(name string) *api.Node { func newNode(name string) *v1.Node {
return &api.Node{ return &v1.Node{
ObjectMeta: api.ObjectMeta{Name: name}, ObjectMeta: v1.ObjectMeta{Name: name},
Spec: api.NodeSpec{ Spec: v1.NodeSpec{
ExternalID: name, ExternalID: name,
}, },
Status: api.NodeStatus{ Status: v1.NodeStatus{
Capacity: api.ResourceList{ Capacity: v1.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"), v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"), v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
}, },
}, },
} }
} }
func newPod(name, host string) *api.Pod { func newPod(name, host string) *v1.Pod {
pod := &api.Pod{ pod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: name, Name: name,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
NodeName: host, NodeName: host,
}, },
Status: api.PodStatus{ Status: v1.PodStatus{
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: api.PodReady, Type: v1.PodReady,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
}, },
}, },
}, },
@@ -307,7 +308,7 @@ func newPod(name, host string) *api.Pod {
return pod return pod
} }
func contains(node *api.Node, nodes []*api.Node) bool { func contains(node *v1.Node, nodes []*v1.Node) bool {
for i := 0; i < len(nodes); i++ { for i := 0; i < len(nodes); i++ {
if node.Name == nodes[i].Name { if node.Name == nodes[i].Name {
return true return true
@@ -318,7 +319,7 @@ func contains(node *api.Node, nodes []*api.Node) bool {
// Returns list of zones for all Nodes stored in FakeNodeHandler // Returns list of zones for all Nodes stored in FakeNodeHandler
func getZones(nodeHandler *FakeNodeHandler) []string { func getZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(api.ListOptions{}) nodes, _ := nodeHandler.List(v1.ListOptions{})
zones := sets.NewString() zones := sets.NewString()
for _, node := range nodes.Items { for _, node := range nodes.Items {
zones.Insert(utilnode.GetZoneKey(&node)) zones.Insert(utilnode.GetZoneKey(&node))

View File

@@ -22,11 +22,11 @@ import (
inf "gopkg.in/inf.v0" inf "gopkg.in/inf.v0"
"k8s.io/kubernetes/pkg/api"
api_pod "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@@ -36,34 +36,34 @@ func dec(i int64, exponent int) *inf.Dec {
return inf.NewDec(i, inf.Scale(-exponent)) return inf.NewDec(i, inf.Scale(-exponent))
} }
func newPVC(name string) api.PersistentVolumeClaim { func newPVC(name string) v1.PersistentVolumeClaim {
return api.PersistentVolumeClaim{ return v1.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
}, },
Spec: api.PersistentVolumeClaimSpec{ Spec: v1.PersistentVolumeClaimSpec{
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI), v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
}, },
}, },
}, },
} }
} }
func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.StatefulSet { func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount) *apps.StatefulSet {
mounts := append(petMounts, podMounts...) mounts := append(petMounts, podMounts...)
claims := []api.PersistentVolumeClaim{} claims := []v1.PersistentVolumeClaim{}
for _, m := range petMounts { for _, m := range petMounts {
claims = append(claims, newPVC(m.Name)) claims = append(claims, newPVC(m.Name))
} }
vols := []api.Volume{} vols := []v1.Volume{}
for _, m := range podMounts { for _, m := range podMounts {
vols = append(vols, api.Volume{ vols = append(vols, v1.Volume{
Name: m.Name, Name: m.Name,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
HostPath: &api.HostPathVolumeSource{ HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", m.Name), Path: fmt.Sprintf("/tmp/%v", m.Name),
}, },
}, },
@@ -75,19 +75,19 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.Volume
Kind: "StatefulSet", Kind: "StatefulSet",
APIVersion: "apps/v1beta1", APIVersion: "apps/v1beta1",
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
UID: types.UID("test"), UID: types.UID("test"),
}, },
Spec: apps.StatefulSetSpec{ Spec: apps.StatefulSetSpec{
Selector: &unversioned.LabelSelector{ Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"}, MatchLabels: map[string]string{"foo": "bar"},
}, },
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "nginx", Name: "nginx",
Image: "nginx", Image: "nginx",
@@ -103,16 +103,16 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.Volume
} }
} }
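A note on the Replicas change in the hunk above: apps/v1beta1 declares Replicas as a *int32 (versioned types use pointers for optional, defaulted fields), and Go cannot take the address of a conversion result, hence the inline closure. A minimal sketch of the same idea using a hypothetical int32Ptr helper that is not part of this commit:

    // int32Ptr returns a pointer to a local copy of v, for populating optional
    // *int32 fields such as the apps/v1beta1 StatefulSetSpec.Replicas above.
    func int32Ptr(v int32) *int32 {
        return &v
    }

    // Usage sketch: spec.Replicas = int32Ptr(int32(replicas))
    // Reading the value back needs a nil check plus a dereference:
    // if spec.Replicas != nil { n := int(*spec.Replicas) }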
func runningPod(ns, name string) *api.Pod { func runningPod(ns, name string) *v1.Pod {
p := &api.Pod{Status: api.PodStatus{Phase: api.PodRunning}} p := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
p.Namespace = ns p.Namespace = ns
p.Name = name p.Name = name
return p return p
} }
func newPodList(ps *apps.StatefulSet, num int) []*api.Pod { func newPodList(ps *apps.StatefulSet, num int) []*v1.Pod {
// knownPods are pods in the system // knownPods are pods in the system
knownPods := []*api.Pod{} knownPods := []*v1.Pod{}
for i := 0; i < num; i++ { for i := 0; i < num; i++ {
k, _ := newPCB(fmt.Sprintf("%v", i), ps) k, _ := newPCB(fmt.Sprintf("%v", i), ps)
knownPods = append(knownPods, k.pod) knownPods = append(knownPods, k.pod)
@@ -121,16 +121,16 @@ func newPodList(ps *apps.StatefulSet, num int) []*api.Pod {
} }
func newStatefulSet(replicas int) *apps.StatefulSet { func newStatefulSet(replicas int) *apps.StatefulSet {
petMounts := []api.VolumeMount{ petMounts := []v1.VolumeMount{
{Name: "datadir", MountPath: "/tmp/zookeeper"}, {Name: "datadir", MountPath: "/tmp/zookeeper"},
} }
podMounts := []api.VolumeMount{ podMounts := []v1.VolumeMount{
{Name: "home", MountPath: "/home"}, {Name: "home", MountPath: "/home"},
} }
return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts) return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts)
} }
func checkPodForMount(pod *api.Pod, mountName string) error { func checkPodForMount(pod *v1.Pod, mountName string) error {
for _, c := range pod.Spec.Containers { for _, c := range pod.Spec.Containers {
for _, v := range c.VolumeMounts { for _, v := range c.VolumeMounts {
if v.Name == mountName { if v.Name == mountName {
@@ -144,7 +144,7 @@ func checkPodForMount(pod *api.Pod, mountName string) error {
func newFakePetClient() *fakePetClient { func newFakePetClient() *fakePetClient {
return &fakePetClient{ return &fakePetClient{
pets: []*pcb{}, pets: []*pcb{},
claims: []api.PersistentVolumeClaim{}, claims: []v1.PersistentVolumeClaim{},
recorder: &record.FakeRecorder{}, recorder: &record.FakeRecorder{},
petHealthChecker: &defaultPetHealthChecker{}, petHealthChecker: &defaultPetHealthChecker{},
} }
@@ -152,7 +152,7 @@ func newFakePetClient() *fakePetClient {
type fakePetClient struct { type fakePetClient struct {
pets []*pcb pets []*pcb
claims []api.PersistentVolumeClaim claims []v1.PersistentVolumeClaim
petsCreated int petsCreated int
petsDeleted int petsDeleted int
claimsCreated int claimsCreated int
@@ -168,7 +168,7 @@ func (f *fakePetClient) Delete(p *pcb) error {
for i, pet := range f.pets { for i, pet := range f.pets {
if p.pod.Name == pet.pod.Name { if p.pod.Name == pet.pod.Name {
found = true found = true
f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name) f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name)
continue continue
} }
pets = append(pets, f.pets[i]) pets = append(pets, f.pets[i])
@@ -199,7 +199,7 @@ func (f *fakePetClient) Create(p *pcb) error {
return fmt.Errorf("Create failed: pod %v already exists", p.pod.Name) return fmt.Errorf("Create failed: pod %v already exists", p.pod.Name)
} }
} }
f.recorder.Eventf(p.parent, api.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name) f.recorder.Eventf(p.parent, v1.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name)
f.pets = append(f.pets, p) f.pets = append(f.pets, p)
f.petsCreated++ f.petsCreated++
return nil return nil
@@ -226,8 +226,8 @@ func (f *fakePetClient) Update(expected, wanted *pcb) error {
return nil return nil
} }
func (f *fakePetClient) getPodList() []*api.Pod { func (f *fakePetClient) getPodList() []*v1.Pod {
p := []*api.Pod{} p := []*v1.Pod{}
for i, pet := range f.pets { for i, pet := range f.pets {
if pet.pod == nil { if pet.pod == nil {
continue continue
@@ -251,10 +251,10 @@ func (f *fakePetClient) setHealthy(index int) error {
if len(f.pets) <= index { if len(f.pets) <= index {
return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index) return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
} }
f.pets[index].pod.Status.Phase = api.PodRunning f.pets[index].pod.Status.Phase = v1.PodRunning
f.pets[index].pod.Annotations[StatefulSetInitAnnotation] = "true" f.pets[index].pod.Annotations[StatefulSetInitAnnotation] = "true"
f.pets[index].pod.Status.Conditions = []api.PodCondition{ f.pets[index].pod.Status.Conditions = []v1.PodCondition{
{Type: api.PodReady, Status: api.ConditionTrue}, {Type: v1.PodReady, Status: v1.ConditionTrue},
} }
return nil return nil
} }
@@ -262,7 +262,7 @@ func (f *fakePetClient) setHealthy(index int) error {
// isHealthy is a convenience wrapper around the default health checker. // isHealthy is a convenience wrapper around the default health checker.
// The first invocation returns not-healthy, but marks the pet healthy so // The first invocation returns not-healthy, but marks the pet healthy so
// subsequent invocations see it as healthy. // subsequent invocations see it as healthy.
func (f *fakePetClient) isHealthy(pod *api.Pod) bool { func (f *fakePetClient) isHealthy(pod *v1.Pod) bool {
if f.petHealthChecker.isHealthy(pod) { if f.petHealthChecker.isHealthy(pod) {
return true return true
} }
@@ -280,11 +280,11 @@ func (f *fakePetClient) setDeletionTimestamp(index int) error {
// SyncPVCs fakes pvc syncing. // SyncPVCs fakes pvc syncing.
func (f *fakePetClient) SyncPVCs(pet *pcb) error { func (f *fakePetClient) SyncPVCs(pet *pcb) error {
v := pet.pvcs v := pet.pvcs
updateClaims := map[string]api.PersistentVolumeClaim{} updateClaims := map[string]v1.PersistentVolumeClaim{}
for i, update := range v { for i, update := range v {
updateClaims[update.Name] = v[i] updateClaims[update.Name] = v[i]
} }
claimList := []api.PersistentVolumeClaim{} claimList := []v1.PersistentVolumeClaim{}
for i, existing := range f.claims { for i, existing := range f.claims {
if update, ok := updateClaims[existing.Name]; ok { if update, ok := updateClaims[existing.Name]; ok {
claimList = append(claimList, update) claimList = append(claimList, update)
@@ -296,7 +296,7 @@ func (f *fakePetClient) SyncPVCs(pet *pcb) error {
for _, remaining := range updateClaims { for _, remaining := range updateClaims {
claimList = append(claimList, remaining) claimList = append(claimList, remaining)
f.claimsCreated++ f.claimsCreated++
f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name) f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
} }
f.claims = claimList f.claims = claimList
return nil return nil
@@ -309,12 +309,12 @@ func (f *fakePetClient) DeletePVCs(pet *pcb) error {
for _, c := range claimsToDelete { for _, c := range claimsToDelete {
deleteClaimNames.Insert(c.Name) deleteClaimNames.Insert(c.Name)
} }
pvcs := []api.PersistentVolumeClaim{} pvcs := []v1.PersistentVolumeClaim{}
for i, existing := range f.claims { for i, existing := range f.claims {
if deleteClaimNames.Has(existing.Name) { if deleteClaimNames.Has(existing.Name) {
deleteClaimNames.Delete(existing.Name) deleteClaimNames.Delete(existing.Name)
f.claimsDeleted++ f.claimsDeleted++
f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name) f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
continue continue
} }
pvcs = append(pvcs, f.claims[i]) pvcs = append(pvcs, f.claims[i])

View File

@@ -23,9 +23,9 @@ import (
"strings" "strings"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
podapi "k8s.io/kubernetes/pkg/api/pod" podapi "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/apis/apps" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )
@@ -41,10 +41,10 @@ type identityMapper interface {
// SetIdentity takes an id and assigns the given pet an identity based // SetIdentity takes an id and assigns the given pet an identity based
// on the stateful set spec. The id must be unique amongst members of the // on the stateful set spec. The id must be unique amongst members of the

// stateful set. // stateful set.
SetIdentity(id string, pet *api.Pod) SetIdentity(id string, pet *v1.Pod)
// Identity returns the identity of the pet. // Identity returns the identity of the pet.
Identity(pod *api.Pod) string Identity(pod *v1.Pod) string
} }
func newIdentityMappers(ps *apps.StatefulSet) []identityMapper { func newIdentityMappers(ps *apps.StatefulSet) []identityMapper {
@@ -61,19 +61,19 @@ type NetworkIdentityMapper struct {
} }
// SetIdentity sets network identity on the pet. // SetIdentity sets network identity on the pet.
func (n *NetworkIdentityMapper) SetIdentity(id string, pet *api.Pod) { func (n *NetworkIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
pet.Annotations[podapi.PodHostnameAnnotation] = fmt.Sprintf("%v-%v", n.ps.Name, id) pet.Annotations[podapi.PodHostnameAnnotation] = fmt.Sprintf("%v-%v", n.ps.Name, id)
pet.Annotations[podapi.PodSubdomainAnnotation] = n.ps.Spec.ServiceName pet.Annotations[podapi.PodSubdomainAnnotation] = n.ps.Spec.ServiceName
return return
} }
// Identity returns the network identity of the pet. // Identity returns the network identity of the pet.
func (n *NetworkIdentityMapper) Identity(pet *api.Pod) string { func (n *NetworkIdentityMapper) Identity(pet *v1.Pod) string {
return n.String(pet) return n.String(pet)
} }
// String is a string function for the network identity of the pet. // String is a string function for the network identity of the pet.
func (n *NetworkIdentityMapper) String(pet *api.Pod) string { func (n *NetworkIdentityMapper) String(pet *v1.Pod) string {
hostname := pet.Annotations[podapi.PodHostnameAnnotation] hostname := pet.Annotations[podapi.PodHostnameAnnotation]
subdomain := pet.Annotations[podapi.PodSubdomainAnnotation] subdomain := pet.Annotations[podapi.PodSubdomainAnnotation]
return strings.Join([]string{hostname, subdomain, n.ps.Namespace}, ".") return strings.Join([]string{hostname, subdomain, n.ps.Namespace}, ".")
@@ -85,13 +85,13 @@ type VolumeIdentityMapper struct {
} }
// SetIdentity sets storage identity on the pet. // SetIdentity sets storage identity on the pet.
func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) { func (v *VolumeIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
petVolumes := []api.Volume{} petVolumes := []v1.Volume{}
petClaims := v.GetClaims(id) petClaims := v.GetClaims(id)
// These volumes will all go down with the pod. If a name matches one of // These volumes will all go down with the pod. If a name matches one of
// the claims in the stateful set, it gets clobbered. // the claims in the stateful set, it gets clobbered.
podVolumes := map[string]api.Volume{} podVolumes := map[string]v1.Volume{}
for _, podVol := range pet.Spec.Volumes { for _, podVol := range pet.Spec.Volumes {
podVolumes[podVol.Name] = podVol podVolumes[podVol.Name] = podVol
} }
@@ -105,10 +105,10 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
// TODO: Validate and reject this. // TODO: Validate and reject this.
glog.V(4).Infof("Overwriting existing volume source %v", podVol.Name) glog.V(4).Infof("Overwriting existing volume source %v", podVol.Name)
} }
newVol := api.Volume{ newVol := v1.Volume{
Name: name, Name: name,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name, ClaimName: claim.Name,
// TODO: Use source definition to set this value when we have one. // TODO: Use source definition to set this value when we have one.
ReadOnly: false, ReadOnly: false,
@@ -129,13 +129,13 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
} }
// Identity returns the storage identity of the pet. // Identity returns the storage identity of the pet.
func (v *VolumeIdentityMapper) Identity(pet *api.Pod) string { func (v *VolumeIdentityMapper) Identity(pet *v1.Pod) string {
// TODO: Make this a hash? // TODO: Make this a hash?
return v.String(pet) return v.String(pet)
} }
// String is a string function for the storage identity of the pet. // String is a string function for the storage identity of the pet.
func (v *VolumeIdentityMapper) String(pet *api.Pod) string { func (v *VolumeIdentityMapper) String(pet *v1.Pod) string {
ids := []string{} ids := []string{}
petVols := sets.NewString() petVols := sets.NewString()
for _, petVol := range v.ps.Spec.VolumeClaimTemplates { for _, petVol := range v.ps.Spec.VolumeClaimTemplates {
@@ -160,8 +160,8 @@ func (v *VolumeIdentityMapper) String(pet *api.Pod) string {
// GetClaims returns the volume claims associated with the given id. // GetClaims returns the volume claims associated with the given id.
// The claims belong to the statefulset. The id should be unique within a statefulset. // The claims belong to the statefulset. The id should be unique within a statefulset.
func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVolumeClaim { func (v *VolumeIdentityMapper) GetClaims(id string) map[string]v1.PersistentVolumeClaim {
petClaims := map[string]api.PersistentVolumeClaim{} petClaims := map[string]v1.PersistentVolumeClaim{}
for _, pvc := range v.ps.Spec.VolumeClaimTemplates { for _, pvc := range v.ps.Spec.VolumeClaimTemplates {
claim := pvc claim := pvc
// TODO: Name length checking in validation. // TODO: Name length checking in validation.
@@ -177,12 +177,12 @@ func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVol
} }
// GetClaimsForPet returns the pvcs for the given pet. // GetClaimsForPet returns the pvcs for the given pet.
func (v *VolumeIdentityMapper) GetClaimsForPet(pet *api.Pod) []api.PersistentVolumeClaim { func (v *VolumeIdentityMapper) GetClaimsForPet(pet *v1.Pod) []v1.PersistentVolumeClaim {
// Strip out the "-(index)" from the pet name and use it to generate // Strip out the "-(index)" from the pet name and use it to generate
// claim names. // claim names.
id := strings.Split(pet.Name, "-") id := strings.Split(pet.Name, "-")
petID := id[len(id)-1] petID := id[len(id)-1]
pvcs := []api.PersistentVolumeClaim{} pvcs := []v1.PersistentVolumeClaim{}
for _, pvc := range v.GetClaims(petID) { for _, pvc := range v.GetClaims(petID) {
pvcs = append(pvcs, pvc) pvcs = append(pvcs, pvc)
} }
@@ -196,25 +196,25 @@ type NameIdentityMapper struct {
} }
// SetIdentity sets the pet namespace and name. // SetIdentity sets the pet namespace and name.
func (n *NameIdentityMapper) SetIdentity(id string, pet *api.Pod) { func (n *NameIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id) pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id)
pet.Namespace = n.ps.Namespace pet.Namespace = n.ps.Namespace
return return
} }
// Identity returns the name identity of the pet. // Identity returns the name identity of the pet.
func (n *NameIdentityMapper) Identity(pet *api.Pod) string { func (n *NameIdentityMapper) Identity(pet *v1.Pod) string {
return n.String(pet) return n.String(pet)
} }
// String is a string function for the name identity of the pet. // String is a string function for the name identity of the pet.
func (n *NameIdentityMapper) String(pet *api.Pod) string { func (n *NameIdentityMapper) String(pet *v1.Pod) string {
return fmt.Sprintf("%v/%v", pet.Namespace, pet.Name) return fmt.Sprintf("%v/%v", pet.Namespace, pet.Name)
} }
// identityHash computes a hash of the pet by running all the above identity // identityHash computes a hash of the pet by running all the above identity
// mappers. // mappers.
func identityHash(ps *apps.StatefulSet, pet *api.Pod) string { func identityHash(ps *apps.StatefulSet, pet *v1.Pod) string {
id := "" id := ""
for _, idMapper := range newIdentityMappers(ps) { for _, idMapper := range newIdentityMappers(ps) {
id += idMapper.Identity(pet) id += idMapper.Identity(pet)
@@ -226,7 +226,7 @@ func identityHash(ps *apps.StatefulSet, pet *api.Pod) string {
// Note that this is *not* a literal copy, but a copy of the fields that // Note that this is *not* a literal copy, but a copy of the fields that
// contribute to the pet's identity. The returned boolean 'needsUpdate' will // contribute to the pet's identity. The returned boolean 'needsUpdate' will
// be false if the realPet already has the same identity as the expectedPet. // be false if the realPet already has the same identity as the expectedPet.
func copyPetID(realPet, expectedPet *pcb) (pod api.Pod, needsUpdate bool, err error) { func copyPetID(realPet, expectedPet *pcb) (pod v1.Pod, needsUpdate bool, err error) {
if realPet.pod == nil || expectedPet.pod == nil { if realPet.pod == nil || expectedPet.pod == nil {
return pod, false, fmt.Errorf("Need a valid to and from pet for copy") return pod, false, fmt.Errorf("Need a valid to and from pet for copy")
} }

View File

@@ -23,8 +23,8 @@ import (
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/pod" api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
) )
func TestPetIDName(t *testing.T) { func TestPetIDName(t *testing.T) {
@@ -150,10 +150,10 @@ func TestPetIDReset(t *testing.T) {
if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) { if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) {
t.Fatalf("Failed to generate uniquey identities:\n%+v\n%+v", firstPCB.pod.Spec, secondPCB.pod.Spec) t.Fatalf("Failed to generate uniquey identities:\n%+v\n%+v", firstPCB.pod.Spec, secondPCB.pod.Spec)
} }
userAdded := api.Volume{ userAdded := v1.Volume{
Name: "test", Name: "test",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}, EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
}, },
} }
firstPCB.pod.Spec.Volumes = append(firstPCB.pod.Spec.Volumes, userAdded) firstPCB.pod.Spec.Volumes = append(firstPCB.pod.Spec.Volumes, userAdded)

View File

@@ -21,8 +21,8 @@ import (
"sort" "sort"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/apps" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
) )
@@ -35,7 +35,7 @@ func newPCB(id string, ps *apps.StatefulSet) (*pcb, error) {
for _, im := range newIdentityMappers(ps) { for _, im := range newIdentityMappers(ps) {
im.SetIdentity(id, petPod) im.SetIdentity(id, petPod)
} }
petPVCs := []api.PersistentVolumeClaim{} petPVCs := []v1.PersistentVolumeClaim{}
vMapper := &VolumeIdentityMapper{ps} vMapper := &VolumeIdentityMapper{ps}
for _, c := range vMapper.GetClaims(id) { for _, c := range vMapper.GetClaims(id) {
petPVCs = append(petPVCs, c) petPVCs = append(petPVCs, c)
@@ -87,7 +87,7 @@ func (pt *petQueue) empty() bool {
} }
// NewPetQueue returns a queue for tracking pets // NewPetQueue returns a queue for tracking pets
func NewPetQueue(ps *apps.StatefulSet, podList []*api.Pod) *petQueue { func NewPetQueue(ps *apps.StatefulSet, podList []*v1.Pod) *petQueue {
pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}} pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}}
// Seed the queue with existing pets. Assume all pets are scheduled for // Seed the queue with existing pets. Assume all pets are scheduled for
// deletion, enqueuing a pet will "undelete" it. We always want to delete // deletion, enqueuing a pet will "undelete" it. We always want to delete
@@ -118,7 +118,7 @@ type statefulSetIterator struct {
func (pi *statefulSetIterator) Next() bool { func (pi *statefulSetIterator) Next() bool {
var pet *pcb var pet *pcb
var err error var err error
if pi.petCount < pi.ps.Spec.Replicas { if pi.petCount < *(pi.ps.Spec.Replicas) {
pet, err = newPCB(fmt.Sprintf("%d", pi.petCount), pi.ps) pet, err = newPCB(fmt.Sprintf("%d", pi.petCount), pi.ps)
if err != nil { if err != nil {
pi.errs = append(pi.errs, err) pi.errs = append(pi.errs, err)
@@ -139,7 +139,7 @@ func (pi *statefulSetIterator) Value() *pcb {
// NewStatefulSetIterator returns a new iterator. All pods in the given podList // NewStatefulSetIterator returns a new iterator. All pods in the given podList
// are used to seed the queue of the iterator. // are used to seed the queue of the iterator.
func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulSetIterator { func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*v1.Pod) *statefulSetIterator {
pi := &statefulSetIterator{ pi := &statefulSetIterator{
ps: ps, ps: ps,
queue: NewPetQueue(ps, podList), queue: NewPetQueue(ps, podList),
@@ -150,7 +150,7 @@ func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulS
} }
// PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker. // PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
type PodsByCreationTimestamp []*api.Pod type PodsByCreationTimestamp []*v1.Pod
func (o PodsByCreationTimestamp) Len() int { return len(o) } func (o PodsByCreationTimestamp) Len() int { return len(o) }
func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
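The Less method of PodsByCreationTimestamp falls outside this hunk; based on the comment above, the comparison it describes (creation timestamp first, name as tie breaker) looks roughly like the sketch below — an illustration, not the literal code from this commit.

    // Less orders pods by CreationTimestamp, using Name as a tie breaker when
    // two pods were created at the same instant (sketch of the behavior above).
    func (o PodsByCreationTimestamp) Less(i, j int) bool {
        if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
            return o[i].Name < o[j].Name
        }
        return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
    }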

View File

@@ -21,14 +21,14 @@ import (
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )
func TestPetQueueCreates(t *testing.T) { func TestPetQueueCreates(t *testing.T) {
replicas := 3 replicas := 3
ps := newStatefulSet(replicas) ps := newStatefulSet(replicas)
q := NewPetQueue(ps, []*api.Pod{}) q := NewPetQueue(ps, []*v1.Pod{})
for i := 0; i < replicas; i++ { for i := 0; i < replicas; i++ {
pet, _ := newPCB(fmt.Sprintf("%v", i), ps) pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
q.enqueue(pet) q.enqueue(pet)
@@ -107,7 +107,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
knownPods := newPodList(ps, 5) knownPods := newPodList(ps, 5)
for i := range knownPods { for i := range knownPods {
knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i) knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i)
knownPods[i].Status.Phase = api.PodRunning knownPods[i].Status.Phase = v1.PodRunning
} }
pi := NewStatefulSetIterator(ps, knownPods) pi := NewStatefulSetIterator(ps, knownPods)
@@ -128,7 +128,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
} }
// Scale to 0 should delete all pods in system // Scale to 0 should delete all pods in system
ps.Spec.Replicas = 0 *(ps.Spec.Replicas) = 0
pi = NewStatefulSetIterator(ps, knownPods) pi = NewStatefulSetIterator(ps, knownPods)
i = 0 i = 0
for pi.Next() { for pi.Next() {
@@ -143,7 +143,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
} }
// Relist with 0 replicas should no-op // Relist with 0 replicas should no-op
pi = NewStatefulSetIterator(ps, []*api.Pod{}) pi = NewStatefulSetIterator(ps, []*v1.Pod{})
if pi.Next() != false { if pi.Next() != false {
t.Errorf("Unexpected iteration without any replicas or pods in system") t.Errorf("Unexpected iteration without any replicas or pods in system")
} }

View File

@@ -20,10 +20,10 @@ import (
"fmt" "fmt"
"strconv" "strconv"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -52,9 +52,9 @@ const (
// and parent fields to pass it around safely. // and parent fields to pass it around safely.
type pcb struct { type pcb struct {
// pod is the desired pet pod. // pod is the desired pet pod.
pod *api.Pod pod *v1.Pod
// pvcs is a list of desired persistent volume claims for the pet pod. // pvcs is a list of desired persistent volume claims for the pet pod.
pvcs []api.PersistentVolumeClaim pvcs []v1.PersistentVolumeClaim
// event is the lifecycle event associated with this update. // event is the lifecycle event associated with this update.
event petLifeCycleEvent event petLifeCycleEvent
// id is the identity index of this pet. // id is the identity index of this pet.
@@ -106,7 +106,7 @@ func (p *petSyncer) Sync(pet *pcb) error {
return err return err
} }
// if pet failed - we need to remove old one because of consistent naming // if pet failed - we need to remove old one because of consistent naming
if exists && realPet.pod.Status.Phase == api.PodFailed { if exists && realPet.pod.Status.Phase == v1.PodFailed {
glog.V(2).Infof("Deleting evicted pod %v/%v", realPet.pod.Namespace, realPet.pod.Name) glog.V(2).Infof("Deleting evicted pod %v/%v", realPet.pod.Namespace, realPet.pod.Name)
if err := p.petClient.Delete(realPet); err != nil { if err := p.petClient.Delete(realPet); err != nil {
return err return err
@@ -175,7 +175,7 @@ type petClient interface {
// apiServerPetClient is a statefulset aware Kubernetes client. // apiServerPetClient is a statefulset aware Kubernetes client.
type apiServerPetClient struct { type apiServerPetClient struct {
c internalclientset.Interface c clientset.Interface
recorder record.EventRecorder recorder record.EventRecorder
petHealthChecker petHealthChecker
} }
@@ -242,12 +242,12 @@ func (p *apiServerPetClient) DeletePVCs(pet *pcb) error {
return nil return nil
} }
func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*api.PersistentVolumeClaim, error) { func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, error) {
pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName) pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName)
return pvc, err return pvc, err
} }
func (p *apiServerPetClient) createPVC(pvc *api.PersistentVolumeClaim) error { func (p *apiServerPetClient) createPVC(pvc *v1.PersistentVolumeClaim) error {
_, err := p.c.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc) _, err := p.c.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
return err return err
} }
@@ -280,17 +280,17 @@ func (p *apiServerPetClient) SyncPVCs(pet *pcb) error {
// event formats an event for the given runtime object. // event formats an event for the given runtime object.
func (p *apiServerPetClient) event(obj runtime.Object, reason, msg string, err error) { func (p *apiServerPetClient) event(obj runtime.Object, reason, msg string, err error) {
if err != nil { if err != nil {
p.recorder.Eventf(obj, api.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err)) p.recorder.Eventf(obj, v1.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err))
} else { } else {
p.recorder.Eventf(obj, api.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg) p.recorder.Eventf(obj, v1.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg)
} }
} }
// petHealthChecker is an interface to check pet health. It makes a boolean // petHealthChecker is an interface to check pet health. It makes a boolean
// decision based on the given pod. // decision based on the given pod.
type petHealthChecker interface { type petHealthChecker interface {
isHealthy(*api.Pod) bool isHealthy(*v1.Pod) bool
isDying(*api.Pod) bool isDying(*v1.Pod) bool
} }
// defaultPetHealthChecks does basic health checking. // defaultPetHealthChecks does basic health checking.
@@ -299,11 +299,11 @@ type defaultPetHealthChecker struct{}
// isHealthy returns true if the pod is ready & running. If the pod has the // isHealthy returns true if the pod is ready & running. If the pod has the
// "pod.alpha.kubernetes.io/initialized" annotation set to "false", pod state is ignored. // "pod.alpha.kubernetes.io/initialized" annotation set to "false", pod state is ignored.
func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool { func (d *defaultPetHealthChecker) isHealthy(pod *v1.Pod) bool {
if pod == nil || pod.Status.Phase != api.PodRunning { if pod == nil || pod.Status.Phase != v1.PodRunning {
return false return false
} }
podReady := api.IsPodReady(pod) podReady := v1.IsPodReady(pod)
// User may have specified a pod readiness override through a debug annotation. // User may have specified a pod readiness override through a debug annotation.
initialized, ok := pod.Annotations[StatefulSetInitAnnotation] initialized, ok := pod.Annotations[StatefulSetInitAnnotation]
@@ -321,6 +321,6 @@ func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
// isDying returns true if the pod has a non-nil deletion timestamp. Since the // isDying returns true if the pod has a non-nil deletion timestamp. Since the
// timestamp can only decrease, once this method returns true for a given pet, it // timestamp can only decrease, once this method returns true for a given pet, it
// will never return false. // will never return false.
func (d *defaultPetHealthChecker) isDying(pod *api.Pod) bool { func (d *defaultPetHealthChecker) isDying(pod *v1.Pod) bool {
return pod != nil && pod.DeletionTimestamp != nil return pod != nil && pod.DeletionTimestamp != nil
} }
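The "pod.alpha.kubernetes.io/initialized" override described above is what fakePetClient.setHealthy toggles earlier in this diff: a pod only counts as healthy when it is Running and Ready, and an explicit "false" annotation suppresses the Ready condition. A sketch of a test in this package exercising that override — pod values are illustrative; StatefulSetInitAnnotation and defaultPetHealthChecker come from the code above:

    // Sketch: the init annotation should override an otherwise Ready pod.
    func TestInitAnnotationOverridesReadiness(t *testing.T) {
        pod := &v1.Pod{}
        pod.Annotations = map[string]string{StatefulSetInitAnnotation: "false"}
        pod.Status.Phase = v1.PodRunning
        pod.Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}

        hc := &defaultPetHealthChecker{}
        if hc.isHealthy(pod) {
            t.Errorf("expected the %q annotation to override pod readiness", StatefulSetInitAnnotation)
        }
    }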

View File

@@ -22,12 +22,12 @@ import (
"sort" "sort"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps" "k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -52,7 +52,7 @@ const (
// StatefulSetController controls statefulsets. // StatefulSetController controls statefulsets.
type StatefulSetController struct { type StatefulSetController struct {
kubeClient internalclientset.Interface kubeClient clientset.Interface
// newSyncer returns an interface capable of syncing a single pet. // newSyncer returns an interface capable of syncing a single pet.
// Abstracted out for testing. // Abstracted out for testing.
@@ -83,11 +83,11 @@ type StatefulSetController struct {
} }
// NewStatefulSetController creates a new statefulset controller. // NewStatefulSetController creates a new statefulset controller.
func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface, resyncPeriod time.Duration) *StatefulSetController { func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod time.Duration) *StatefulSetController {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "statefulset"}) recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "statefulset"})
pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}} pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}
psc := &StatefulSetController{ psc := &StatefulSetController{
@@ -112,11 +112,11 @@ func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient
psc.psStore.Store, psc.psController = cache.NewInformer( psc.psStore.Store, psc.psController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).List(options) return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).Watch(options) return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options)
}, },
}, },
&apps.StatefulSet{}, &apps.StatefulSet{},
@@ -156,7 +156,7 @@ func (psc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {
// addPod adds the statefulset for the pod to the sync queue // addPod adds the statefulset for the pod to the sync queue
func (psc *StatefulSetController) addPod(obj interface{}) { func (psc *StatefulSetController) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels) glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
ps := psc.getStatefulSetForPod(pod) ps := psc.getStatefulSetForPod(pod)
if ps == nil { if ps == nil {
@@ -168,8 +168,8 @@ func (psc *StatefulSetController) addPod(obj interface{}) {
// updatePod adds the statefulset for the current and old pods to the sync queue. // updatePod adds the statefulset for the current and old pods to the sync queue.
// If the labels of the pod didn't change, this method enqueues a single statefulset. // If the labels of the pod didn't change, this method enqueues a single statefulset.
func (psc *StatefulSetController) updatePod(old, cur interface{}) { func (psc *StatefulSetController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod) curPod := cur.(*v1.Pod)
oldPod := old.(*api.Pod) oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion { if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods. // Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs. // Two different versions of the same pod will always have different RVs.
@@ -189,7 +189,7 @@ func (psc *StatefulSetController) updatePod(old, cur interface{}) {
// deletePod enqueues the statefulset for the pod accounting for deletion tombstones. // deletePod enqueues the statefulset for the pod accounting for deletion tombstones.
func (psc *StatefulSetController) deletePod(obj interface{}) { func (psc *StatefulSetController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not // When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains // in the list, leading to the insertion of a tombstone object which contains
@@ -201,7 +201,7 @@ func (psc *StatefulSetController) deletePod(obj interface{}) {
glog.Errorf("couldn't get object from tombstone %+v", obj) glog.Errorf("couldn't get object from tombstone %+v", obj)
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
glog.Errorf("tombstone contained object that is not a pod %+v", obj) glog.Errorf("tombstone contained object that is not a pod %+v", obj)
return return
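The tombstone handling above covers a missed watch delete: the relist wraps the vanished pod in a deleted-state tombstone, and deletePod has to unwrap it before it can find the owning StatefulSet. Written out in one piece — the cast to the tombstone type sits outside this hunk and is assumed to be cache.DeletedFinalStateUnknown:

    pod, ok := obj.(*v1.Pod)
    if !ok {
        // Assumed tombstone type: cache.DeletedFinalStateUnknown.
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            glog.Errorf("couldn't get object from tombstone %+v", obj)
            return
        }
        pod, ok = tombstone.Obj.(*v1.Pod)
        if !ok {
            glog.Errorf("tombstone contained object that is not a pod %+v", obj)
            return
        }
    }
    // pod is now safe to use for looking up its StatefulSet.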
@@ -214,18 +214,18 @@ func (psc *StatefulSetController) deletePod(obj interface{}) {
} }
// getPodsForStatefulSets returns the pods that match the selectors of the given statefulset. // getPodsForStatefulSets returns the pods that match the selectors of the given statefulset.
func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*api.Pod, error) { func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*v1.Pod, error) {
// TODO: Do we want the statefulset to fight with RCs? check parent statefulset annotation, or name prefix? // TODO: Do we want the statefulset to fight with RCs? check parent statefulset annotation, or name prefix?
sel, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector) sel, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
if err != nil { if err != nil {
return []*api.Pod{}, err return []*v1.Pod{}, err
} }
pods, err := psc.podStore.Pods(ps.Namespace).List(sel) pods, err := psc.podStore.Pods(ps.Namespace).List(sel)
if err != nil { if err != nil {
return []*api.Pod{}, err return []*v1.Pod{}, err
} }
// TODO: Do we need to copy? // TODO: Do we need to copy?
result := make([]*api.Pod, 0, len(pods)) result := make([]*v1.Pod, 0, len(pods))
for i := range pods { for i := range pods {
result = append(result, &(*pods[i])) result = append(result, &(*pods[i]))
} }
@@ -233,7 +233,7 @@ func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([
} }
// getStatefulSetForPod returns the pet set managing the given pod. // getStatefulSetForPod returns the pet set managing the given pod.
func (psc *StatefulSetController) getStatefulSetForPod(pod *api.Pod) *apps.StatefulSet { func (psc *StatefulSetController) getStatefulSetForPod(pod *v1.Pod) *apps.StatefulSet {
ps, err := psc.psStore.GetPodStatefulSets(pod) ps, err := psc.psStore.GetPodStatefulSets(pod)
if err != nil { if err != nil {
glog.V(4).Infof("No StatefulSets found for pod %v, StatefulSet controller will avoid syncing", pod.Name) glog.V(4).Infof("No StatefulSets found for pod %v, StatefulSet controller will avoid syncing", pod.Name)
@@ -320,7 +320,7 @@ func (psc *StatefulSetController) Sync(key string) error {
} }
// syncStatefulSet syncs a tuple of (statefulset, pets). // syncStatefulSet syncs a tuple of (statefulset, pets).
func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*api.Pod) (int, error) { func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*v1.Pod) (int, error) {
glog.V(2).Infof("Syncing StatefulSet %v/%v with %d pods", ps.Namespace, ps.Name, len(pets)) glog.V(2).Infof("Syncing StatefulSet %v/%v with %d pods", ps.Namespace, ps.Name, len(pets))
it := NewStatefulSetIterator(ps, pets) it := NewStatefulSetIterator(ps, pets)

View File

@@ -22,12 +22,12 @@ import (
"reflect" "reflect"
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/apps" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/errors"
) )
@@ -50,7 +50,7 @@ func checkPets(ps *apps.StatefulSet, creates, deletes int, fc *fakePetClient, t
if fc.petsCreated != creates || fc.petsDeleted != deletes { if fc.petsCreated != creates || fc.petsDeleted != deletes {
t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes) t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes)
} }
gotClaims := map[string]api.PersistentVolumeClaim{} gotClaims := map[string]v1.PersistentVolumeClaim{}
for _, pvc := range fc.claims { for _, pvc := range fc.claims {
gotClaims[pvc.Name] = pvc gotClaims[pvc.Name] = pvc
} }
@@ -88,7 +88,7 @@ func scaleStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetContro
} }
func saturateStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient) { func saturateStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient) {
err := scaleStatefulSet(t, ps, psc, fc, int(ps.Spec.Replicas)) err := scaleStatefulSet(t, ps, psc, fc, int(*(ps.Spec.Replicas)))
if err != nil { if err != nil {
t.Errorf("Error scaleStatefulSet: %v", err) t.Errorf("Error scaleStatefulSet: %v", err)
} }
@@ -119,7 +119,7 @@ func TestStatefulSetControllerDeletes(t *testing.T) {
// Drain // Drain
errs := []error{} errs := []error{}
ps.Spec.Replicas = 0 *(ps.Spec.Replicas) = 0
knownPods := fc.getPodList() knownPods := fc.getPodList()
for i := replicas - 1; i >= 0; i-- { for i := replicas - 1; i >= 0; i-- {
if len(fc.pets) != i+1 { if len(fc.pets) != i+1 {
@@ -143,7 +143,7 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
saturateStatefulSet(t, ps, psc, fc) saturateStatefulSet(t, ps, psc, fc)
fc.setDeletionTimestamp(replicas - 1) fc.setDeletionTimestamp(replicas - 1)
ps.Spec.Replicas = 2 *(ps.Spec.Replicas) = 2
_, err := psc.syncStatefulSet(ps, fc.getPodList()) _, err := psc.syncStatefulSet(ps, fc.getPodList())
if err != nil { if err != nil {
t.Errorf("Error syncing StatefulSet: %v", err) t.Errorf("Error syncing StatefulSet: %v", err)
@@ -169,7 +169,7 @@ func TestStatefulSetControllerRespectsOrder(t *testing.T) {
saturateStatefulSet(t, ps, psc, fc) saturateStatefulSet(t, ps, psc, fc)
errs := []error{} errs := []error{}
ps.Spec.Replicas = 0 *(ps.Spec.Replicas) = 0
// Shuffle known list and check that pets are deleted in reverse // Shuffle known list and check that pets are deleted in reverse
knownPods := fc.getPodList() knownPods := fc.getPodList()
for i := range knownPods { for i := range knownPods {
@@ -285,16 +285,16 @@ type fakeClient struct {
statefulsetClient *fakeStatefulSetClient statefulsetClient *fakeStatefulSetClient
} }
func (c *fakeClient) Apps() internalversion.AppsInterface { func (c *fakeClient) Apps() v1beta1.AppsV1beta1Interface {
return &fakeApps{c, &fake.FakeApps{}} return &fakeApps{c, &fake.FakeAppsV1beta1{}}
} }
type fakeApps struct { type fakeApps struct {
*fakeClient *fakeClient
*fake.FakeApps *fake.FakeAppsV1beta1
} }
func (c *fakeApps) StatefulSets(namespace string) internalversion.StatefulSetInterface { func (c *fakeApps) StatefulSets(namespace string) v1beta1.StatefulSetInterface {
c.statefulsetClient.Namespace = namespace c.statefulsetClient.Namespace = namespace
return c.statefulsetClient return c.statefulsetClient
} }

View File

@@ -20,10 +20,10 @@ import (
"fmt" "fmt"
"sync" "sync"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/apps" apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion" appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog" "github.com/golang/glog"
@@ -51,7 +51,7 @@ func updatePetCount(psClient appsclientset.StatefulSetsGetter, ps apps.StatefulS
var getErr error var getErr error
for i, ps := 0, &ps; ; i++ { for i, ps := 0, &ps; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for StatefulSet: %s/%s, ", ps.Namespace, ps.Name) + glog.V(4).Infof(fmt.Sprintf("Updating replica count for StatefulSet: %s/%s, ", ps.Namespace, ps.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas)) fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, *(ps.Spec.Replicas)))
ps.Status = apps.StatefulSetStatus{Replicas: int32(numPets)} ps.Status = apps.StatefulSetStatus{Replicas: int32(numPets)}
_, updateErr = psClient.StatefulSets(ps.Namespace).UpdateStatus(ps) _, updateErr = psClient.StatefulSets(ps.Namespace).UpdateStatus(ps)
@@ -72,7 +72,7 @@ type unhealthyPetTracker struct {
} }
// Get returns a previously recorded blocking pet for the given statefulset. // Get returns a previously recorded blocking pet for the given statefulset.
func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*api.Pod) (*pcb, error) { func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*v1.Pod) (*pcb, error) {
u.storeLock.Lock() u.storeLock.Lock()
defer u.storeLock.Unlock() defer u.storeLock.Unlock()

View File

@@ -21,11 +21,11 @@ import (
"net/http/httptest" "net/http/httptest"
"testing" "testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -39,10 +39,10 @@ func newPetClient(client *clientset.Clientset) *apiServerPetClient {
} }
func makeTwoDifferntPCB() (pcb1, pcb2 *pcb) { func makeTwoDifferntPCB() (pcb1, pcb2 *pcb) {
userAdded := api.Volume{ userAdded := v1.Volume{
Name: "test", Name: "test",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory}, EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
}, },
} }
ps := newStatefulSet(2) ps := newStatefulSet(2)
@@ -88,14 +88,14 @@ func TestUpdatePetWithoutRetry(t *testing.T) {
} }
for k, tc := range testCases { for k, tc := range testCases {
body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}}) body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{ fakeHandler := utiltesting.FakeHandler{
StatusCode: 200, StatusCode: 200,
ResponseBody: string(body), ResponseBody: string(body),
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client) petClient := newPetClient(client)
err := petClient.Update(tc.realPet, tc.expectedPet) err := petClient.Update(tc.realPet, tc.expectedPet)
@@ -115,7 +115,7 @@ func TestUpdatePetWithFailure(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client) petClient := newPetClient(client)
pcb1, pcb2 := makeTwoDifferntPCB() pcb1, pcb2 := makeTwoDifferntPCB()

View File

@@ -23,15 +23,15 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/autoscaling" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion" unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/autoscaling/v1"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime" utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -75,11 +75,11 @@ var upscaleForbiddenWindow = 3 * time.Minute
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) { func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) {
return cache.NewInformer( return cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options) return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options) return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).Watch(options)
}, },
}, },
&autoscaling.HorizontalPodAutoscaler{}, &autoscaling.HorizontalPodAutoscaler{},
@@ -90,7 +90,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil
_, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName] _, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
if !hasCPUPolicy && !hasCustomMetricsPolicy { if !hasCPUPolicy && !hasCustomMetricsPolicy {
controller.eventRecorder.Event(hpa, api.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details") controller.eventRecorder.Event(hpa, v1.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
} }
err := controller.reconcileAutoscaler(hpa) err := controller.reconcileAutoscaler(hpa)
if err != nil { if err != nil {
@@ -109,10 +109,10 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
) )
} }
func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, replicaCalc *ReplicaCalculator, resyncPeriod time.Duration) *HorizontalController { func NewHorizontalController(evtNamespacer v1core.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, replicaCalc *ReplicaCalculator, resyncPeriod time.Duration) *HorizontalController {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")}) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"}) recorder := broadcaster.NewRecorder(v1.EventSource{Component: "horizontal-pod-autoscaler"})
controller := &HorizontalController{ controller := &HorizontalController{
replicaCalc: replicaCalc, replicaCalc: replicaCalc,
@@ -153,31 +153,31 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling
if scale.Status.Selector == nil { if scale.Status.Selector == nil {
errMsg := "selector is required" errMsg := "selector is required"
a.eventRecorder.Event(hpa, api.EventTypeWarning, "SelectorRequired", errMsg) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
return 0, nil, time.Time{}, fmt.Errorf(errMsg) return 0, nil, time.Time{}, fmt.Errorf(errMsg)
} }
selector, err := unversioned.LabelSelectorAsSelector(scale.Status.Selector) selector, err := unversioned.LabelSelectorAsSelector(&unversioned.LabelSelector{MatchLabels: scale.Status.Selector})
if err != nil { if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err) errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
return 0, nil, time.Time{}, fmt.Errorf(errMsg) return 0, nil, time.Time{}, fmt.Errorf(errMsg)
} }
desiredReplicas, utilization, timestamp, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, api.ResourceCPU, hpa.Namespace, selector) desiredReplicas, utilization, timestamp, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, v1.ResourceCPU, hpa.Namespace, selector)
if err != nil { if err != nil {
lastScaleTime := getLastScaleTime(hpa) lastScaleTime := getLastScaleTime(hpa)
if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) { if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetMetrics", err.Error())
} else { } else {
a.eventRecorder.Event(hpa, api.EventTypeNormal, "MetricsNotAvailableYet", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeNormal, "MetricsNotAvailableYet", err.Error())
} }
return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err) return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err)
} }
if desiredReplicas != currentReplicas { if desiredReplicas != currentReplicas {
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "DesiredReplicasComputed", a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "DesiredReplicasComputed",
"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)", "Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
desiredReplicas, utilization, scale.Status.Replicas) desiredReplicas, utilization, scale.Status.Replicas)
} }
@@ -201,11 +201,11 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
var targetList extensions.CustomMetricTargetList var targetList extensions.CustomMetricTargetList
if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil { if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedParseCustomMetricsAnnotation", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedParseCustomMetricsAnnotation", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err) return 0, "", "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
} }
if len(targetList.Items) == 0 { if len(targetList.Items) == 0 {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "NoCustomMetricsInAnnotation", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "NoCustomMetricsInAnnotation", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("no custom metrics in annotation") return 0, "", "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
} }
@@ -216,14 +216,14 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
for _, customMetricTarget := range targetList.Items { for _, customMetricTarget := range targetList.Items {
if scale.Status.Selector == nil { if scale.Status.Selector == nil {
errMsg := "selector is required" errMsg := "selector is required"
a.eventRecorder.Event(hpa, api.EventTypeWarning, "SelectorRequired", errMsg) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
return 0, "", "", time.Time{}, fmt.Errorf("selector is required") return 0, "", "", time.Time{}, fmt.Errorf("selector is required")
} }
selector, err := unversioned.LabelSelectorAsSelector(scale.Status.Selector) selector, err := unversioned.LabelSelectorAsSelector(&unversioned.LabelSelector{MatchLabels: scale.Status.Selector})
if err != nil { if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err) errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
return 0, "", "", time.Time{}, fmt.Errorf("couldn't convert selector string to a corresponding selector object: %v", err) return 0, "", "", time.Time{}, fmt.Errorf("couldn't convert selector string to a corresponding selector object: %v", err)
} }
floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0 floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
@@ -231,9 +231,9 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
if err != nil { if err != nil {
lastScaleTime := getLastScaleTime(hpa) lastScaleTime := getLastScaleTime(hpa)
if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) { if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
} else { } else {
a.eventRecorder.Event(hpa, api.EventTypeNormal, "CustomMetricsNotAvailableYet", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeNormal, "CustomMetricsNotAvailableYet", err.Error())
} }
return 0, "", "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err) return 0, "", "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
@@ -246,7 +246,7 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
} }
quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", utilizationProposal)) quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", utilizationProposal))
if err != nil { if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedSetCustomMetrics", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedSetCustomMetrics", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err) return 0, "", "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err)
} }
statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{ statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{
@@ -256,14 +256,14 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
} }
byteStatusList, err := json.Marshal(statusList) byteStatusList, err := json.Marshal(statusList)
if err != nil { if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedSerializeCustomMetrics", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedSerializeCustomMetrics", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err) return 0, "", "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
} }
if replicas != currentReplicas { if replicas != currentReplicas {
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "DesiredReplicasComputedCustomMetric", a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "DesiredReplicasComputedCustomMetric",
"Computed the desired num of replicas: %d, metric: %s, current replicas: %d", "Computed the desired num of replicas: %d, metric: %s, current replicas: %d",
int32(replicas), metric, scale.Status.Replicas) func() *int32 { i := int32(replicas); return &i }(), metric, scale.Status.Replicas)
} }
return replicas, metric, string(byteStatusList), timestamp, nil return replicas, metric, string(byteStatusList), timestamp, nil
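For context on the inline closure above: the versioned APIs use *int32 for several replica-count fields (for example MinReplicas in autoscaling/v1), so pointer construction from a computed value shows up repeatedly in this change. A tiny helper is a common alternative; int32Ptr below is an assumed illustration, not part of this commit:

package main

import "fmt"

// int32Ptr returns a pointer to its argument; hypothetical helper for the
// *int32 fields used by the versioned API types.
func int32Ptr(i int32) *int32 { return &i }

func main() {
	desired := int32Ptr(4)
	fmt.Println(*desired) // prints: 4
}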
@@ -274,7 +274,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPo
scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name) scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
if err != nil { if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err) return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
} }
currentReplicas := scale.Status.Replicas currentReplicas := scale.Status.Replicas
@@ -367,10 +367,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPo
scale.Spec.Replicas = desiredReplicas scale.Spec.Replicas = desiredReplicas
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale) _, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
if err != nil { if err != nil {
a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error()) a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
return fmt.Errorf("failed to rescale %s: %v", reference, err) return fmt.Errorf("failed to rescale %s: %v", reference, err)
} }
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason) a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
glog.Infof("Successfull rescale of %s, old size: %d, new size: %d, reason: %s", glog.Infof("Successfull rescale of %s, old size: %d, new size: %d, reason: %s",
hpa.Name, currentReplicas, desiredReplicas, rescaleReason) hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
} else { } else {
@@ -428,7 +428,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscaling.HorizontalPodAutosc
_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa) _, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa)
if err != nil { if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error()) a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err) return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
} }
glog.V(2).Infof("Successfully updated status for %s", hpa.Name) glog.V(2).Infof("Successfully updated status for %s", hpa.Name)
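The selector handling in the hunks above changes because the versioned extensions/v1beta1 ScaleStatus exposes the selector as a plain map[string]string rather than a *LabelSelector, so it must be wrapped before it can become a labels.Selector. A minimal sketch of that conversion, assuming the release-1.5 import paths used in this diff (the "name": "frontend" labels are made up for illustration):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/unversioned"
)

func main() {
	// ScaleStatus.Selector in extensions/v1beta1 is a map[string]string.
	statusSelector := map[string]string{"name": "frontend"}

	// Wrap it in a LabelSelector so it can be converted, mirroring the
	// call sites above.
	selector, err := unversioned.LabelSelectorAsSelector(&unversioned.LabelSelector{MatchLabels: statusSelector})
	if err != nil {
		panic(err)
	}
	fmt.Println(selector.String()) // prints: name=frontend
}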


@@ -27,15 +27,14 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered" _ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/autoscaling" autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
@@ -84,7 +83,7 @@ type testCase struct {
verifyCPUCurrent bool verifyCPUCurrent bool
reportedLevels []uint64 reportedLevels []uint64
reportedCPURequests []resource.Quantity reportedCPURequests []resource.Quantity
reportedPodReadiness []api.ConditionStatus reportedPodReadiness []v1.ConditionStatus
cmTarget *extensions.CustomMetricTargetList cmTarget *extensions.CustomMetricTargetList
scaleUpdated bool scaleUpdated bool
statusUpdated bool statusUpdated bool
@@ -151,7 +150,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
obj := &autoscaling.HorizontalPodAutoscalerList{ obj := &autoscaling.HorizontalPodAutoscalerList{
Items: []autoscaling.HorizontalPodAutoscaler{ Items: []autoscaling.HorizontalPodAutoscaler{
{ {
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: hpaName, Name: hpaName,
Namespace: namespace, Namespace: namespace,
SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName, SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
@@ -192,7 +191,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
defer tc.Unlock() defer tc.Unlock()
obj := &extensions.Scale{ obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: tc.resource.name, Name: tc.resource.name,
Namespace: namespace, Namespace: namespace,
}, },
@@ -201,7 +200,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
}, },
Status: extensions.ScaleStatus{ Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas, Replicas: tc.initialReplicas,
Selector: selector, Selector: selector.MatchLabels,
}, },
} }
return true, obj, nil return true, obj, nil
@@ -212,7 +211,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
defer tc.Unlock() defer tc.Unlock()
obj := &extensions.Scale{ obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: tc.resource.name, Name: tc.resource.name,
Namespace: namespace, Namespace: namespace,
}, },
@@ -221,7 +220,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
}, },
Status: extensions.ScaleStatus{ Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas, Replicas: tc.initialReplicas,
Selector: selector, Selector: selector.MatchLabels,
}, },
} }
return true, obj, nil return true, obj, nil
@@ -232,7 +231,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
defer tc.Unlock() defer tc.Unlock()
obj := &extensions.Scale{ obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: tc.resource.name, Name: tc.resource.name,
Namespace: namespace, Namespace: namespace,
}, },
@@ -241,7 +240,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
}, },
Status: extensions.ScaleStatus{ Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas, Replicas: tc.initialReplicas,
Selector: selector, Selector: selector.MatchLabels,
}, },
} }
return true, obj, nil return true, obj, nil
@@ -251,36 +250,36 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
tc.Lock() tc.Lock()
defer tc.Unlock() defer tc.Unlock()
obj := &api.PodList{} obj := &v1.PodList{}
for i := 0; i < len(tc.reportedCPURequests); i++ { for i := 0; i < len(tc.reportedCPURequests); i++ {
podReadiness := api.ConditionTrue podReadiness := v1.ConditionTrue
if tc.reportedPodReadiness != nil { if tc.reportedPodReadiness != nil {
podReadiness = tc.reportedPodReadiness[i] podReadiness = tc.reportedPodReadiness[i]
} }
podName := fmt.Sprintf("%s-%d", podNamePrefix, i) podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := api.Pod{ pod := v1.Pod{
Status: api.PodStatus{ Status: v1.PodStatus{
Phase: api.PodRunning, Phase: v1.PodRunning,
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: api.PodReady, Type: v1.PodReady,
Status: podReadiness, Status: podReadiness,
}, },
}, },
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: namespace, Namespace: namespace,
Labels: map[string]string{ Labels: map[string]string{
"name": podNamePrefix, "name": podNamePrefix,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceCPU: tc.reportedCPURequests[i], v1.ResourceCPU: tc.reportedCPURequests[i],
}, },
}, },
}, },
@@ -420,7 +419,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
tc.Lock() tc.Lock()
defer tc.Unlock() defer tc.Unlock()
obj := action.(core.CreateAction).GetObject().(*api.Event) obj := action.(core.CreateAction).GetObject().(*v1.Event)
if tc.verifyEvents { if tc.verifyEvents {
switch obj.Reason { switch obj.Reason {
case "SuccessfulRescale": case "SuccessfulRescale":
@@ -460,8 +459,8 @@ func (tc *testCase) runTest(t *testing.T) {
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort) metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)
broadcaster := record.NewBroadcasterForTests(0) broadcaster := record.NewBroadcasterForTests(0)
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: testClient.Core().Events("")}) broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: testClient.Core().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"}) recorder := broadcaster.NewRecorder(v1.EventSource{Component: "horizontal-pod-autoscaler"})
replicaCalc := &ReplicaCalculator{ replicaCalc := &ReplicaCalculator{
metricsClient: metricsClient, metricsClient: metricsClient,
@@ -574,7 +573,7 @@ func TestScaleUpUnreadyLessScale(t *testing.T) {
verifyCPUCurrent: true, verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700}, reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue}, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
useMetricsApi: true, useMetricsApi: true,
} }
tc.runTest(t) tc.runTest(t)
@@ -591,7 +590,7 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
verifyCPUCurrent: true, verifyCPUCurrent: true,
reportedLevels: []uint64{400, 500, 700}, reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionFalse, api.ConditionFalse}, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsApi: true, useMetricsApi: true,
} }
tc.runTest(t) tc.runTest(t)
@@ -670,7 +669,7 @@ func TestScaleUpCMUnreadyLessScale(t *testing.T) {
}}, }},
}, },
reportedLevels: []uint64{50, 10, 30}, reportedLevels: []uint64{50, 10, 30},
reportedPodReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionFalse}, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
} }
tc.runTest(t) tc.runTest(t)
@@ -690,7 +689,7 @@ func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
}}, }},
}, },
reportedLevels: []uint64{50, 15, 30}, reportedLevels: []uint64{50, 15, 30},
reportedPodReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionFalse}, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
} }
tc.runTest(t) tc.runTest(t)
@@ -755,7 +754,7 @@ func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
reportedLevels: []uint64{100, 300, 500, 250, 250}, reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true, useMetricsApi: true,
reportedPodReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionTrue, api.ConditionFalse, api.ConditionFalse}, reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
} }
tc.runTest(t) tc.runTest(t)
} }


@@ -23,10 +23,9 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
heapster "k8s.io/heapster/metrics/api/v1/types" heapster "k8s.io/heapster/metrics/api/v1/types"
@@ -46,7 +45,7 @@ type PodMetricsInfo map[string]float64
type MetricsClient interface { type MetricsClient interface {
// GetResourceMetric gets the given resource metric (and an associated oldest timestamp) // GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace // for all pods matching the specified selector in the given namespace
GetResourceMetric(resource api.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error)
// GetRawMetric gets the given metric (and an associated oldest timestamp) // GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace // for all pods matching the specified selector in the given namespace
@@ -63,8 +62,8 @@ const (
var heapsterQueryStart = -5 * time.Minute var heapsterQueryStart = -5 * time.Minute
type HeapsterMetricsClient struct { type HeapsterMetricsClient struct {
services unversionedcore.ServiceInterface services v1core.ServiceInterface
podsGetter unversionedcore.PodsGetter podsGetter v1core.PodsGetter
heapsterScheme string heapsterScheme string
heapsterService string heapsterService string
heapsterPort string heapsterPort string
@@ -80,7 +79,7 @@ func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, ser
} }
} }
func (h *HeapsterMetricsClient) GetResourceMetric(resource api.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) { func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace) metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
params := map[string]string{"labelSelector": selector.String()} params := map[string]string{"labelSelector": selector.String()}
@@ -132,7 +131,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource api.ResourceName, nam
} }
func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) { func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
podList, err := h.podsGetter.Pods(namespace).List(api.ListOptions{LabelSelector: selector}) podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil { if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err) return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
} }
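The List call above switches from the internal api.ListOptions, which carried a labels.Selector value, to the versioned v1.ListOptions, whose LabelSelector field is a string; hence the selector.String() serialization. A minimal sketch of that pattern, assuming the release-1.5 client packages shown in this diff (listMatchingPods is an illustrative name, not part of the commit):

package podmetricsexample

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/labels"
)

// listMatchingPods lists pods through the versioned client; the selector is
// rendered to its string form because v1.ListOptions.LabelSelector is a string.
func listMatchingPods(c clientset.Interface, namespace string, selector labels.Selector) (*v1.PodList, error) {
	return c.Core().Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
}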


@@ -23,12 +23,11 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered" _ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@@ -76,7 +75,7 @@ type testCase struct {
namespace string namespace string
selector labels.Selector selector labels.Selector
resourceName api.ResourceName resourceName v1.ResourceName
metricName string metricName string
} }
@@ -93,10 +92,10 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
fakeClient := &fake.Clientset{} fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{} obj := &v1.PodList{}
for i := 0; i < tc.replicas; i++ { for i := 0; i < tc.replicas; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i) podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := buildPod(namespace, podName, podLabels, api.PodRunning, "1024") pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
obj.Items = append(obj.Items, pod) obj.Items = append(obj.Items, pod)
} }
return true, obj, nil return true, obj, nil
@@ -161,30 +160,30 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
return fakeClient return fakeClient
} }
func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase, request string) api.Pod { func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
return api.Pod{ return v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: namespace, Namespace: namespace,
Labels: podLabels, Labels: podLabels,
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Resources: api.ResourceRequirements{ Resources: v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
api.ResourceCPU: resource.MustParse(request), v1.ResourceCPU: resource.MustParse(request),
}, },
}, },
}, },
}, },
}, },
Status: api.PodStatus{ Status: v1.PodStatus{
Phase: phase, Phase: phase,
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: api.PodReady, Type: v1.PodReady,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
}, },
}, },
}, },
@@ -231,7 +230,7 @@ func TestCPU(t *testing.T) {
desiredResourceValues: PodResourceInfo{ desiredResourceValues: PodResourceInfo{
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000, "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
}, },
resourceName: api.ResourceCPU, resourceName: v1.ResourceCPU,
targetTimestamp: 1, targetTimestamp: 1,
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}}, reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
} }
@@ -271,7 +270,7 @@ func TestCPUMoreMetrics(t *testing.T) {
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000, "test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
"test-pod-3": 5000, "test-pod-4": 5000, "test-pod-3": 5000, "test-pod-4": 5000,
}, },
resourceName: api.ResourceCPU, resourceName: v1.ResourceCPU,
targetTimestamp: 10, targetTimestamp: 10,
reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}}, reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
} }
@@ -284,7 +283,7 @@ func TestCPUMissingMetrics(t *testing.T) {
desiredResourceValues: PodResourceInfo{ desiredResourceValues: PodResourceInfo{
"test-pod-0": 4000, "test-pod-0": 4000,
}, },
resourceName: api.ResourceCPU, resourceName: v1.ResourceCPU,
reportedPodMetrics: [][]int64{{4000}}, reportedPodMetrics: [][]int64{{4000}},
} }
tc.runTest(t) tc.runTest(t)
@@ -314,7 +313,7 @@ func TestQpsSuperfluousMetrics(t *testing.T) {
func TestCPUEmptyMetrics(t *testing.T) { func TestCPUEmptyMetrics(t *testing.T) {
tc := testCase{ tc := testCase{
replicas: 3, replicas: 3,
resourceName: api.ResourceCPU, resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"), desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedMetricsPoints: [][]metricPoint{}, reportedMetricsPoints: [][]metricPoint{},
reportedPodMetrics: [][]int64{}, reportedPodMetrics: [][]int64{},
@@ -338,7 +337,7 @@ func TestQpsEmptyEntries(t *testing.T) {
func TestCPUZeroReplicas(t *testing.T) { func TestCPUZeroReplicas(t *testing.T) {
tc := testCase{ tc := testCase{
replicas: 0, replicas: 0,
resourceName: api.ResourceCPU, resourceName: v1.ResourceCPU,
desiredError: fmt.Errorf("no metrics returned from heapster"), desiredError: fmt.Errorf("no metrics returned from heapster"),
reportedPodMetrics: [][]int64{}, reportedPodMetrics: [][]int64{},
} }
@@ -348,7 +347,7 @@ func TestCPUZeroReplicas(t *testing.T) {
func TestCPUEmptyMetricsForOnePod(t *testing.T) { func TestCPUEmptyMetricsForOnePod(t *testing.T) {
tc := testCase{ tc := testCase{
replicas: 3, replicas: 3,
resourceName: api.ResourceCPU, resourceName: v1.ResourceCPU,
desiredResourceValues: PodResourceInfo{ desiredResourceValues: PodResourceInfo{
"test-pod-0": 100, "test-pod-1": 700, "test-pod-0": 100, "test-pod-1": 700,
}, },


@@ -21,8 +21,8 @@ import (
"math" "math"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
@@ -30,10 +30,10 @@ import (
type ReplicaCalculator struct { type ReplicaCalculator struct {
metricsClient metricsclient.MetricsClient metricsClient metricsclient.MetricsClient
podsGetter unversionedcore.PodsGetter podsGetter v1core.PodsGetter
} }
func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter unversionedcore.PodsGetter) *ReplicaCalculator { func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter v1core.PodsGetter) *ReplicaCalculator {
return &ReplicaCalculator{ return &ReplicaCalculator{
metricsClient: metricsClient, metricsClient: metricsClient,
podsGetter: podsGetter, podsGetter: podsGetter,
@@ -42,13 +42,13 @@ func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter
// GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage // GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
// of the given resource for pods matching the given selector in the given namespace, and the current replica count // of the given resource for pods matching the given selector in the given namespace, and the current replica count
func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource api.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) { func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector) metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
if err != nil { if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err) return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
} }
podList, err := c.podsGetter.Pods(namespace).List(api.ListOptions{LabelSelector: selector}) podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil { if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err) return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
} }
@@ -74,7 +74,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti
requests[pod.Name] = podSum requests[pod.Name] = podSum
if pod.Status.Phase != api.PodRunning || !api.IsPodReady(&pod) { if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now // save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name) unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name) delete(metrics, pod.Name)
@@ -156,7 +156,7 @@ func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtili
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err) return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
} }
podList, err := c.podsGetter.Pods(namespace).List(api.ListOptions{LabelSelector: selector}) podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil { if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err) return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
} }
@@ -170,7 +170,7 @@ func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtili
missingPods := sets.NewString() missingPods := sets.NewString()
for _, pod := range podList.Items { for _, pod := range podList.Items {
if pod.Status.Phase != api.PodRunning || !api.IsPodReady(&pod) { if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now // save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name) unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name) delete(metrics, pod.Name)
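For context on what GetResourceReplicas is computing: setting aside the tolerance band and the unready/missing-pod adjustments handled above, the core arithmetic scales the replica count by the ratio of observed to target utilization and rounds up. A simplified, standalone sketch of that calculation (not the controller's actual code path):

package main

import (
	"fmt"
	"math"
)

// desiredReplicas is a simplified model of the scaling arithmetic: scale the
// current replica count by observed/target utilization and round up.
func desiredReplicas(currentReplicas, currentUtilization, targetUtilization int32) int32 {
	usageRatio := float64(currentUtilization) / float64(targetUtilization)
	return int32(math.Ceil(usageRatio * float64(currentReplicas)))
}

func main() {
	// 3 pods averaging 50% CPU against a 25% target suggests 6 replicas.
	fmt.Println(desiredReplicas(3, 50, 25)) // prints: 6
}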


@@ -25,12 +25,11 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered" _ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics" "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
@@ -44,7 +43,7 @@ import (
) )
type resourceInfo struct { type resourceInfo struct {
name api.ResourceName name v1.ResourceName
requests []resource.Quantity requests []resource.Quantity
levels []int64 levels []int64
@@ -70,7 +69,7 @@ type replicaCalcTestCase struct {
resource *resourceInfo resource *resourceInfo
metric *metricInfo metric *metricInfo
podReadiness []api.ConditionStatus podReadiness []v1.ConditionStatus
} }
const ( const (
@@ -82,43 +81,43 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {
fakeClient := &fake.Clientset{} fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{} obj := &v1.PodList{}
for i := 0; i < int(tc.currentReplicas); i++ { for i := 0; i < int(tc.currentReplicas); i++ {
podReadiness := api.ConditionTrue podReadiness := v1.ConditionTrue
if tc.podReadiness != nil { if tc.podReadiness != nil {
podReadiness = tc.podReadiness[i] podReadiness = tc.podReadiness[i]
} }
podName := fmt.Sprintf("%s-%d", podNamePrefix, i) podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := api.Pod{ pod := v1.Pod{
Status: api.PodStatus{ Status: v1.PodStatus{
Phase: api.PodRunning, Phase: v1.PodRunning,
Conditions: []api.PodCondition{ Conditions: []v1.PodCondition{
{ {
Type: api.PodReady, Type: v1.PodReady,
Status: podReadiness, Status: podReadiness,
}, },
}, },
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: testNamespace, Namespace: testNamespace,
Labels: map[string]string{ Labels: map[string]string{
"name": podNamePrefix, "name": podNamePrefix,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{{}, {}}, Containers: []v1.Container{{}, {}},
}, },
} }
if tc.resource != nil && i < len(tc.resource.requests) { if tc.resource != nil && i < len(tc.resource.requests) {
pod.Spec.Containers[0].Resources = api.ResourceRequirements{ pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
tc.resource.name: tc.resource.requests[i], tc.resource.name: tc.resource.requests[i],
}, },
} }
pod.Spec.Containers[1].Resources = api.ResourceRequirements{ pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
Requests: api.ResourceList{ Requests: v1.ResourceList{
tc.resource.name: tc.resource.requests[i], tc.resource.name: tc.resource.requests[i],
}, },
} }
@@ -255,7 +254,7 @@ func TestReplicaCalcScaleUp(t *testing.T) {
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 5, expectedReplicas: 5,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700}, levels: []int64{300, 500, 700},
@@ -270,9 +269,9 @@ func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 4, expectedReplicas: 4,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue}, podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700}, levels: []int64{300, 500, 700},
@@ -287,9 +286,9 @@ func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 3, expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionFalse, api.ConditionFalse}, podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 500, 700}, levels: []int64{400, 500, 700},
@@ -318,7 +317,7 @@ func TestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 4, expectedReplicas: 4,
podReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionFalse}, podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{ metric: &metricInfo{
name: "qps", name: "qps",
levels: []float64{50.0, 10.0, 30.0}, levels: []float64{50.0, 10.0, 30.0},
@@ -333,7 +332,7 @@ func TestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 3, expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionFalse}, podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{ metric: &metricInfo{
name: "qps", name: "qps",
levels: []float64{50.0, 15.0, 30.0}, levels: []float64{50.0, 15.0, 30.0},
@@ -349,7 +348,7 @@ func TestReplicaCalcScaleDown(t *testing.T) {
currentReplicas: 5, currentReplicas: 5,
expectedReplicas: 3, expectedReplicas: 3,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250}, levels: []int64{100, 300, 500, 250, 250},
@@ -378,9 +377,9 @@ func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 5, currentReplicas: 5,
expectedReplicas: 2, expectedReplicas: 2,
podReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionTrue, api.ConditionFalse, api.ConditionFalse}, podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250}, levels: []int64{100, 300, 500, 250, 250},
@@ -396,7 +395,7 @@ func TestReplicaCalcTolerance(t *testing.T) {
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 3, expectedReplicas: 3,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")}, requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: []int64{1010, 1030, 1020}, levels: []int64{1010, 1030, 1020},
@@ -426,7 +425,7 @@ func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
currentReplicas: 4, currentReplicas: 4,
expectedReplicas: 24, expectedReplicas: 24,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{4000, 9500, 3000, 7000, 3200, 2000}, levels: []int64{4000, 9500, 3000, 7000, 3200, 2000},
targetUtilization: 100, targetUtilization: 100,
@@ -441,7 +440,7 @@ func TestReplicaCalcMissingMetrics(t *testing.T) {
currentReplicas: 4, currentReplicas: 4,
expectedReplicas: 3, expectedReplicas: 3,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 95}, levels: []int64{400, 95},
@@ -457,7 +456,7 @@ func TestReplicaCalcEmptyMetrics(t *testing.T) {
currentReplicas: 4, currentReplicas: 4,
expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"), expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{}, levels: []int64{},
@@ -472,7 +471,7 @@ func TestReplicaCalcEmptyCPURequest(t *testing.T) {
currentReplicas: 1, currentReplicas: 1,
expectedError: fmt.Errorf("missing request for"), expectedError: fmt.Errorf("missing request for"),
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{}, requests: []resource.Quantity{},
levels: []int64{200}, levels: []int64{200},
@@ -487,7 +486,7 @@ func TestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
currentReplicas: 2, currentReplicas: 2,
expectedReplicas: 2, expectedReplicas: 2,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{1000}, levels: []int64{1000},
@@ -503,7 +502,7 @@ func TestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
currentReplicas: 2, currentReplicas: 2,
expectedReplicas: 2, expectedReplicas: 2,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{1900}, levels: []int64{1900},
@@ -519,7 +518,7 @@ func TestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
currentReplicas: 2, currentReplicas: 2,
expectedReplicas: 2, expectedReplicas: 2,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{600}, levels: []int64{600},
@@ -534,9 +533,9 @@ func TestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 3, expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue}, podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 450}, levels: []int64{100, 450},
@@ -551,9 +550,9 @@ func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 3, currentReplicas: 3,
expectedReplicas: 4, expectedReplicas: 4,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue}, podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 2000}, levels: []int64{100, 2000},
@@ -568,9 +567,9 @@ func TestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
tc := replicaCalcTestCase{ tc := replicaCalcTestCase{
currentReplicas: 4, currentReplicas: 4,
expectedReplicas: 3, expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue, api.ConditionTrue}, podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")}, requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 100, 100}, levels: []int64{100, 100, 100},
@@ -609,7 +608,7 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
currentReplicas: startPods, currentReplicas: startPods,
expectedReplicas: finalPods, expectedReplicas: finalPods,
resource: &resourceInfo{ resource: &resourceInfo{
name: api.ResourceCPU, name: v1.ResourceCPU,
levels: []int64{ levels: []int64{
totalUsedCPUOfAllPods / 10, totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10, totalUsedCPUOfAllPods / 10,


@@ -21,9 +21,9 @@ import (
"sync" "sync"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
@@ -69,7 +69,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
terminatedPodThreshold: terminatedPodThreshold, terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error { deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name) glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
return kubeClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0)) return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
}, },
} }
@@ -78,14 +78,14 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
gcc.nodeStore.Store, gcc.nodeController = cache.NewInformer( gcc.nodeStore.Store, gcc.nodeController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return gcc.kubeClient.Core().Nodes().List(options) return gcc.kubeClient.Core().Nodes().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return gcc.kubeClient.Core().Nodes().Watch(options) return gcc.kubeClient.Core().Nodes().Watch(options)
}, },
}, },
&api.Node{}, &v1.Node{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
) )
@@ -129,15 +129,15 @@ func (gcc *PodGCController) gc() {
gcc.gcUnscheduledTerminating(pods) gcc.gcUnscheduledTerminating(pods)
} }
func isPodTerminated(pod *api.Pod) bool { func isPodTerminated(pod *v1.Pod) bool {
if phase := pod.Status.Phase; phase != api.PodPending && phase != api.PodRunning && phase != api.PodUnknown { if phase := pod.Status.Phase; phase != v1.PodPending && phase != v1.PodRunning && phase != v1.PodUnknown {
return true return true
} }
return false return false
} }
func (gcc *PodGCController) gcTerminated(pods []*api.Pod) { func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
terminatedPods := []*api.Pod{} terminatedPods := []*v1.Pod{}
for _, pod := range pods { for _, pod := range pods {
if isPodTerminated(pod) { if isPodTerminated(pod) {
terminatedPods = append(terminatedPods, pod) terminatedPods = append(terminatedPods, pod)
@@ -171,7 +171,7 @@ func (gcc *PodGCController) gcTerminated(pods []*api.Pod) {
} }
// gcOrphaned deletes pods that are bound to nodes that don't exist. // gcOrphaned deletes pods that are bound to nodes that don't exist.
func (gcc *PodGCController) gcOrphaned(pods []*api.Pod) { func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing orphaned") glog.V(4).Infof("GC'ing orphaned")
for _, pod := range pods { for _, pod := range pods {
@@ -191,7 +191,7 @@ func (gcc *PodGCController) gcOrphaned(pods []*api.Pod) {
} }
// gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node. // gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node.
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*api.Pod) { func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing unscheduled pods which are terminating.") glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")
for _, pod := range pods { for _, pod := range pods {
@@ -209,7 +209,7 @@ func (gcc *PodGCController) gcUnscheduledTerminating(pods []*api.Pod) {
} }
// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker. // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []*api.Pod type byCreationTimestamp []*v1.Pod
func (o byCreationTimestamp) Len() int { return len(o) } func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
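The Less method falls outside the changed hunk and is not shown; a plausible implementation matching the comment above (creation timestamp first, name as the tie breaker) would look like this, with sort.Sort as the expected caller:

    // Assumed shape of the comparison described by the comment above.
    func (o byCreationTimestamp) Less(i, j int) bool {
    	if o[i].CreationTimestamp.Equal(o[j].CreationTimestamp) {
    		return o[i].Name < o[j].Name
    	}
    	return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
    }

    // Typical use when trimming the oldest terminated pods:
    //   sort.Sort(byCreationTimestamp(terminatedPods))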


@@ -21,10 +21,10 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )
@@ -40,7 +40,7 @@ func (*FakeController) HasSynced() bool {
func TestGCTerminated(t *testing.T) { func TestGCTerminated(t *testing.T) {
type nameToPhase struct { type nameToPhase struct {
name string name string
phase api.PodPhase phase v1.PodPhase
} }
testCases := []struct { testCases := []struct {
@@ -50,8 +50,8 @@ func TestGCTerminated(t *testing.T) {
}{ }{
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed}, {name: "a", phase: v1.PodFailed},
{name: "b", phase: api.PodSucceeded}, {name: "b", phase: v1.PodSucceeded},
}, },
threshold: 0, threshold: 0,
// threshold = 0 disables terminated pod deletion // threshold = 0 disables terminated pod deletion
@@ -59,34 +59,34 @@ func TestGCTerminated(t *testing.T) {
}, },
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed}, {name: "a", phase: v1.PodFailed},
{name: "b", phase: api.PodSucceeded}, {name: "b", phase: v1.PodSucceeded},
{name: "c", phase: api.PodFailed}, {name: "c", phase: v1.PodFailed},
}, },
threshold: 1, threshold: 1,
deletedPodNames: sets.NewString("a", "b"), deletedPodNames: sets.NewString("a", "b"),
}, },
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodRunning}, {name: "a", phase: v1.PodRunning},
{name: "b", phase: api.PodSucceeded}, {name: "b", phase: v1.PodSucceeded},
{name: "c", phase: api.PodFailed}, {name: "c", phase: v1.PodFailed},
}, },
threshold: 1, threshold: 1,
deletedPodNames: sets.NewString("b"), deletedPodNames: sets.NewString("b"),
}, },
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed}, {name: "a", phase: v1.PodFailed},
{name: "b", phase: api.PodSucceeded}, {name: "b", phase: v1.PodSucceeded},
}, },
threshold: 1, threshold: 1,
deletedPodNames: sets.NewString("a"), deletedPodNames: sets.NewString("a"),
}, },
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed}, {name: "a", phase: v1.PodFailed},
{name: "b", phase: api.PodSucceeded}, {name: "b", phase: v1.PodSucceeded},
}, },
threshold: 5, threshold: 5,
deletedPodNames: sets.NewString(), deletedPodNames: sets.NewString(),
@@ -108,16 +108,16 @@ func TestGCTerminated(t *testing.T) {
creationTime := time.Unix(0, 0) creationTime := time.Unix(0, 0)
for _, pod := range test.pods { for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour) creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&api.Pod{ gcc.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}}, ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: api.PodStatus{Phase: pod.phase}, Status: v1.PodStatus{Phase: pod.phase},
Spec: api.PodSpec{NodeName: "node"}, Spec: v1.PodSpec{NodeName: "node"},
}) })
} }
store := cache.NewStore(cache.MetaNamespaceKeyFunc) store := cache.NewStore(cache.MetaNamespaceKeyFunc)
store.Add(&api.Node{ store.Add(&v1.Node{
ObjectMeta: api.ObjectMeta{Name: "node"}, ObjectMeta: v1.ObjectMeta{Name: "node"},
}) })
gcc.nodeStore = cache.StoreToNodeLister{Store: store} gcc.nodeStore = cache.StoreToNodeLister{Store: store}
gcc.podController = &FakeController{} gcc.podController = &FakeController{}
@@ -143,7 +143,7 @@ func TestGCTerminated(t *testing.T) {
func TestGCOrphaned(t *testing.T) { func TestGCOrphaned(t *testing.T) {
type nameToPhase struct { type nameToPhase struct {
name string name string
phase api.PodPhase phase v1.PodPhase
} }
testCases := []struct { testCases := []struct {
@@ -153,15 +153,15 @@ func TestGCOrphaned(t *testing.T) {
}{ }{
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed}, {name: "a", phase: v1.PodFailed},
{name: "b", phase: api.PodSucceeded}, {name: "b", phase: v1.PodSucceeded},
}, },
threshold: 0, threshold: 0,
deletedPodNames: sets.NewString("a", "b"), deletedPodNames: sets.NewString("a", "b"),
}, },
{ {
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodRunning}, {name: "a", phase: v1.PodRunning},
}, },
threshold: 1, threshold: 1,
deletedPodNames: sets.NewString("a"), deletedPodNames: sets.NewString("a"),
@@ -183,10 +183,10 @@ func TestGCOrphaned(t *testing.T) {
creationTime := time.Unix(0, 0) creationTime := time.Unix(0, 0)
for _, pod := range test.pods { for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour) creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&api.Pod{ gcc.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}}, ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: api.PodStatus{Phase: pod.phase}, Status: v1.PodStatus{Phase: pod.phase},
Spec: api.PodSpec{NodeName: "node"}, Spec: v1.PodSpec{NodeName: "node"},
}) })
} }
@@ -220,7 +220,7 @@ func TestGCOrphaned(t *testing.T) {
func TestGCUnscheduledTerminating(t *testing.T) { func TestGCUnscheduledTerminating(t *testing.T) {
type nameToPhase struct { type nameToPhase struct {
name string name string
phase api.PodPhase phase v1.PodPhase
deletionTimeStamp *unversioned.Time deletionTimeStamp *unversioned.Time
nodeName string nodeName string
} }
@@ -233,18 +233,18 @@ func TestGCUnscheduledTerminating(t *testing.T) {
{ {
name: "Unscheduled pod in any phase must be deleted", name: "Unscheduled pod in any phase must be deleted",
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed, deletionTimeStamp: &unversioned.Time{}, nodeName: ""}, {name: "a", phase: v1.PodFailed, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "b", phase: api.PodSucceeded, deletionTimeStamp: &unversioned.Time{}, nodeName: ""}, {name: "b", phase: v1.PodSucceeded, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "c", phase: api.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: ""}, {name: "c", phase: v1.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
}, },
deletedPodNames: sets.NewString("a", "b", "c"), deletedPodNames: sets.NewString("a", "b", "c"),
}, },
{ {
name: "Scheduled pod in any phase must not be deleted", name: "Scheduled pod in any phase must not be deleted",
pods: []nameToPhase{ pods: []nameToPhase{
{name: "a", phase: api.PodFailed, deletionTimeStamp: nil, nodeName: ""}, {name: "a", phase: v1.PodFailed, deletionTimeStamp: nil, nodeName: ""},
{name: "b", phase: api.PodSucceeded, deletionTimeStamp: nil, nodeName: "node"}, {name: "b", phase: v1.PodSucceeded, deletionTimeStamp: nil, nodeName: "node"},
{name: "c", phase: api.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: "node"}, {name: "c", phase: v1.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: "node"},
}, },
deletedPodNames: sets.NewString(), deletedPodNames: sets.NewString(),
}, },
@@ -265,11 +265,11 @@ func TestGCUnscheduledTerminating(t *testing.T) {
creationTime := time.Unix(0, 0) creationTime := time.Unix(0, 0)
for _, pod := range test.pods { for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour) creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&api.Pod{ gcc.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}, ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime},
DeletionTimestamp: pod.deletionTimeStamp}, DeletionTimestamp: pod.deletionTimeStamp},
Status: api.PodStatus{Phase: pod.phase}, Status: v1.PodStatus{Phase: pod.phase},
Spec: api.PodSpec{NodeName: pod.nodeName}, Spec: v1.PodSpec{NodeName: pod.nodeName},
}) })
} }


@@ -26,14 +26,14 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -107,13 +107,13 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme
} }
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
rsc := &ReplicaSetController{ rsc := &ReplicaSetController{
kubeClient: kubeClient, kubeClient: kubeClient,
podControl: controller.RealPodControl{ podControl: controller.RealPodControl{
KubeClient: kubeClient, KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "replicaset-controller"}), Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "replicaset-controller"}),
}, },
burstReplicas: burstReplicas, burstReplicas: burstReplicas,
expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()), expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
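The recorder built above is what RealPodControl uses to emit versioned events; as an illustrative usage sketch (the reason strings here are examples, not taken from this diff):

    // Illustrative only: emitting events through a recorder created as above.
    recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "replicaset-controller"})
    recorder.Eventf(rs, v1.EventTypeNormal, "SuccessfulCreate", "Created pod %q", pod.Name)
    recorder.Eventf(rs, v1.EventTypeWarning, "FailedCreate", "Error creating pod: %v", err)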
@@ -176,7 +176,7 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
// getPodReplicaSet returns the replica set managing the given pod. // getPodReplicaSet returns the replica set managing the given pod.
// TODO: Surface that we are ignoring multiple replica sets for a single pod. // TODO: Surface that we are ignoring multiple replica sets for a single pod.
// TODO: use ownerReference.Controller to determine if the rs controls the pod. // TODO: use ownerReference.Controller to determine if the rs controls the pod.
func (rsc *ReplicaSetController) getPodReplicaSet(pod *api.Pod) *extensions.ReplicaSet { func (rsc *ReplicaSetController) getPodReplicaSet(pod *v1.Pod) *extensions.ReplicaSet {
// look up in the cache, if cached and the cache is valid, just return cached value // look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := rsc.lookupCache.GetMatchingObject(pod); cached { if obj, cached := rsc.lookupCache.GetMatchingObject(pod); cached {
rs, ok := obj.(*extensions.ReplicaSet) rs, ok := obj.(*extensions.ReplicaSet)
@@ -254,7 +254,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
} }
// isCacheValid checks if the cache is valid // isCacheValid checks if the cache is valid
func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions.ReplicaSet) bool { func (rsc *ReplicaSetController) isCacheValid(pod *v1.Pod, cachedRS *extensions.ReplicaSet) bool {
_, err := rsc.rsLister.ReplicaSets(cachedRS.Namespace).Get(cachedRS.Name) _, err := rsc.rsLister.ReplicaSets(cachedRS.Namespace).Get(cachedRS.Name)
// rs has been deleted or updated, cache is invalid // rs has been deleted or updated, cache is invalid
if err != nil || !isReplicaSetMatch(pod, cachedRS) { if err != nil || !isReplicaSetMatch(pod, cachedRS) {
@@ -265,7 +265,7 @@ func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions
// isReplicaSetMatch takes a Pod and ReplicaSet and returns whether the Pod and ReplicaSet match // isReplicaSetMatch takes a Pod and ReplicaSet and returns whether the Pod and ReplicaSet match
// TODO(mqliang): This logic is a copy from GetPodReplicaSets(), remove the duplication // TODO(mqliang): This logic is a copy from GetPodReplicaSets(), remove the duplication
func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool { func isReplicaSetMatch(pod *v1.Pod, rs *extensions.ReplicaSet) bool {
if rs.Namespace != pod.Namespace { if rs.Namespace != pod.Namespace {
return false return false
} }
@@ -284,7 +284,7 @@ func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {
// When a pod is created, enqueue the replica set that manages it and update its expectations. // When a pod is created, enqueue the replica set that manages it and update its expectations.
func (rsc *ReplicaSetController) addPod(obj interface{}) { func (rsc *ReplicaSetController) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod) glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)
rs := rsc.getPodReplicaSet(pod) rs := rsc.getPodReplicaSet(pod)
@@ -308,10 +308,10 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {
// When a pod is updated, figure out what replica set/s manage it and wake them // When a pod is updated, figure out what replica set/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old // up. If the labels of the pod have changed we need to awaken both the old
// and new replica set. old and cur must be *api.Pod types. // and new replica set. old and cur must be *v1.Pod types.
func (rsc *ReplicaSetController) updatePod(old, cur interface{}) { func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod) curPod := cur.(*v1.Pod)
oldPod := old.(*api.Pod) oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion { if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods. // Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs. // Two different versions of the same pod will always have different RVs.
@@ -348,9 +348,9 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
} }
// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations. // When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. // obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) deletePod(obj interface{}) { func (rsc *ReplicaSetController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not // When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains // in the list, leading to the insertion of a tombstone object which contains
@@ -362,7 +362,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj)) utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj))
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %#v", obj)) utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %#v", obj))
return return
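Stripped of the surrounding controller, the tombstone pattern described by the comments above looks like this (a sketch using only types already imported in this file):

    // Sketch of the DeletedFinalStateUnknown handling the comments above describe.
    pod, ok := obj.(*v1.Pod)
    if !ok {
    	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
    	if !ok {
    		utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %+v", obj))
    		return
    	}
    	pod, ok = tombstone.Obj.(*v1.Pod)
    	if !ok {
    		utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a pod %#v", obj))
    		return
    	}
    }
    // At this point pod is safe to use for expectation bookkeeping and requeueing.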
@@ -426,8 +426,8 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
// manageReplicas checks and updates replicas for the given ReplicaSet. // manageReplicas checks and updates replicas for the given ReplicaSet.
// Does NOT modify <filteredPods>. // Does NOT modify <filteredPods>.
// It will requeue the replica set in case of an error while creating/deleting pods. // It will requeue the replica set in case of an error while creating/deleting pods.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) error { func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *extensions.ReplicaSet) error {
diff := len(filteredPods) - int(rs.Spec.Replicas) diff := len(filteredPods) - int(*(rs.Spec.Replicas))
rsKey, err := controller.KeyFunc(rs) rsKey, err := controller.KeyFunc(rs)
if err != nil { if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err)) utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err))
@@ -448,7 +448,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
rsc.expectations.ExpectCreations(rsKey, diff) rsc.expectations.ExpectCreations(rsKey, diff)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(diff) wg.Add(diff)
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff) glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
for i := 0; i < diff; i++ { for i := 0; i < diff; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
@@ -456,7 +456,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
if rsc.garbageCollectorEnabled { if rsc.garbageCollectorEnabled {
var trueVar = true var trueVar = true
controllerRef := &api.OwnerReference{ controllerRef := &v1.OwnerReference{
APIVersion: getRSKind().GroupVersion().String(), APIVersion: getRSKind().GroupVersion().String(),
Kind: getRSKind().Kind, Kind: getRSKind().Kind,
Name: rs.Name, Name: rs.Name,
@@ -481,9 +481,9 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
diff = rsc.burstReplicas diff = rsc.burstReplicas
} }
errCh = make(chan error, diff) errCh = make(chan error, diff)
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff) glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// No need to sort pods if we are about to delete all of them // No need to sort pods if we are about to delete all of them
if rs.Spec.Replicas != 0 { if *(rs.Spec.Replicas) != 0 {
// Sort the pods in the order such that not-ready < ready, unscheduled // Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods // < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible. // in the earlier stages whenever possible.
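A recurring mechanical change in this hunk is that extensions/v1beta1 models Spec.Replicas as a *int32 (so an unset value can be distinguished from an explicit 0 and defaulted server-side), which forces every read through a dereference. A minimal sketch of the pattern, assuming the field has already been defaulted to non-nil:

    // v1beta1: Spec.Replicas is *int32; reads dereference it after defaulting.
    want := int(*(rs.Spec.Replicas))
    diff := len(filteredPods) - want
    if diff > 0 {
    	// too many pods: delete diff of them
    } else if diff < 0 {
    	// too few pods: create -diff new ones
    }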
@@ -567,7 +567,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
// NOTE: filteredPods are pointing to objects from cache - if you need to // NOTE: filteredPods are pointing to objects from cache - if you need to
// modify them, you need to copy them first. // modify them, you need to copy them first.
// TODO: Do the List and Filter in a single pass, or use an index. // TODO: Do the List and Filter in a single pass, or use an index.
var filteredPods []*api.Pod var filteredPods []*v1.Pod
if rsc.garbageCollectorEnabled { if rsc.garbageCollectorEnabled {
// list all pods to include the pods that don't match the rs`s selector // list all pods to include the pods that don't match the rs`s selector
// anymore but has the stale controller ref. // anymore but has the stale controller ref.


@@ -28,15 +28,15 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -51,7 +51,7 @@ import (
) )
func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController { func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false) ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false)
ret.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})} ret.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
ret.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})} ret.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
@@ -98,34 +98,34 @@ func getKey(rs *extensions.ReplicaSet, t *testing.T) string {
func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet { func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet {
rs := &extensions.ReplicaSet{ rs := &extensions.ReplicaSet{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
}, },
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &unversioned.LabelSelector{MatchLabels: selectorMap}, Selector: &unversioned.LabelSelector{MatchLabels: selectorMap},
Template: api.PodTemplateSpec{ Template: v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"type": "production", "type": "production",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "foo/bar", Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}, },
}, },
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{ NodeSelector: map[string]string{
"baz": "blah", "baz": "blah",
}, },
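Because Spec.Replicas is now a pointer, the constructor above builds it with an inline closure; a hypothetical helper (not part of this diff) that reads a little more clearly:

    // newInt32 is a hypothetical helper equivalent to the inline closure above.
    func newInt32(val int32) *int32 {
    	return &val
    }

    // e.g. in newReplicaSet:
    //   Spec: extensions.ReplicaSetSpec{
    //       Replicas: newInt32(int32(replicas)),
    //       Selector: &unversioned.LabelSelector{MatchLabels: selectorMap},
    //       ...
    //   }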
@@ -137,40 +137,40 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
} }
// create a pod with the given phase for the given rs (same selectors and namespace) // create a pod with the given phase for the given rs (same selectors and namespace)
func newPod(name string, rs *extensions.ReplicaSet, status api.PodPhase, lastTransitionTime *unversioned.Time) *api.Pod { func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *unversioned.Time) *v1.Pod {
var conditions []api.PodCondition var conditions []v1.PodCondition
if status == api.PodRunning { if status == v1.PodRunning {
condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue} condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
if lastTransitionTime != nil { if lastTransitionTime != nil {
condition.LastTransitionTime = *lastTransitionTime condition.LastTransitionTime = *lastTransitionTime
} }
conditions = append(conditions, condition) conditions = append(conditions, condition)
} }
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Namespace: rs.Namespace, Namespace: rs.Namespace,
Labels: rs.Spec.Selector.MatchLabels, Labels: rs.Spec.Selector.MatchLabels,
}, },
Status: api.PodStatus{Phase: status, Conditions: conditions}, Status: v1.PodStatus{Phase: status, Conditions: conditions},
} }
} }
// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store. // create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *api.PodList { func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *v1.PodList {
pods := []api.Pod{} pods := []v1.Pod{}
var trueVar = true var trueVar = true
controllerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar} controllerReference := v1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status, nil) pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status, nil)
pod.ObjectMeta.Labels = labelMap pod.ObjectMeta.Labels = labelMap
pod.OwnerReferences = []api.OwnerReference{controllerReference} pod.OwnerReferences = []v1.OwnerReference{controllerReference}
if store != nil { if store != nil {
store.Add(pod) store.Add(pod)
} }
pods = append(pods, *pod) pods = append(pods, *pod)
} }
return &api.PodList{ return &v1.PodList{
Items: pods, Items: pods,
} }
} }
@@ -197,7 +197,7 @@ type serverResponse struct {
} }
func TestSyncReplicaSetDoesNothing(t *testing.T) { func TestSyncReplicaSetDoesNothing(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
@@ -208,7 +208,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap) rsSpec := newReplicaSet(2, labelMap)
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod") newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod")
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
manager.syncReplicaSet(getKey(rsSpec, t)) manager.syncReplicaSet(getKey(rsSpec, t))
@@ -216,7 +216,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
} }
func TestSyncReplicaSetDeletes(t *testing.T) { func TestSyncReplicaSetDeletes(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
@@ -228,14 +228,14 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(1, labelMap) rsSpec := newReplicaSet(1, labelMap)
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod") newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod")
manager.syncReplicaSet(getKey(rsSpec, t)) manager.syncReplicaSet(getKey(rsSpec, t))
validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0) validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0)
} }
func TestDeleteFinalStateUnknown(t *testing.T) { func TestDeleteFinalStateUnknown(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
@@ -254,7 +254,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(1, labelMap) rsSpec := newReplicaSet(1, labelMap)
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod") pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
go manager.worker() go manager.worker()
@@ -271,7 +271,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
} }
func TestSyncReplicaSetCreates(t *testing.T) { func TestSyncReplicaSetCreates(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -297,7 +297,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -309,7 +309,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
rs := newReplicaSet(activePods, labelMap) rs := newReplicaSet(activePods, labelMap)
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)} rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
newPodList(manager.podLister.Indexer, activePods, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, labelMap, rs, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -343,7 +343,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -358,8 +358,8 @@ func TestControllerUpdateReplicas(t *testing.T) {
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0} rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
rs.Generation = 1 rs.Generation = 1
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel") newPodList(manager.podLister.Indexer, 2, v1.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
@@ -391,7 +391,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
@@ -404,7 +404,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap) rsSpec := newReplicaSet(2, labelMap)
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod") newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rsSpec, "pod")
// Creates a replica and sets expectations // Creates a replica and sets expectations
rsSpec.Status.Replicas = 1 rsSpec.Status.Replicas = 1
@@ -453,32 +453,32 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
func TestPodControllerLookup(t *testing.T) { func TestPodControllerLookup(t *testing.T) {
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0)
manager.podListerSynced = alwaysReady manager.podListerSynced = alwaysReady
testCases := []struct { testCases := []struct {
inRSs []*extensions.ReplicaSet inRSs []*extensions.ReplicaSet
pod *api.Pod pod *v1.Pod
outRSName string outRSName string
}{ }{
// pods without labels don't match any ReplicaSets // pods without labels don't match any ReplicaSets
{ {
inRSs: []*extensions.ReplicaSet{ inRSs: []*extensions.ReplicaSet{
{ObjectMeta: api.ObjectMeta{Name: "basic"}}}, {ObjectMeta: v1.ObjectMeta{Name: "basic"}}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}}, pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}},
outRSName: "", outRSName: "",
}, },
// Matching labels, not namespace // Matching labels, not namespace
{ {
inRSs: []*extensions.ReplicaSet{ inRSs: []*extensions.ReplicaSet{
{ {
ObjectMeta: api.ObjectMeta{Name: "foo"}, ObjectMeta: v1.ObjectMeta{Name: "foo"},
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
}, },
}, },
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRSName: "", outRSName: "",
}, },
@@ -486,14 +486,14 @@ func TestPodControllerLookup(t *testing.T) {
{ {
inRSs: []*extensions.ReplicaSet{ inRSs: []*extensions.ReplicaSet{
{ {
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: extensions.ReplicaSetSpec{ Spec: extensions.ReplicaSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}, Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
}, },
}, },
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRSName: "bar", outRSName: "bar",
}, },
@@ -523,7 +523,7 @@ func TestWatchControllers(t *testing.T) {
client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil)) client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false) manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false)
informers.Start(stopCh) informers.Start(stopCh)
manager.podListerSynced = alwaysReady manager.podListerSynced = alwaysReady
@@ -540,7 +540,7 @@ func TestWatchControllers(t *testing.T) {
t.Errorf("Expected to find replica set under key %v", key) t.Errorf("Expected to find replica set under key %v", key)
} }
rsSpec := *obj.(*extensions.ReplicaSet) rsSpec := *obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) { if !v1.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec) t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
} }
close(received) close(received)
@@ -582,7 +582,7 @@ func TestWatchPods(t *testing.T) {
t.Errorf("Expected to find replica set under key %v", key) t.Errorf("Expected to find replica set under key %v", key)
} }
rsSpec := obj.(*extensions.ReplicaSet) rsSpec := obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) { if !v1.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec) t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
} }
close(received) close(received)
@@ -592,9 +592,9 @@ func TestWatchPods(t *testing.T) {
// and make sure it hits the sync method for the right ReplicaSet. // and make sure it hits the sync method for the right ReplicaSet.
go wait.Until(manager.worker, 10*time.Millisecond, stopCh) go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod") pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod")
testPod := pods.Items[0] testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod) fakeWatch.Add(&testPod)
select { select {
@@ -636,7 +636,7 @@ func TestUpdatePods(t *testing.T) {
// case 1: We put in the podLister a pod with labels matching testRSSpec1, // case 1: We put in the podLister a pod with labels matching testRSSpec1,
// then update its labels to match testRSSpec2. We expect to receive a sync // then update its labels to match testRSSpec2. We expect to receive a sync
// request for both replica sets. // request for both replica sets.
pod1 := newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0] pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
pod1.ResourceVersion = "1" pod1.ResourceVersion = "1"
pod2 := pod1 pod2 := pod1
pod2.Labels = labelMap2 pod2.Labels = labelMap2
@@ -687,7 +687,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
manager.podListerSynced = alwaysReady manager.podListerSynced = alwaysReady
@@ -695,7 +695,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
rs := newReplicaSet(1, labelMap) rs := newReplicaSet(1, labelMap)
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2} rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rs, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -756,7 +756,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite. // TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) { func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
@@ -769,7 +769,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
expectedPods := int32(0) expectedPods := int32(0)
pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod") pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")
rsKey, err := controller.KeyFunc(rsSpec) rsKey, err := controller.KeyFunc(rsSpec)
if err != nil { if err != nil {
@@ -779,7 +779,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Size up the controller, then size it down, and confirm the expected create/delete pattern // Size up the controller, then size it down, and confirm the expected create/delete pattern
for _, replicas := range []int32{int32(numReplicas), 0} { for _, replicas := range []int32{int32(numReplicas), 0} {
rsSpec.Spec.Replicas = replicas *(rsSpec.Spec.Replicas) = replicas
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
for i := 0; i < numReplicas; i += burstReplicas { for i := 0; i < numReplicas; i += burstReplicas {
@@ -823,11 +823,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// To accurately simulate a watch we must delete the exact pods // To accurately simulate a watch we must delete the exact pods
// the rs is waiting for. // the rs is waiting for.
expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t)) expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t))
podsToDelete := []*api.Pod{} podsToDelete := []*v1.Pod{}
for _, key := range expectedDels.List() { for _, key := range expectedDels.List() {
nsName := strings.Split(key, "/") nsName := strings.Split(key, "/")
podsToDelete = append(podsToDelete, &api.Pod{ podsToDelete = append(podsToDelete, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: nsName[1], Name: nsName[1],
Namespace: nsName[0], Namespace: nsName[0],
Labels: rsSpec.Spec.Selector.MatchLabels, Labels: rsSpec.Spec.Selector.MatchLabels,
@@ -867,8 +867,8 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
t.Fatalf("Waiting on unexpected number of deletes.") t.Fatalf("Waiting on unexpected number of deletes.")
} }
nsName := strings.Split(expectedDel.List()[0], "/") nsName := strings.Split(expectedDel.List()[0], "/")
lastPod := &api.Pod{ lastPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: nsName[1], Name: nsName[1],
Namespace: nsName[0], Namespace: nsName[0],
Labels: rsSpec.Spec.Selector.MatchLabels, Labels: rsSpec.Spec.Selector.MatchLabels,
@@ -882,11 +882,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Confirm that we've created the right number of replicas // Confirm that we've created the right number of replicas
activePods := int32(len(manager.podLister.Indexer.List())) activePods := int32(len(manager.podLister.Indexer.List()))
if activePods != rsSpec.Spec.Replicas { if activePods != *(rsSpec.Spec.Replicas) {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods) t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(rsSpec.Spec.Replicas), activePods)
} }
// Replenish the pod list, since we cut it down sizing up // Replenish the pod list, since we cut it down sizing up
pods = newPodList(nil, int(replicas), api.PodRunning, labelMap, rsSpec, "pod") pods = newPodList(nil, int(replicas), v1.PodRunning, labelMap, rsSpec, "pod")
} }
} }
@@ -910,7 +910,7 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods // TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations. // and checking expectations.
func TestRSSyncExpectations(t *testing.T) { func TestRSSyncExpectations(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
@@ -921,7 +921,7 @@ func TestRSSyncExpectations(t *testing.T) {
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap) rsSpec := newReplicaSet(2, labelMap)
manager.rsLister.Indexer.Add(rsSpec) manager.rsLister.Indexer.Add(rsSpec)
pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod") pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
manager.podLister.Indexer.Add(&pods.Items[0]) manager.podLister.Indexer.Add(&pods.Items[0])
postExpectationsPod := pods.Items[1] postExpectationsPod := pods.Items[1]
@@ -938,7 +938,7 @@ func TestRSSyncExpectations(t *testing.T) {
} }
func TestDeleteControllerAndExpectations(t *testing.T) { func TestDeleteControllerAndExpectations(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
@@ -993,7 +993,7 @@ func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
} }
func TestOverlappingRSs(t *testing.T) { func TestOverlappingRSs(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
@@ -1016,7 +1016,7 @@ func TestOverlappingRSs(t *testing.T) {
manager.rsLister.Indexer.Add(shuffledControllers[j]) manager.rsLister.Indexer.Add(shuffledControllers[j])
} }
// Add a pod and make sure only the oldest ReplicaSet is synced // Add a pod and make sure only the oldest ReplicaSet is synced
pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod") pods := newPodList(nil, 1, v1.PodPending, labelMap, controllers[0], "pod")
rsKey := getKey(controllers[0], t) rsKey := getKey(controllers[0], t)
manager.addPod(&pods.Items[0]) manager.addPod(&pods.Items[0])
@@ -1029,7 +1029,7 @@ func TestOverlappingRSs(t *testing.T) {
} }
func TestDeletionTimestamp(t *testing.T) { func TestDeletionTimestamp(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
labelMap := map[string]string{"foo": "bar"} labelMap := map[string]string{"foo": "bar"}
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
@@ -1042,7 +1042,7 @@ func TestDeletionTimestamp(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", rs, err) t.Errorf("Couldn't get key for object %#v: %v", rs, err)
} }
pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0] pod := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod").Items[0]
pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
pod.ResourceVersion = "1" pod.ResourceVersion = "1"
manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)}) manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
@@ -1063,7 +1063,7 @@ func TestDeletionTimestamp(t *testing.T) {
// An update from no deletion timestamp to having one should be treated // An update from no deletion timestamp to having one should be treated
// as a deletion. // as a deletion.
oldPod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0] oldPod := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod").Items[0]
oldPod.ResourceVersion = "2" oldPod.ResourceVersion = "2"
manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)}) manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
manager.updatePod(&oldPod, &pod) manager.updatePod(&oldPod, &pod)
@@ -1081,8 +1081,8 @@ func TestDeletionTimestamp(t *testing.T) {
// An update to the pod (including an update to the deletion timestamp) // An update to the pod (including an update to the deletion timestamp)
// should not be counted as a second delete. // should not be counted as a second delete.
secondPod := &api.Pod{ secondPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: pod.Namespace, Namespace: pod.Namespace,
Name: "secondPod", Name: "secondPod",
Labels: pod.Labels, Labels: pod.Labels,
@@ -1142,10 +1142,10 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
var trueVar = true var trueVar = true
otherControllerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar} otherControllerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
// add to podLister a matching Pod controlled by another controller. Expect no patch. // add to podLister a matching Pod controlled by another controller. Expect no patch.
pod := newPod("pod", rs, api.PodRunning, nil) pod := newPod("pod", rs, v1.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{otherControllerReference} pod.OwnerReferences = []v1.OwnerReference{otherControllerReference}
manager.podLister.Indexer.Add(pod) manager.podLister.Indexer.Add(pod)
err := manager.syncReplicaSet(getKey(rs, t)) err := manager.syncReplicaSet(getKey(rs, t))
if err != nil { if err != nil {
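Because OwnerReference.Controller is a *bool in the versioned API, the tests declare var trueVar = true and take its address. A hedged sketch of the adoption check this field enables, with local types standing in for the API structs:

```go
package main

import "fmt"

type ownerRef struct {
	UID        string
	Kind       string
	Name       string
	Controller *bool
}

// controlledBy reports whether refs already contain a controller-type owner,
// i.e. one whose Controller field is set and true. A pod controlled by
// someone else must not be adopted (patched) by this ReplicaSet, which is
// what the test above asserts.
func controlledBy(refs []ownerRef) (ownerRef, bool) {
	for _, r := range refs {
		if r.Controller != nil && *r.Controller {
			return r, true
		}
	}
	return ownerRef{}, false
}

func main() {
	trueVar := true
	other := ownerRef{UID: "123", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
	if r, ok := controlledBy([]ownerRef{other}); ok {
		fmt.Println("already controlled by", r.Name, "- expect no patch")
	}
	if _, ok := controlledBy([]ownerRef{{Kind: "Job", Name: "Job"}}); !ok {
		fmt.Println("owner ref without Controller=true - expect a patch to take control")
	}
}
```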
@@ -1165,9 +1165,9 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
// add to podLister one more matching pod that doesn't have a controller // add to podLister one more matching pod that doesn't have a controller
// ref, but has an owner ref pointing to another object. Expect a patch to // ref, but has an owner ref pointing to another object. Expect a patch to
// take control of it. // take control of it.
unrelatedOwnerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"} unrelatedOwnerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rs, api.PodRunning, nil) pod := newPod("pod", rs, v1.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{unrelatedOwnerReference} pod.OwnerReferences = []v1.OwnerReference{unrelatedOwnerReference}
manager.podLister.Indexer.Add(pod) manager.podLister.Indexer.Add(pod)
err := manager.syncReplicaSet(getKey(rs, t)) err := manager.syncReplicaSet(getKey(rs, t))
@@ -1187,9 +1187,9 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
// add to podLister a matching pod that has an ownerRef pointing to the rs, // add to podLister a matching pod that has an ownerRef pointing to the rs,
// but ownerRef.Controller is false. Expect a patch to take control of it. // but ownerRef.Controller is false. Expect a patch to take control of it.
rsOwnerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name} rsOwnerReference := v1.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
pod := newPod("pod", rs, api.PodRunning, nil) pod := newPod("pod", rs, v1.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{rsOwnerReference} pod.OwnerReferences = []v1.OwnerReference{rsOwnerReference}
manager.podLister.Indexer.Add(pod) manager.podLister.Indexer.Add(pod)
err := manager.syncReplicaSet(getKey(rs, t)) err := manager.syncReplicaSet(getKey(rs, t))
@@ -1209,8 +1209,8 @@ func TestPatchPodFails(t *testing.T) {
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
// add to podLister two matching pods. Expect two patches to take control // add to podLister two matching pods. Expect two patches to take control
// of them. // of them.
manager.podLister.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil)) manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil)) manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil))
// let both patches fail. The rs controller will assume it fails to take // let both patches fail. The rs controller will assume it fails to take
// control of the pods and create new ones. // control of the pods and create new ones.
fakePodControl.Err = fmt.Errorf("Fake Error") fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1231,9 +1231,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
// add to podLister three matching pods. Expect three patches to take control // add to podLister three matching pods. Expect three patches to take control
// of them, and later delete one of them. // of them, and later delete one of them.
manager.podLister.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil)) manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil)) manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod3", rs, api.PodRunning, nil)) manager.podLister.Indexer.Add(newPod("pod3", rs, v1.PodRunning, nil))
err := manager.syncReplicaSet(getKey(rs, t)) err := manager.syncReplicaSet(getKey(rs, t))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@@ -1250,11 +1250,11 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
// put one pod in the podLister // put one pod in the podLister
pod := newPod("pod", rs, api.PodRunning, nil) pod := newPod("pod", rs, v1.PodRunning, nil)
pod.ResourceVersion = "1" pod.ResourceVersion = "1"
var trueVar = true var trueVar = true
rsOwnerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar} rsOwnerReference := v1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
pod.OwnerReferences = []api.OwnerReference{rsOwnerReference} pod.OwnerReferences = []v1.OwnerReference{rsOwnerReference}
updatedPod := *pod updatedPod := *pod
// reset the labels // reset the labels
updatedPod.Labels = make(map[string]string) updatedPod.Labels = make(map[string]string)
@@ -1277,7 +1277,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// expect 1 patch to be sent to remove the controllerRef for the pod. // expect 1 patch to be sent to remove the controllerRef for the pod.
// expect 2 creates because the rs.Spec.Replicas=2 and there exists no // expect 2 creates because the *(rs.Spec.Replicas)=2 and there exists no
// matching pod. // matching pod.
validateSyncReplicaSet(t, fakePodControl, 2, 0, 1) validateSyncReplicaSet(t, fakePodControl, 2, 0, 1)
fakePodControl.Clear() fakePodControl.Clear()
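These tests assert on behavior by counting the actions recorded by a fake pod control rather than talking to an API server. A rough standalone sketch of such a counting fake (recordingPodControl and expectActions are my names, not the controller package's FakePodControl API):

```go
package main

import "fmt"

// recordingPodControl counts intended actions instead of performing them,
// which is what lets validateSyncReplicaSet-style helpers assert on
// (creates, deletes, patches) tuples.
type recordingPodControl struct {
	Creates, Deletes, Patches int
}

func (r *recordingPodControl) CreatePod(name string) error { r.Creates++; return nil }
func (r *recordingPodControl) DeletePod(name string) error { r.Deletes++; return nil }
func (r *recordingPodControl) PatchPod(name string) error  { r.Patches++; return nil }
func (r *recordingPodControl) Clear()                      { *r = recordingPodControl{} }

// expectActions compares the recorded counts against the expected tuple.
func expectActions(pc *recordingPodControl, creates, deletes, patches int) error {
	if pc.Creates != creates || pc.Deletes != deletes || pc.Patches != patches {
		return fmt.Errorf("got (%d,%d,%d), want (%d,%d,%d)",
			pc.Creates, pc.Deletes, pc.Patches, creates, deletes, patches)
	}
	return nil
}

func main() {
	pc := &recordingPodControl{}
	pc.CreatePod("pod1")
	pc.CreatePod("pod2")
	pc.PatchPod("pod0")
	// mirrors validateSyncReplicaSet(t, fakePodControl, 2, 0, 1)
	fmt.Println(expectActions(pc, 2, 0, 1)) // <nil>
}
```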
@@ -1290,7 +1290,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
defer close(stopCh) defer close(stopCh)
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs) manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
// put 2 pods in the podLister // put 2 pods in the podLister
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
// update the RS so that its selector no longer matches the pods // update the RS so that its selector no longer matches the pods
updatedRS := *rs updatedRS := *rs
updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"} updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
@@ -1311,7 +1311,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// expect 2 patches to be sent to remove the controllerRef for the pods. // expect 2 patches to be sent to remove the controllerRef for the pods.
// expect 2 creates because the rs.Spec.Replicas=2 and there exists no // expect 2 creates because the *(rs.Spec.Replicas)=2 and there exists no
// matching pod. // matching pod.
validateSyncReplicaSet(t, fakePodControl, 2, 0, 2) validateSyncReplicaSet(t, fakePodControl, 2, 0, 2)
fakePodControl.Clear() fakePodControl.Clear()
@@ -1328,7 +1328,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
now := unversioned.Now() now := unversioned.Now()
rs.DeletionTimestamp = &now rs.DeletionTimestamp = &now
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
pod1 := newPod("pod1", rs, api.PodRunning, nil) pod1 := newPod("pod1", rs, v1.PodRunning, nil)
manager.podLister.Indexer.Add(pod1) manager.podLister.Indexer.Add(pod1)
// no patch, no create // no patch, no create
@@ -1349,7 +1349,7 @@ func TestReadyReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -1362,8 +1362,8 @@ func TestReadyReplicas(t *testing.T) {
rs.Generation = 1 rs.Generation = 1
manager.rsLister.Indexer.Add(rs) manager.rsLister.Indexer.Add(rs)
newPodList(manager.podLister.Indexer, 2, api.PodPending, labelMap, rs, "pod") newPodList(manager.podLister.Indexer, 2, v1.PodPending, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod") newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{}) response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
@@ -1392,7 +1392,7 @@ func TestAvailableReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{}) stopCh := make(chan struct{})
defer close(stopCh) defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0) manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -1409,12 +1409,12 @@ func TestAvailableReplicas(t *testing.T) {
// First pod became ready 20s ago // First pod became ready 20s ago
moment := unversioned.Time{Time: time.Now().Add(-2e10)} moment := unversioned.Time{Time: time.Now().Add(-2e10)}
pod := newPod("pod", rs, api.PodRunning, &moment) pod := newPod("pod", rs, v1.PodRunning, &moment)
manager.podLister.Indexer.Add(pod) manager.podLister.Indexer.Add(pod)
// Second pod becomes ready now // Second pod becomes ready now
otherMoment := unversioned.Now() otherMoment := unversioned.Now()
otherPod := newPod("otherPod", rs, api.PodRunning, &otherMoment) otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment)
manager.podLister.Indexer.Add(otherPod) manager.podLister.Indexer.Add(otherPod)
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
@@ -1440,7 +1440,7 @@ var (
condImagePullBackOff = func() extensions.ReplicaSetCondition { condImagePullBackOff = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{ return extensions.ReplicaSetCondition{
Type: imagePullBackOff, Type: imagePullBackOff,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "NonExistentImage", Reason: "NonExistentImage",
} }
} }
@@ -1448,7 +1448,7 @@ var (
condReplicaFailure = func() extensions.ReplicaSetCondition { condReplicaFailure = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{ return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure, Type: extensions.ReplicaSetReplicaFailure,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "OtherFailure", Reason: "OtherFailure",
} }
} }
@@ -1456,7 +1456,7 @@ var (
condReplicaFailure2 = func() extensions.ReplicaSetCondition { condReplicaFailure2 = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{ return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure, Type: extensions.ReplicaSetReplicaFailure,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "AnotherFailure", Reason: "AnotherFailure",
} }
} }
@@ -1476,7 +1476,7 @@ func TestGetCondition(t *testing.T) {
status extensions.ReplicaSetStatus status extensions.ReplicaSetStatus
condType extensions.ReplicaSetConditionType condType extensions.ReplicaSetConditionType
condStatus api.ConditionStatus condStatus v1.ConditionStatus
condReason string condReason string
expected bool expected bool

View File

@@ -26,8 +26,9 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/api/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion" extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
) )
@@ -62,7 +63,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte
var getErr error var getErr error
for i, rs := 0, &rs; ; i++ { for i, rs := 0, &rs; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) + glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, rs.Spec.Replicas) + fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) + fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) +
fmt.Sprintf("availableReplicas %d->%d, ", rs.Status.AvailableReplicas, newStatus.AvailableReplicas) + fmt.Sprintf("availableReplicas %d->%d, ", rs.Status.AvailableReplicas, newStatus.AvailableReplicas) +
@@ -95,7 +96,7 @@ func (o overlappingReplicaSets) Less(i, j int) bool {
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
} }
func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageReplicasErr error) extensions.ReplicaSetStatus { func calculateStatus(rs extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
newStatus := rs.Status newStatus := rs.Status
// Count the number of pods that have labels matching the labels of the pod // Count the number of pods that have labels matching the labels of the pod
// template of the replica set, the matching pods may have more // template of the replica set, the matching pods may have more
@@ -110,9 +111,9 @@ func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageRe
if templateLabel.Matches(labels.Set(pod.Labels)) { if templateLabel.Matches(labels.Set(pod.Labels)) {
fullyLabeledReplicasCount++ fullyLabeledReplicasCount++
} }
if api.IsPodReady(pod) { if v1.IsPodReady(pod) {
readyReplicasCount++ readyReplicasCount++
if api.IsPodAvailable(pod, rs.Spec.MinReadySeconds, unversioned.Now()) { if v1.IsPodAvailable(pod, rs.Spec.MinReadySeconds, unversioned.Now()) {
availableReplicasCount++ availableReplicasCount++
} }
} }
@@ -121,12 +122,12 @@ func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageRe
failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure) failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
if manageReplicasErr != nil && failureCond == nil { if manageReplicasErr != nil && failureCond == nil {
var reason string var reason string
if diff := len(filteredPods) - int(rs.Spec.Replicas); diff < 0 { if diff := len(filteredPods) - int(*(rs.Spec.Replicas)); diff < 0 {
reason = "FailedCreate" reason = "FailedCreate"
} else if diff > 0 { } else if diff > 0 {
reason = "FailedDelete" reason = "FailedDelete"
} }
cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, api.ConditionTrue, reason, manageReplicasErr.Error()) cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
SetCondition(&newStatus, cond) SetCondition(&newStatus, cond)
} else if manageReplicasErr == nil && failureCond != nil { } else if manageReplicasErr == nil && failureCond != nil {
RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure) RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure)
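calculateStatus walks the filtered pods once, counting fully-labeled, ready, and available replicas, and flips a ReplicaFailure condition on or off depending on whether manageReplicas errored. A simplified standalone sketch of that accounting (local types; the MinReadySeconds handling approximates IsPodAvailable):

```go
package main

import (
	"fmt"
	"time"
)

// podInfo carries only the fields the status calculation looks at.
type podInfo struct {
	Labels     map[string]string
	Ready      bool
	ReadySince time.Time
}

type rsStatus struct {
	Replicas, FullyLabeled, Ready, Available int32
	FailureReason                            string
}

// hasAllLabels reports whether every template label is present on the pod,
// which is how "fully labeled" is counted (extra pod labels are fine).
func hasAllLabels(template, podLabels map[string]string) bool {
	for k, v := range template {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

// calcStatus follows the shape of calculateStatus above: count matching,
// ready, and available (ready for at least minReadySeconds) pods, and record
// a failure reason when the manage step returned an error.
func calcStatus(templateLabels map[string]string, pods []podInfo, minReadySeconds, desired int32, manageErr error) rsStatus {
	st := rsStatus{Replicas: int32(len(pods))}
	for _, p := range pods {
		if hasAllLabels(templateLabels, p.Labels) {
			st.FullyLabeled++
		}
		if p.Ready {
			st.Ready++
			if time.Since(p.ReadySince) >= time.Duration(minReadySeconds)*time.Second {
				st.Available++
			}
		}
	}
	if manageErr != nil {
		switch diff := int32(len(pods)) - desired; {
		case diff < 0:
			st.FailureReason = "FailedCreate"
		case diff > 0:
			st.FailureReason = "FailedDelete"
		}
	}
	return st
}

func main() {
	tmpl := map[string]string{"name": "foo"}
	pods := []podInfo{
		{Labels: map[string]string{"name": "foo"}, Ready: true, ReadySince: time.Now().Add(-30 * time.Second)},
		{Labels: map[string]string{"name": "foo", "extra": "x"}, Ready: true, ReadySince: time.Now()},
		{Labels: map[string]string{"name": "bar"}},
	}
	fmt.Printf("%+v\n", calcStatus(tmpl, pods, 10, 3, nil))
}
```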
@@ -140,7 +141,7 @@ func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageRe
} }
// NewReplicaSetCondition creates a new replica set condition. // NewReplicaSetCondition creates a new replica set condition.
func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status api.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition { func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{ return extensions.ReplicaSetCondition{
Type: condType, Type: condType,
Status: status, Status: status,

View File

@@ -25,13 +25,12 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -122,9 +121,9 @@ type ReplicationManager struct {
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager { func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")}) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
return newReplicationManager( return newReplicationManager(
eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}), eventBroadcaster.NewRecorder(v1.EventSource{Component: "replication-controller"}),
podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled) podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
} }
@@ -148,14 +147,14 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache
rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer( rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) return rm.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options) return rm.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ReplicationController{}, &v1.ReplicationController{},
// TODO: Can we have much longer period here? // TODO: Can we have much longer period here?
FullControllerResyncPeriod, FullControllerResyncPeriod,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
@@ -235,10 +234,10 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
// getPodController returns the controller managing the given pod. // getPodController returns the controller managing the given pod.
// TODO: Surface that we are ignoring multiple controllers for a single pod. // TODO: Surface that we are ignoring multiple controllers for a single pod.
// TODO: use ownerReference.Controller to determine if the rc controls the pod. // TODO: use ownerReference.Controller to determine if the rc controls the pod.
func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationController { func (rm *ReplicationManager) getPodController(pod *v1.Pod) *v1.ReplicationController {
// look up in the cache, if cached and the cache is valid, just return cached value // look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := rm.lookupCache.GetMatchingObject(pod); cached { if obj, cached := rm.lookupCache.GetMatchingObject(pod); cached {
controller, ok := obj.(*api.ReplicationController) controller, ok := obj.(*v1.ReplicationController)
if !ok { if !ok {
// This should not happen // This should not happen
glog.Errorf("lookup cache does not return a ReplicationController object") glog.Errorf("lookup cache does not return a ReplicationController object")
@@ -275,7 +274,7 @@ func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationCon
} }
// isCacheValid check if the cache is valid // isCacheValid check if the cache is valid
func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.ReplicationController) bool { func (rm *ReplicationManager) isCacheValid(pod *v1.Pod, cachedRC *v1.ReplicationController) bool {
_, err := rm.rcStore.ReplicationControllers(cachedRC.Namespace).Get(cachedRC.Name) _, err := rm.rcStore.ReplicationControllers(cachedRC.Namespace).Get(cachedRC.Name)
// rc has been deleted or updated, cache is invalid // rc has been deleted or updated, cache is invalid
if err != nil || !isControllerMatch(pod, cachedRC) { if err != nil || !isControllerMatch(pod, cachedRC) {
@@ -286,7 +285,7 @@ func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.Replicati
// isControllerMatch take a Pod and ReplicationController, return whether the Pod and ReplicationController are matching // isControllerMatch take a Pod and ReplicationController, return whether the Pod and ReplicationController are matching
// TODO(mqliang): This logic is a copy from GetPodControllers(), remove the duplication // TODO(mqliang): This logic is a copy from GetPodControllers(), remove the duplication
func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool { func isControllerMatch(pod *v1.Pod, rc *v1.ReplicationController) bool {
if rc.Namespace != pod.Namespace { if rc.Namespace != pod.Namespace {
return false return false
} }
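The lookup cache is only trusted when the cached ReplicationController still exists and still selects the pod. A rough sketch of that check, loosely following isCacheValid/isControllerMatch with plain maps in place of the lister and label selectors:

```go
package main

import "fmt"

type rc struct {
	Namespace, Name string
	Selector        map[string]string
}

type pod struct {
	Namespace string
	Labels    map[string]string
}

// controllerMatch loosely follows isControllerMatch: same namespace, a
// non-empty selector, and every selector key/value present on the pod.
func controllerMatch(p pod, c rc) bool {
	if c.Namespace != p.Namespace || len(c.Selector) == 0 {
		return false
	}
	for k, v := range c.Selector {
		if p.Labels[k] != v {
			return false
		}
	}
	return true
}

// cacheValid loosely follows isCacheValid: the cached RC must still exist in
// the store and must still match the pod, otherwise the lookup cache is stale.
func cacheValid(store map[string]rc, p pod, cached rc) bool {
	_, ok := store[cached.Namespace+"/"+cached.Name]
	return ok && controllerMatch(p, cached)
}

func main() {
	c := rc{Namespace: "ns", Name: "bar", Selector: map[string]string{"foo": "bar"}}
	store := map[string]rc{"ns/bar": c}
	p := pod{Namespace: "ns", Labels: map[string]string{"foo": "bar"}}
	fmt.Println(cacheValid(store, p, c)) // true
	p.Labels["foo"] = "baz"
	fmt.Println(cacheValid(store, p, c)) // false: selector no longer matches
}
```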
@@ -301,8 +300,8 @@ func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool {
// callback when RC is updated // callback when RC is updated
func (rm *ReplicationManager) updateRC(old, cur interface{}) { func (rm *ReplicationManager) updateRC(old, cur interface{}) {
oldRC := old.(*api.ReplicationController) oldRC := old.(*v1.ReplicationController)
curRC := cur.(*api.ReplicationController) curRC := cur.(*v1.ReplicationController)
// We should invalidate the whole lookup cache if a RC's selector has been updated. // We should invalidate the whole lookup cache if a RC's selector has been updated.
// //
@@ -319,7 +318,7 @@ func (rm *ReplicationManager) updateRC(old, cur interface{}) {
rm.lookupCache.InvalidateAll() rm.lookupCache.InvalidateAll()
} }
// TODO: Remove when #31981 is resolved! // TODO: Remove when #31981 is resolved!
glog.Infof("Observed updated replication controller %v. Desired pod count change: %d->%d", curRC.Name, oldRC.Spec.Replicas, curRC.Spec.Replicas) glog.Infof("Observed updated replication controller %v. Desired pod count change: %d->%d", curRC.Name, *(oldRC.Spec.Replicas), *(curRC.Spec.Replicas))
// You might imagine that we only really need to enqueue the // You might imagine that we only really need to enqueue the
// controller when Spec changes, but it is safer to sync any // controller when Spec changes, but it is safer to sync any
@@ -342,7 +341,7 @@ func (rm *ReplicationManager) updateRC(old, cur interface{}) {
// When a pod is created, enqueue the controller that manages it and update its expectations. // When a pod is created, enqueue the controller that manages it and update its expectations.
func (rm *ReplicationManager) addPod(obj interface{}) { func (rm *ReplicationManager) addPod(obj interface{}) {
pod := obj.(*api.Pod) pod := obj.(*v1.Pod)
rc := rm.getPodController(pod) rc := rm.getPodController(pod)
if rc == nil { if rc == nil {
@@ -366,10 +365,10 @@ func (rm *ReplicationManager) addPod(obj interface{}) {
// When a pod is updated, figure out what controller/s manage it and wake them // When a pod is updated, figure out what controller/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old // up. If the labels of the pod have changed we need to awaken both the old
// and new controller. old and cur must be *api.Pod types. // and new controller. old and cur must be *v1.Pod types.
func (rm *ReplicationManager) updatePod(old, cur interface{}) { func (rm *ReplicationManager) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod) curPod := cur.(*v1.Pod)
oldPod := old.(*api.Pod) oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion { if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods. // Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs. // Two different versions of the same pod will always have different RVs.
@@ -407,9 +406,9 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
} }
// When a pod is deleted, enqueue the controller that manages the pod and update its expectations. // When a pod is deleted, enqueue the controller that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item. // obj could be a *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (rm *ReplicationManager) deletePod(obj interface{}) { func (rm *ReplicationManager) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not // When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains // in the list, leading to the insertion of a tombstone object which contains
@@ -421,7 +420,7 @@ func (rm *ReplicationManager) deletePod(obj interface{}) {
glog.Errorf("Couldn't get object from tombstone %#v", obj) glog.Errorf("Couldn't get object from tombstone %#v", obj)
return return
} }
pod, ok = tombstone.Obj.(*api.Pod) pod, ok = tombstone.Obj.(*v1.Pod)
if !ok { if !ok {
glog.Errorf("Tombstone contained object that is not a pod %#v", obj) glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
return return
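A delete notification can carry either the pod itself or a tombstone wrapper when the watch missed the delete and it was only noticed on relist. A standalone sketch of recovering the pod in both cases, using a local tombstone type rather than the cache package's DeletedFinalStateUnknown:

```go
package main

import "fmt"

type pod struct{ Name string }

// deletedFinalStateUnknown stands in for the tombstone the store inserts
// when a delete was dropped and only discovered on relist.
type deletedFinalStateUnknown struct {
	Key string
	Obj interface{}
}

// podFromDeleteEvent recovers the pod from a delete notification, whether
// it arrives directly or wrapped in a tombstone.
func podFromDeleteEvent(obj interface{}) (*pod, bool) {
	switch t := obj.(type) {
	case *pod:
		return t, true
	case deletedFinalStateUnknown:
		if p, ok := t.Obj.(*pod); ok {
			return p, true
		}
	}
	return nil, false
}

func main() {
	p := &pod{Name: "pod0"}
	if got, ok := podFromDeleteEvent(deletedFinalStateUnknown{Key: "ns/pod0", Obj: p}); ok {
		fmt.Println("recovered", got.Name, "from tombstone")
	}
	if _, ok := podFromDeleteEvent("not a pod"); !ok {
		fmt.Println("unrecognized delete payload ignored")
	}
}
```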
@@ -439,7 +438,7 @@ func (rm *ReplicationManager) deletePod(obj interface{}) {
} }
} }
// obj could be an *api.ReplicationController, or a DeletionFinalStateUnknown marker item. // obj could be a *v1.ReplicationController, or a DeletionFinalStateUnknown marker item.
func (rm *ReplicationManager) enqueueController(obj interface{}) { func (rm *ReplicationManager) enqueueController(obj interface{}) {
key, err := controller.KeyFunc(obj) key, err := controller.KeyFunc(obj)
if err != nil { if err != nil {
@@ -486,8 +485,8 @@ func (rm *ReplicationManager) worker() {
// manageReplicas checks and updates replicas for the given replication controller. // manageReplicas checks and updates replicas for the given replication controller.
// Does NOT modify <filteredPods>. // Does NOT modify <filteredPods>.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) error { func (rm *ReplicationManager) manageReplicas(filteredPods []*v1.Pod, rc *v1.ReplicationController) error {
diff := len(filteredPods) - int(rc.Spec.Replicas) diff := len(filteredPods) - int(*(rc.Spec.Replicas))
rcKey, err := controller.KeyFunc(rc) rcKey, err := controller.KeyFunc(rc)
if err != nil { if err != nil {
return err return err
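With the versioned types, Spec.Replicas is a *int32, so manageReplicas dereferences it before comparing against the observed pod count. A small sketch of that diff computation; the nil guard is mine (the API server defaults the field), not part of the controller:

```go
package main

import "fmt"

// replicaDiff returns observed minus desired: negative means pods must be
// created, positive means pods must be deleted.
// desired is a *int32 because the v1 API makes Spec.Replicas a pointer.
func replicaDiff(observed int, desired *int32) int {
	want := int32(0)
	if desired != nil {
		want = *desired
	}
	return observed - int(want)
}

func main() {
	two := int32(2)
	fmt.Println(replicaDiff(1, &two)) // -1: create one pod
	fmt.Println(replicaDiff(3, &two)) // +1: delete one pod
	fmt.Println(replicaDiff(3, nil))  // +3: nil treated as zero replicas here
}
```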
@@ -510,14 +509,14 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re
rm.expectations.ExpectCreations(rcKey, diff) rm.expectations.ExpectCreations(rcKey, diff)
var wg sync.WaitGroup var wg sync.WaitGroup
wg.Add(diff) wg.Add(diff)
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff) glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, *(rc.Spec.Replicas), diff)
for i := 0; i < diff; i++ { for i := 0; i < diff; i++ {
go func() { go func() {
defer wg.Done() defer wg.Done()
var err error var err error
if rm.garbageCollectorEnabled { if rm.garbageCollectorEnabled {
var trueVar = true var trueVar = true
controllerRef := &api.OwnerReference{ controllerRef := &v1.OwnerReference{
APIVersion: getRCKind().GroupVersion().String(), APIVersion: getRCKind().GroupVersion().String(),
Kind: getRCKind().Kind, Kind: getRCKind().Kind,
Name: rc.Name, Name: rc.Name,
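When pods are missing, the manager records creation expectations and then issues the creations concurrently, waiting on a sync.WaitGroup. A simplified sketch of that burst pattern, with createPod standing in for the pod-control call and a plain counter in place of expectation bookkeeping:

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// createInBurst launches diff concurrent create calls and waits for all of
// them, mirroring the WaitGroup shape in manageReplicas.
func createInBurst(diff int, createPod func() error) int {
	var wg sync.WaitGroup
	var failures int64
	wg.Add(diff)
	for i := 0; i < diff; i++ {
		go func() {
			defer wg.Done()
			if err := createPod(); err != nil {
				atomic.AddInt64(&failures, 1) // the real code also lowers its creation expectations
			}
		}()
	}
	wg.Wait()
	return int(failures)
}

func main() {
	var created int64
	failures := createInBurst(3, func() error {
		atomic.AddInt64(&created, 1)
		return nil
	})
	fmt.Printf("created=%d failures=%d\n", created, failures)
}
```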
@@ -554,9 +553,9 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re
if diff > rm.burstReplicas { if diff > rm.burstReplicas {
diff = rm.burstReplicas diff = rm.burstReplicas
} }
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff) glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, *(rc.Spec.Replicas), diff)
// No need to sort pods if we are about to delete all of them // No need to sort pods if we are about to delete all of them
if rc.Spec.Replicas != 0 { if *(rc.Spec.Replicas) != 0 {
// Sort the pods in the order such that not-ready < ready, unscheduled // Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods // < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible. // in the earlier stages whenever possible.
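Before deleting, the batch is capped at burstReplicas and, unless every replica is going away, the pods are sorted so the least-established ones are removed first. One illustrative way to encode the ordering described in the comment above (the exact priority of the criteria here is an assumption, not the controller's ActivePods sort):

```go
package main

import (
	"fmt"
	"sort"
)

type podState struct {
	Name      string
	Scheduled bool // has a node assigned
	Running   bool // phase is Running rather than Pending
	Ready     bool // Ready condition is true
}

// rank orders pods so the least-established come first: not-ready < ready,
// unscheduled < scheduled, pending < running. Lower rank means "delete first".
func rank(p podState) int {
	r := 0
	if p.Ready {
		r += 4
	}
	if p.Scheduled {
		r += 2
	}
	if p.Running {
		r++
	}
	return r
}

func main() {
	pods := []podState{
		{Name: "ready", Scheduled: true, Running: true, Ready: true},
		{Name: "pending", Scheduled: true},
		{Name: "unscheduled"},
	}
	sort.Slice(pods, func(i, j int) bool { return rank(pods[i]) < rank(pods[j]) })
	for _, p := range pods {
		fmt.Println(p.Name) // unscheduled, pending, ready: earlier ones deleted first
	}
}
```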
@@ -636,7 +635,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
if err != nil { if err != nil {
return err return err
} }
rc := *obj.(*api.ReplicationController) rc := *obj.(*v1.ReplicationController)
// Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in // Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in
// and update the expectations after we've retrieved active pods from the store. If a new pod enters // and update the expectations after we've retrieved active pods from the store. If a new pod enters
@@ -653,7 +652,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
// NOTE: filteredPods are pointing to objects from cache - if you need to // NOTE: filteredPods are pointing to objects from cache - if you need to
// modify them, you need to copy it first. // modify them, you need to copy it first.
// TODO: Do the List and Filter in a single pass, or use an index. // TODO: Do the List and Filter in a single pass, or use an index.
var filteredPods []*api.Pod var filteredPods []*v1.Pod
if rm.garbageCollectorEnabled { if rm.garbageCollectorEnabled {
// list all pods to include the pods that don't match the rc's selector // list all pods to include the pods that don't match the rc's selector
// anymore but has the stale controller ref. // anymore but has the stale controller ref.

View File

@@ -27,14 +27,14 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -49,7 +49,7 @@ import (
var alwaysReady = func() bool { return true } var alwaysReady = func() bool { return true }
func getKey(rc *api.ReplicationController, t *testing.T) string { func getKey(rc *v1.ReplicationController, t *testing.T) string {
if key, err := controller.KeyFunc(rc); err != nil { if key, err := controller.KeyFunc(rc); err != nil {
t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err) t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err)
return "" return ""
@@ -58,36 +58,36 @@ func getKey(rc *api.ReplicationController, t *testing.T) string {
} }
} }
func newReplicationController(replicas int) *api.ReplicationController { func newReplicationController(replicas int) *v1.ReplicationController {
rc := &api.ReplicationController{ rc := &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()}, TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(), UID: uuid.NewUUID(),
Name: "foobar", Name: "foobar",
Namespace: api.NamespaceDefault, Namespace: v1.NamespaceDefault,
ResourceVersion: "18", ResourceVersion: "18",
}, },
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Replicas: int32(replicas), Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
Template: &api.PodTemplateSpec{ Template: &v1.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{ Labels: map[string]string{
"name": "foo", "name": "foo",
"type": "production", "type": "production",
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Image: "foo/bar", Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault, TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent, ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(), SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
}, },
}, },
RestartPolicy: api.RestartPolicyAlways, RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: api.DNSDefault, DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{ NodeSelector: map[string]string{
"baz": "blah", "baz": "blah",
}, },
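The switch to v1 types makes Spec.Replicas a *int32, which is why the fixture now builds it with an inline closure. A tiny helper is the usual way to keep such literals readable; int32Ptr here is my name for it, not something defined in this diff:

```go
package main

import "fmt"

// int32Ptr returns a pointer to an int32 value; handy when filling
// v1-style specs whose numeric fields are pointers so that "unset" and "0"
// can be told apart.
func int32Ptr(i int32) *int32 { return &i }

type spec struct {
	Replicas *int32
}

func main() {
	s := spec{Replicas: int32Ptr(3)} // equivalent to the inline closure in the test fixture
	fmt.Println(*s.Replicas)         // 3

	var unset spec
	fmt.Println(unset.Replicas == nil) // true: "not specified" survives round-trips
}
```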
@@ -99,39 +99,39 @@ func newReplicationController(replicas int) *api.ReplicationController {
} }
// create a pod with the given phase for the given rc (same selectors and namespace). // create a pod with the given phase for the given rc (same selectors and namespace).
func newPod(name string, rc *api.ReplicationController, status api.PodPhase, lastTransitionTime *unversioned.Time) *api.Pod { func newPod(name string, rc *v1.ReplicationController, status v1.PodPhase, lastTransitionTime *unversioned.Time) *v1.Pod {
var conditions []api.PodCondition var conditions []v1.PodCondition
if status == api.PodRunning { if status == v1.PodRunning {
condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue} condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
if lastTransitionTime != nil { if lastTransitionTime != nil {
condition.LastTransitionTime = *lastTransitionTime condition.LastTransitionTime = *lastTransitionTime
} }
conditions = append(conditions, condition) conditions = append(conditions, condition)
} }
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Labels: rc.Spec.Selector, Labels: rc.Spec.Selector,
Namespace: rc.Namespace, Namespace: rc.Namespace,
}, },
Status: api.PodStatus{Phase: status, Conditions: conditions}, Status: v1.PodStatus{Phase: status, Conditions: conditions},
} }
} }
// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store. // create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList { func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController, name string) *v1.PodList {
pods := []api.Pod{} pods := []v1.Pod{}
var trueVar = true var trueVar = true
controllerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar} controllerReference := v1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
for i := 0; i < count; i++ { for i := 0; i < count; i++ {
pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status, nil) pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status, nil)
pod.OwnerReferences = []api.OwnerReference{controllerReference} pod.OwnerReferences = []v1.OwnerReference{controllerReference}
if store != nil { if store != nil {
store.Add(pod) store.Add(pod)
} }
pods = append(pods, *pod) pods = append(pods, *pod)
} }
return &api.PodList{ return &v1.PodList{
Items: pods, Items: pods,
} }
} }
@@ -158,7 +158,7 @@ type serverResponse struct {
} }
func TestSyncReplicationControllerDoesNothing(t *testing.T) { func TestSyncReplicationControllerDoesNothing(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -166,7 +166,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
// 2 running pods, a controller with 2 replicas, sync is a no-op // 2 running pods, a controller with 2 replicas, sync is a no-op
controllerSpec := newReplicationController(2) controllerSpec := newReplicationController(2)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod") newPodList(manager.podStore.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
manager.syncReplicationController(getKey(controllerSpec, t)) manager.syncReplicationController(getKey(controllerSpec, t))
@@ -174,7 +174,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
} }
func TestSyncReplicationControllerDeletes(t *testing.T) { func TestSyncReplicationControllerDeletes(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -183,14 +183,14 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
// 2 running pods and a controller with 1 replica, one pod delete expected // 2 running pods and a controller with 1 replica, one pod delete expected
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod") newPodList(manager.podStore.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
manager.syncReplicationController(getKey(controllerSpec, t)) manager.syncReplicationController(getKey(controllerSpec, t))
validateSyncReplication(t, &fakePodControl, 0, 1, 0) validateSyncReplication(t, &fakePodControl, 0, 1, 0)
} }
func TestDeleteFinalStateUnknown(t *testing.T) { func TestDeleteFinalStateUnknown(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -206,7 +206,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
// the controller matching the selectors of the deleted pod into the work queue. // the controller matching the selectors of the deleted pod into the work queue.
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod") pods := newPodList(nil, 1, v1.PodRunning, controllerSpec, "pod")
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]}) manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
go manager.worker() go manager.worker()
@@ -223,7 +223,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
} }
func TestSyncReplicationControllerCreates(t *testing.T) { func TestSyncReplicationControllerCreates(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -245,7 +245,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -253,8 +253,8 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
activePods := 5 activePods := 5
rc := newReplicationController(activePods) rc := newReplicationController(activePods)
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)} rc.Status = v1.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
newPodList(manager.podStore.Indexer, activePods, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, activePods, v1.PodRunning, rc, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -267,7 +267,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
// This response body is just so we don't err out decoding the http response, all // This response body is just so we don't err out decoding the http response, all
// we care about is the request body sent below. // we care about is the request body sent below.
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response fakeHandler.ResponseBody = response
rc.Generation = rc.Generation + 1 rc.Generation = rc.Generation + 1
@@ -286,7 +286,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -294,16 +294,16 @@ func TestControllerUpdateReplicas(t *testing.T) {
// Status.Replicas should update to match the number of pods in the system; 1 new pod should be created. // Status.Replicas should update to match the number of pods in the system; 1 new pod should be created.
rc := newReplicationController(5) rc := newReplicationController(5)
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0} rc.Status = v1.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
rc.Generation = 1 rc.Generation = 1
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
rcCopy := *rc rcCopy := *rc
extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"} extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
rcCopy.Spec.Selector = extraLabelMap rcCopy.Spec.Selector = extraLabelMap
newPodList(manager.podStore.Indexer, 2, api.PodRunning, &rcCopy, "podWithExtraLabel") newPodList(manager.podStore.Indexer, 2, v1.PodRunning, &rcCopy, "podWithExtraLabel")
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response fakeHandler.ResponseBody = response
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
@@ -315,7 +315,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
// 2. Status.FullyLabeledReplicas should equal the number of pods that // 2. Status.FullyLabeledReplicas should equal the number of pods that
// have the extra labels, i.e., 2. // have the extra labels, i.e., 2.
// 3. Every update to the status should include the Generation of the spec. // 3. Every update to the status should include the Generation of the spec.
rc.Status = api.ReplicationControllerStatus{Replicas: 4, ReadyReplicas: 4, AvailableReplicas: 4, ObservedGeneration: 1} rc.Status = v1.ReplicationControllerStatus{Replicas: 4, ReadyReplicas: 4, AvailableReplicas: 4, ObservedGeneration: 1}
decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc) fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
@@ -331,7 +331,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
} }
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -339,7 +339,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
controllerSpec := newReplicationController(2) controllerSpec := newReplicationController(2)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Indexer, 1, api.PodRunning, controllerSpec, "pod") newPodList(manager.podStore.Indexer, 1, v1.PodRunning, controllerSpec, "pod")
// Creates a replica and sets expectations // Creates a replica and sets expectations
controllerSpec.Status.Replicas = 1 controllerSpec.Status.Replicas = 1
@@ -386,47 +386,47 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
} }
func TestPodControllerLookup(t *testing.T) { func TestPodControllerLookup(t *testing.T) {
manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
testCases := []struct { testCases := []struct {
inRCs []*api.ReplicationController inRCs []*v1.ReplicationController
pod *api.Pod pod *v1.Pod
outRCName string outRCName string
}{ }{
// pods without labels don't match any rcs // pods without labels don't match any rcs
{ {
inRCs: []*api.ReplicationController{ inRCs: []*v1.ReplicationController{
{ObjectMeta: api.ObjectMeta{Name: "basic"}}}, {ObjectMeta: v1.ObjectMeta{Name: "basic"}}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}}, pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}},
outRCName: "", outRCName: "",
}, },
// Matching labels, not namespace // Matching labels, not namespace
{ {
inRCs: []*api.ReplicationController{ inRCs: []*v1.ReplicationController{
{ {
ObjectMeta: api.ObjectMeta{Name: "foo"}, ObjectMeta: v1.ObjectMeta{Name: "foo"},
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
}, },
}, },
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRCName: "", outRCName: "",
}, },
// Matching ns and labels returns the key to the rc, not the rc name // Matching ns and labels returns the key to the rc, not the rc name
{ {
inRCs: []*api.ReplicationController{ inRCs: []*v1.ReplicationController{
{ {
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"}, ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"}, Selector: map[string]string{"foo": "bar"},
}, },
}, },
}, },
pod: &api.Pod{ pod: &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}}, Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRCName: "bar", outRCName: "bar",
}, },
@@ -452,7 +452,7 @@ func TestWatchControllers(t *testing.T) {
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
var testControllerSpec api.ReplicationController var testControllerSpec v1.ReplicationController
received := make(chan string) received := make(chan string)
// The update sent through the fakeWatcher should make its way into the workqueue, // The update sent through the fakeWatcher should make its way into the workqueue,
@@ -464,8 +464,8 @@ func TestWatchControllers(t *testing.T) {
if !exists || err != nil { if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key) t.Errorf("Expected to find controller under key %v", key)
} }
controllerSpec := *obj.(*api.ReplicationController) controllerSpec := *obj.(*v1.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) { if !v1.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec) t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
} }
close(received) close(received)
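The assertion above relies on Semantic.DeepDerivative, which ignores fields left unset in the expected object and requires every other field to match the actual one. A tiny stand-in illustration of that behavior on a two-field struct (the real implementation is reflective and generic):

package main

import "fmt"

type spec struct {
    Name     string
    Replicas int32
}

// deepDerivative is a stand-in for Semantic.DeepDerivative on this tiny struct:
// fields left at their zero value in expected are ignored, any other field must
// equal the corresponding field in actual.
func deepDerivative(expected, actual spec) bool {
    if expected.Name != "" && expected.Name != actual.Name {
        return false
    }
    if expected.Replicas != 0 && expected.Replicas != actual.Replicas {
        return false
    }
    return true
}

func main() {
    fmt.Println(deepDerivative(spec{Name: "foo"}, spec{Name: "foo", Replicas: 3}))              // true - Replicas unset in expected
    fmt.Println(deepDerivative(spec{Name: "foo", Replicas: 2}, spec{Name: "foo", Replicas: 3})) // false - set and different
}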
@@ -507,8 +507,8 @@ func TestWatchPods(t *testing.T) {
if !exists || err != nil { if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key) t.Errorf("Expected to find controller under key %v", key)
} }
controllerSpec := obj.(*api.ReplicationController) controllerSpec := obj.(*v1.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) { if !v1.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec) t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
} }
close(received) close(received)
@@ -522,9 +522,9 @@ func TestWatchPods(t *testing.T) {
go manager.internalPodInformer.Run(stopCh) go manager.internalPodInformer.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh) go wait.Until(manager.worker, 10*time.Millisecond, stopCh)
pods := newPodList(nil, 1, api.PodRunning, testControllerSpec, "pod") pods := newPodList(nil, 1, v1.PodRunning, testControllerSpec, "pod")
testPod := pods.Items[0] testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod) fakeWatch.Add(&testPod)
select { select {
@@ -545,7 +545,7 @@ func TestUpdatePods(t *testing.T) {
if !exists || err != nil { if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key) t.Errorf("Expected to find controller under key %v", key)
} }
received <- obj.(*api.ReplicationController).Name received <- obj.(*v1.ReplicationController).Name
return nil return nil
} }
@@ -564,7 +564,7 @@ func TestUpdatePods(t *testing.T) {
// case 1: We put in the podStore a pod with labels matching // case 1: We put in the podStore a pod with labels matching
// testControllerSpec1, then update its labels to match testControllerSpec2. // testControllerSpec1, then update its labels to match testControllerSpec2.
// We expect to receive a sync request for both controllers. // We expect to receive a sync request for both controllers.
pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, testControllerSpec1, "pod").Items[0] pod1 := newPodList(manager.podStore.Indexer, 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
pod1.ResourceVersion = "1" pod1.ResourceVersion = "1"
pod2 := pod1 pod2 := pod1
pod2.Labels = testControllerSpec2.Spec.Selector pod2.Labels = testControllerSpec2.Spec.Selector
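Case 1 depends on updatePod enqueueing both the controller that matched the old labels and the controller that matches the new labels, so a label change that moves a pod between controllers syncs both. A rough stand-in of that dispatch (the real manager resolves controllers via its store and pushes keys onto a work queue):

package main

import "fmt"

// enqueue is a stand-in for pushing a controller key onto the manager's work queue.
func enqueue(queue map[string]bool, key string) { queue[key] = true }

// updatePod enqueues the controller matching the old labels and the controller
// matching the new labels, so a label change that moves a pod between
// controllers produces a sync request for both of them.
func updatePod(queue map[string]bool, oldController, newController string) {
    if oldController != "" {
        enqueue(queue, oldController)
    }
    if newController != "" && newController != oldController {
        enqueue(queue, newController)
    }
}

func main() {
    queue := map[string]bool{}
    // The pod matched testControllerSpec1; its labels were then changed to match testControllerSpec2.
    updatePod(queue, "default/testControllerSpec1", "default/testControllerSpec2")
    fmt.Println(len(queue)) // 2 - both controllers get a sync request
}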
@@ -612,14 +612,14 @@ func TestControllerUpdateRequeue(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
rc := newReplicationController(1) rc := newReplicationController(1)
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: 2} rc.Status = v1.ReplicationControllerStatus{Replicas: 2}
newPodList(manager.podStore.Indexer, 1, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, 1, v1.PodRunning, rc, "pod")
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -640,11 +640,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
return true, rc, nil return true, rc, nil
}) })
c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) { c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, &api.ReplicationController{}, fmt.Errorf("Fake error") return true, &v1.ReplicationController{}, fmt.Errorf("Fake error")
}) })
fakeRCClient := c.Core().ReplicationControllers("default") fakeRCClient := c.Core().ReplicationControllers("default")
numReplicas := int32(10) numReplicas := int32(10)
status := api.ReplicationControllerStatus{Replicas: numReplicas} status := v1.ReplicationControllerStatus{Replicas: numReplicas}
updateReplicationControllerStatus(fakeRCClient, *rc, status) updateReplicationControllerStatus(fakeRCClient, *rc, status)
updates, gets := 0, 0 updates, gets := 0, 0
for _, a := range c.Actions() { for _, a := range c.Actions() {
@@ -664,7 +664,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
updates++ updates++
// Confirm that the update has the right status.Replicas even though the Get // Confirm that the update has the right status.Replicas even though the Get
// returned an rc with replicas=1. // returned an rc with replicas=1.
if c, ok := action.GetObject().(*api.ReplicationController); !ok { if c, ok := action.GetObject().(*v1.ReplicationController); !ok {
t.Errorf("Expected an rc as the argument to update, got %T", c) t.Errorf("Expected an rc as the argument to update, got %T", c)
} else if c.Status.Replicas != numReplicas { } else if c.Status.Replicas != numReplicas {
t.Errorf("Expected update for rc to contain replicas %v, got %v instead", t.Errorf("Expected update for rc to contain replicas %v, got %v instead",
@@ -682,7 +682,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite. // TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) { func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -692,7 +692,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
expectedPods := 0 expectedPods := 0
pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod") pods := newPodList(nil, numReplicas, v1.PodPending, controllerSpec, "pod")
rcKey, err := controller.KeyFunc(controllerSpec) rcKey, err := controller.KeyFunc(controllerSpec)
if err != nil { if err != nil {
@@ -702,7 +702,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Size up the controller, then size it down, and confirm the expected create/delete pattern // Size up the controller, then size it down, and confirm the expected create/delete pattern
for _, replicas := range []int{numReplicas, 0} { for _, replicas := range []int{numReplicas, 0} {
controllerSpec.Spec.Replicas = int32(replicas) *(controllerSpec.Spec.Replicas) = int32(replicas)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
for i := 0; i < numReplicas; i += burstReplicas { for i := 0; i < numReplicas; i += burstReplicas {
@@ -745,11 +745,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// To accurately simulate a watch we must delete the exact pods // To accurately simulate a watch we must delete the exact pods
// the rc is waiting for. // the rc is waiting for.
expectedDels := manager.expectations.GetUIDs(getKey(controllerSpec, t)) expectedDels := manager.expectations.GetUIDs(getKey(controllerSpec, t))
podsToDelete := []*api.Pod{} podsToDelete := []*v1.Pod{}
for _, key := range expectedDels.List() { for _, key := range expectedDels.List() {
nsName := strings.Split(key, "/") nsName := strings.Split(key, "/")
podsToDelete = append(podsToDelete, &api.Pod{ podsToDelete = append(podsToDelete, &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: nsName[1], Name: nsName[1],
Namespace: nsName[0], Namespace: nsName[0],
Labels: controllerSpec.Spec.Selector, Labels: controllerSpec.Spec.Selector,
@@ -789,8 +789,8 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
t.Fatalf("Waiting on unexpected number of deletes.") t.Fatalf("Waiting on unexpected number of deletes.")
} }
nsName := strings.Split(expectedDel.List()[0], "/") nsName := strings.Split(expectedDel.List()[0], "/")
lastPod := &api.Pod{ lastPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: nsName[1], Name: nsName[1],
Namespace: nsName[0], Namespace: nsName[0],
Labels: controllerSpec.Spec.Selector, Labels: controllerSpec.Spec.Selector,
@@ -804,11 +804,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Confirm that we've created the right number of replicas // Confirm that we've created the right number of replicas
activePods := int32(len(manager.podStore.Indexer.List())) activePods := int32(len(manager.podStore.Indexer.List()))
if activePods != controllerSpec.Spec.Replicas { if activePods != *(controllerSpec.Spec.Replicas) {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods) t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(controllerSpec.Spec.Replicas), activePods)
} }
// Replenish the pod list, since we cut it down while sizing up // Replenish the pod list, since we cut it down while sizing up
pods = newPodList(nil, replicas, api.PodRunning, controllerSpec, "pod") pods = newPodList(nil, replicas, v1.PodRunning, controllerSpec, "pod")
} }
} }
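The sizing loop above grows and then shrinks the controller in slices of at most burstReplicas per sync, registering expectations before each slice. A compact stand-in of that batching arithmetic, without the real expectations cache:

package main

import "fmt"

// batches returns the create (or delete) batch sizes used to move from current
// to desired replicas when at most burst operations may be in flight per sync.
func batches(current, desired, burst int) []int {
    diff := desired - current
    if diff < 0 {
        diff = -diff
    }
    var out []int
    for diff > 0 {
        n := diff
        if n > burst {
            n = burst
        }
        out = append(out, n)
        diff -= n
    }
    return out
}

func main() {
    fmt.Println(batches(0, 25, 10)) // [10 10 5] - sizing up
    fmt.Println(batches(25, 0, 10)) // [10 10 5] - sizing down
}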
@@ -832,7 +832,7 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods // TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations. // and checking expectations.
func TestRCSyncExpectations(t *testing.T) { func TestRCSyncExpectations(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -840,7 +840,7 @@ func TestRCSyncExpectations(t *testing.T) {
controllerSpec := newReplicationController(2) controllerSpec := newReplicationController(2)
manager.rcStore.Indexer.Add(controllerSpec) manager.rcStore.Indexer.Add(controllerSpec)
pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod") pods := newPodList(nil, 2, v1.PodPending, controllerSpec, "pod")
manager.podStore.Indexer.Add(&pods.Items[0]) manager.podStore.Indexer.Add(&pods.Items[0])
postExpectationsPod := pods.Items[1] postExpectationsPod := pods.Items[1]
@@ -857,7 +857,7 @@ func TestRCSyncExpectations(t *testing.T) {
} }
func TestDeleteControllerAndExpectations(t *testing.T) { func TestDeleteControllerAndExpectations(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -899,7 +899,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
} }
func TestRCManagerNotReady(t *testing.T) { func TestRCManagerNotReady(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
manager.podControl = &fakePodControl manager.podControl = &fakePodControl
@@ -925,10 +925,10 @@ func TestRCManagerNotReady(t *testing.T) {
} }
// shuffle returns a new shuffled list of replication controllers. // shuffle returns a new shuffled list of replication controllers.
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController { func shuffle(controllers []*v1.ReplicationController) []*v1.ReplicationController {
numControllers := len(controllers) numControllers := len(controllers)
randIndexes := rand.Perm(numControllers) randIndexes := rand.Perm(numControllers)
shuffled := make([]*api.ReplicationController, numControllers) shuffled := make([]*v1.ReplicationController, numControllers)
for i := 0; i < numControllers; i++ { for i := 0; i < numControllers; i++ {
shuffled[i] = controllers[randIndexes[i]] shuffled[i] = controllers[randIndexes[i]]
} }
@@ -936,14 +936,14 @@ func shuffle(controllers []*api.ReplicationController) []*api.ReplicationControl
} }
func TestOverlappingRCs(t *testing.T) { func TestOverlappingRCs(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
// Create 10 rcs, shuffle them randomly and insert them into the rc manager's store // Create 10 rcs, shuffle them randomly and insert them into the rc manager's store
var controllers []*api.ReplicationController var controllers []*v1.ReplicationController
for j := 1; j < 10; j++ { for j := 1; j < 10; j++ {
controllerSpec := newReplicationController(1) controllerSpec := newReplicationController(1)
controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local) controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
@@ -955,7 +955,7 @@ func TestOverlappingRCs(t *testing.T) {
manager.rcStore.Indexer.Add(shuffledControllers[j]) manager.rcStore.Indexer.Add(shuffledControllers[j])
} }
// Add a pod and make sure only the oldest rc is synced // Add a pod and make sure only the oldest rc is synced
pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod") pods := newPodList(nil, 1, v1.PodPending, controllers[0], "pod")
rcKey := getKey(controllers[0], t) rcKey := getKey(controllers[0], t)
manager.addPod(&pods.Items[0]) manager.addPod(&pods.Items[0])
@@ -967,7 +967,7 @@ func TestOverlappingRCs(t *testing.T) {
} }
func TestDeletionTimestamp(t *testing.T) { func TestDeletionTimestamp(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
@@ -977,7 +977,7 @@ func TestDeletionTimestamp(t *testing.T) {
if err != nil { if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err) t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
} }
pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0] pod := newPodList(nil, 1, v1.PodPending, controllerSpec, "pod").Items[0]
pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()} pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
pod.ResourceVersion = "1" pod.ResourceVersion = "1"
manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)}) manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
@@ -998,7 +998,7 @@ func TestDeletionTimestamp(t *testing.T) {
// An update from no deletion timestamp to having one should be treated // An update from no deletion timestamp to having one should be treated
// as a deletion. // as a deletion.
oldPod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0] oldPod := newPodList(nil, 1, v1.PodPending, controllerSpec, "pod").Items[0]
oldPod.ResourceVersion = "2" oldPod.ResourceVersion = "2"
manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)}) manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
manager.updatePod(&oldPod, &pod) manager.updatePod(&oldPod, &pod)
@@ -1016,8 +1016,8 @@ func TestDeletionTimestamp(t *testing.T) {
// An update to the pod (including an update to the deletion timestamp) // An update to the pod (including an update to the deletion timestamp)
// should not be counted as a second delete. // should not be counted as a second delete.
secondPod := &api.Pod{ secondPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: pod.Namespace, Namespace: pod.Namespace,
Name: "secondPod", Name: "secondPod",
Labels: pod.Labels, Labels: pod.Labels,
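TestDeletionTimestamp is built around two rules: an update that goes from no deletion timestamp to having one is treated as a delete, and further updates to an already-deleting pod (or to a different pod such as secondPod) must not be counted as a second delete. A stand-in sketch of that transition check; the real code records deletions in the expectations cache keyed by controller.PodKey:

package main

import (
    "fmt"
    "time"
)

type podState struct {
    key               string
    deletionTimestamp *time.Time
}

// observedDeletions is a stand-in for the controller's deletion expectations.
type observedDeletions map[string]bool

// updatePod mirrors the rule exercised above: only the transition from
// "no deletion timestamp" to "has one" counts as a deletion observation.
func (o observedDeletions) updatePod(oldPod, newPod podState) {
    if oldPod.deletionTimestamp == nil && newPod.deletionTimestamp != nil {
        o[newPod.key] = true
    }
}

func main() {
    now := time.Now()
    obs := observedDeletions{}
    obs.updatePod(podState{key: "ns/pod"}, podState{key: "ns/pod", deletionTimestamp: &now})
    obs.updatePod(podState{key: "ns/pod", deletionTimestamp: &now}, podState{key: "ns/pod", deletionTimestamp: &now}) // no double count
    fmt.Println(len(obs)) // 1
}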
@@ -1057,20 +1057,20 @@ func TestDeletionTimestamp(t *testing.T) {
} }
func BenchmarkGetPodControllerMultiNS(b *testing.B) { func BenchmarkGetPodControllerMultiNS(b *testing.B) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
const nsNum = 1000 const nsNum = 1000
pods := []api.Pod{} pods := []v1.Pod{}
for i := 0; i < nsNum; i++ { for i := 0; i < nsNum; i++ {
ns := fmt.Sprintf("ns-%d", i) ns := fmt.Sprintf("ns-%d", i)
for j := 0; j < 10; j++ { for j := 0; j < 10; j++ {
rcName := fmt.Sprintf("rc-%d", j) rcName := fmt.Sprintf("rc-%d", j)
for k := 0; k < 10; k++ { for k := 0; k < 10; k++ {
podName := fmt.Sprintf("pod-%d-%d", j, k) podName := fmt.Sprintf("pod-%d-%d", j, k)
pods = append(pods, api.Pod{ pods = append(pods, v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: ns, Namespace: ns,
Labels: map[string]string{"rcName": rcName}, Labels: map[string]string{"rcName": rcName},
@@ -1084,9 +1084,9 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
ns := fmt.Sprintf("ns-%d", i) ns := fmt.Sprintf("ns-%d", i)
for j := 0; j < 10; j++ { for j := 0; j < 10; j++ {
rcName := fmt.Sprintf("rc-%d", j) rcName := fmt.Sprintf("rc-%d", j)
manager.rcStore.Indexer.Add(&api.ReplicationController{ manager.rcStore.Indexer.Add(&v1.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: ns}, ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: ns},
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName}, Selector: map[string]string{"rcName": rcName},
}, },
}) })
@@ -1103,19 +1103,19 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
} }
func BenchmarkGetPodControllerSingleNS(b *testing.B) { func BenchmarkGetPodControllerSingleNS(b *testing.B) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)
const rcNum = 1000 const rcNum = 1000
const replicaNum = 3 const replicaNum = 3
pods := []api.Pod{} pods := []v1.Pod{}
for i := 0; i < rcNum; i++ { for i := 0; i < rcNum; i++ {
rcName := fmt.Sprintf("rc-%d", i) rcName := fmt.Sprintf("rc-%d", i)
for j := 0; j < replicaNum; j++ { for j := 0; j < replicaNum; j++ {
podName := fmt.Sprintf("pod-%d-%d", i, j) podName := fmt.Sprintf("pod-%d-%d", i, j)
pods = append(pods, api.Pod{ pods = append(pods, v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: "foo", Namespace: "foo",
Labels: map[string]string{"rcName": rcName}, Labels: map[string]string{"rcName": rcName},
@@ -1126,9 +1126,9 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {
for i := 0; i < rcNum; i++ { for i := 0; i < rcNum; i++ {
rcName := fmt.Sprintf("rc-%d", i) rcName := fmt.Sprintf("rc-%d", i)
manager.rcStore.Indexer.Add(&api.ReplicationController{ manager.rcStore.Indexer.Add(&v1.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: "foo"}, ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: "foo"},
Spec: api.ReplicationControllerSpec{ Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName}, Selector: map[string]string{"rcName": rcName},
}, },
}) })
@@ -1158,10 +1158,10 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
rc := newReplicationController(2) rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
var trueVar = true var trueVar = true
otherControllerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar} otherControllerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
// add to podStore a matching Pod controlled by another controller. Expect no patch. // add to podStore a matching Pod controlled by another controller. Expect no patch.
pod := newPod("pod", rc, api.PodRunning, nil) pod := newPod("pod", rc, v1.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{otherControllerReference} pod.OwnerReferences = []v1.OwnerReference{otherControllerReference}
manager.podStore.Indexer.Add(pod) manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t)) err := manager.syncReplicationController(getKey(rc, t))
if err != nil { if err != nil {
@@ -1178,9 +1178,9 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
// add to podStore one more matching pod that doesn't have a controller // add to podStore one more matching pod that doesn't have a controller
// ref, but has an owner ref pointing to another object. Expect a patch to // ref, but has an owner ref pointing to another object. Expect a patch to
// take control of it. // take control of it.
unrelatedOwnerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"} unrelatedOwnerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rc, api.PodRunning, nil) pod := newPod("pod", rc, v1.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{unrelatedOwnerReference} pod.OwnerReferences = []v1.OwnerReference{unrelatedOwnerReference}
manager.podStore.Indexer.Add(pod) manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t)) err := manager.syncReplicationController(getKey(rc, t))
@@ -1197,9 +1197,9 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
// add to podStore a matching pod that has an ownerRef pointing to the rc, // add to podStore a matching pod that has an ownerRef pointing to the rc,
// but ownerRef.Controller is false. Expect a patch to take control of it. // but ownerRef.Controller is false. Expect a patch to take control of it.
rcOwnerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name} rcOwnerReference := v1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
pod := newPod("pod", rc, api.PodRunning, nil) pod := newPod("pod", rc, v1.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{rcOwnerReference} pod.OwnerReferences = []v1.OwnerReference{rcOwnerReference}
manager.podStore.Indexer.Add(pod) manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t)) err := manager.syncReplicationController(getKey(rc, t))
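The three adoption tests above reduce to one decision per matching pod: skip it when another controller already owns it, otherwise send a patch to take control, including when the pod already points at this rc but with Controller=false. A stand-in sketch of that decision only, not of the patch the manager actually sends:

package main

import "fmt"

type ownerRef struct {
    uid        string
    controller bool
}

// shouldPatch reports whether the rc should patch a matching pod to take
// control of it: no patch when some owner ref already has Controller=true
// (another controller owns the pod), a patch otherwise, even if the pod
// carries non-controller owner refs.
func shouldPatch(refs []ownerRef) bool {
    for _, r := range refs {
        if r.controller {
            return false
        }
    }
    return true
}

func main() {
    fmt.Println(shouldPatch([]ownerRef{{uid: "another-rc", controller: true}})) // false - controlled by another controller, no patch
    fmt.Println(shouldPatch([]ownerRef{{uid: "job-1", controller: false}}))     // true  - unrelated owner ref, patch to adopt
    fmt.Println(shouldPatch([]ownerRef{{uid: "this-rc", controller: false}}))   // true  - ref to this rc but Controller=false, patch
}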
@@ -1216,8 +1216,8 @@ func TestPatchPodFails(t *testing.T) {
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
// add to podStore two matching pods. Expect two patches to take control // add to podStore two matching pods. Expect two patches to take control
// of them. // of them.
manager.podStore.Indexer.Add(newPod("pod1", rc, api.PodRunning, nil)) manager.podStore.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod2", rc, api.PodRunning, nil)) manager.podStore.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
// let both patches fail. The rc manager will assume it fails to take // let both patches fail. The rc manager will assume it fails to take
// control of the pods and create new ones. // control of the pods and create new ones.
fakePodControl.Err = fmt.Errorf("Fake Error") fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1235,9 +1235,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
// add to podStore three matching pods. Expect three patches to take control // add to podStore three matching pods. Expect three patches to take control
// of them, and later delete one of them. // of them, and later delete one of them.
manager.podStore.Indexer.Add(newPod("pod1", rc, api.PodRunning, nil)) manager.podStore.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod2", rc, api.PodRunning, nil)) manager.podStore.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod3", rc, api.PodRunning, nil)) manager.podStore.Indexer.Add(newPod("pod3", rc, v1.PodRunning, nil))
err := manager.syncReplicationController(getKey(rc, t)) err := manager.syncReplicationController(getKey(rc, t))
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
@@ -1251,11 +1251,11 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
rc := newReplicationController(2) rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
// put one pod in the podStore // put one pod in the podStore
pod := newPod("pod", rc, api.PodRunning, nil) pod := newPod("pod", rc, v1.PodRunning, nil)
pod.ResourceVersion = "1" pod.ResourceVersion = "1"
var trueVar = true var trueVar = true
rcOwnerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar} rcOwnerReference := v1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
pod.OwnerReferences = []api.OwnerReference{rcOwnerReference} pod.OwnerReferences = []v1.OwnerReference{rcOwnerReference}
updatedPod := *pod updatedPod := *pod
// reset the labels // reset the labels
updatedPod.Labels = make(map[string]string) updatedPod.Labels = make(map[string]string)
@@ -1278,7 +1278,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// expect 1 patch to be sent to remove the controllerRef for the pod. // expect 1 patch to be sent to remove the controllerRef for the pod.
// expect 2 creates because the rc.Spec.Replicas=2 and there exists no // expect 2 creates because the *(rc.Spec.Replicas)=2 and there exists no
// matching pod. // matching pod.
validateSyncReplication(t, fakePodControl, 2, 0, 1) validateSyncReplication(t, fakePodControl, 2, 0, 1)
fakePodControl.Clear() fakePodControl.Clear()
@@ -1288,7 +1288,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled() manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2) rc := newReplicationController(2)
// put 2 pods in the podStore // put 2 pods in the podStore
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
// update the RC so that its selector no longer matches the pods // update the RC so that its selector no longer matches the pods
updatedRC := *rc updatedRC := *rc
updatedRC.Spec.Selector = map[string]string{"foo": "baz"} updatedRC.Spec.Selector = map[string]string{"foo": "baz"}
@@ -1309,7 +1309,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
// expect 2 patches to be sent to remove the controllerRef for the pods. // expect 2 patches to be sent to remove the controllerRef for the pods.
// expect 2 creates because the rc.Spec.Replicas=2 and there exists no // expect 2 creates because the *(rc.Spec.Replicas)=2 and there exists no
// matching pod. // matching pod.
validateSyncReplication(t, fakePodControl, 2, 0, 2) validateSyncReplication(t, fakePodControl, 2, 0, 2)
fakePodControl.Clear() fakePodControl.Clear()
@@ -1323,7 +1323,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
now := unversioned.Now() now := unversioned.Now()
rc.DeletionTimestamp = &now rc.DeletionTimestamp = &now
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
pod1 := newPod("pod1", rc, api.PodRunning, nil) pod1 := newPod("pod1", rc, v1.PodRunning, nil)
manager.podStore.Indexer.Add(pod1) manager.podStore.Indexer.Add(pod1)
// no patch, no create // no patch, no create
@@ -1343,21 +1343,21 @@ func TestReadyReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
// Status.Replicas should update to match the number of pods in the system; 1 new pod should be created. // Status.Replicas should update to match the number of pods in the system; 1 new pod should be created.
rc := newReplicationController(2) rc := newReplicationController(2)
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1} rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
rc.Generation = 1 rc.Generation = 1
manager.rcStore.Indexer.Add(rc) manager.rcStore.Indexer.Add(rc)
newPodList(manager.podStore.Indexer, 2, api.PodPending, rc, "pod") newPodList(manager.podStore.Indexer, 2, v1.PodPending, rc, "pod")
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod") newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response fakeHandler.ResponseBody = response
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
@@ -1366,7 +1366,7 @@ func TestReadyReplicas(t *testing.T) {
manager.syncReplicationController(getKey(rc, t)) manager.syncReplicationController(getKey(rc, t))
// ReadyReplicas should go from 0 to 2. // ReadyReplicas should go from 0 to 2.
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 1} rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 1}
decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc) fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
@@ -1382,13 +1382,13 @@ func TestAvailableReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler) testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close() defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}) c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0) manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady manager.podStoreSynced = alwaysReady
// Status.Replicas should update to match the number of pods in the system; 1 new pod should be created. // Status.Replicas should update to match the number of pods in the system; 1 new pod should be created.
rc := newReplicationController(2) rc := newReplicationController(2)
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, ObservedGeneration: 1} rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, ObservedGeneration: 1}
rc.Generation = 1 rc.Generation = 1
// minReadySeconds set to 15s // minReadySeconds set to 15s
rc.Spec.MinReadySeconds = 15 rc.Spec.MinReadySeconds = 15
@@ -1396,16 +1396,16 @@ func TestAvailableReplicas(t *testing.T) {
// First pod becomes ready 20s ago // First pod becomes ready 20s ago
moment := unversioned.Time{Time: time.Now().Add(-2e10)} moment := unversioned.Time{Time: time.Now().Add(-2e10)}
pod := newPod("pod", rc, api.PodRunning, &moment) pod := newPod("pod", rc, v1.PodRunning, &moment)
manager.podStore.Indexer.Add(pod) manager.podStore.Indexer.Add(pod)
// Second pod becomes ready now // Second pod becomes ready now
otherMoment := unversioned.Now() otherMoment := unversioned.Now()
otherPod := newPod("otherPod", rc, api.PodRunning, &otherMoment) otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment)
manager.podStore.Indexer.Add(otherPod) manager.podStore.Indexer.Add(otherPod)
// This response body is just so we don't err out decoding the http response // This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{}) response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response fakeHandler.ResponseBody = response
fakePodControl := controller.FakePodControl{} fakePodControl := controller.FakePodControl{}
@@ -1414,7 +1414,7 @@ func TestAvailableReplicas(t *testing.T) {
// The controller should see only one available pod. // The controller should see only one available pod.
manager.syncReplicationController(getKey(rc, t)) manager.syncReplicationController(getKey(rc, t))
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 1, ObservedGeneration: 1} rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 1, ObservedGeneration: 1}
decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc) decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc) fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
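TestAvailableReplicas hinges on the availability rule also used by calculateStatus later in this commit: a ready pod counts as available only once it has been ready for at least MinReadySeconds. A self-contained stand-in of that check (the real code calls v1.IsPodAvailable):

package main

import (
    "fmt"
    "time"
)

// isAvailable is a stand-in for v1.IsPodAvailable: a ready pod is available
// once readySince is at least minReadySeconds in the past.
func isAvailable(ready bool, readySince, now time.Time, minReadySeconds int32) bool {
    if !ready {
        return false
    }
    if minReadySeconds == 0 {
        return true
    }
    return readySince.Add(time.Duration(minReadySeconds) * time.Second).Before(now)
}

func main() {
    now := time.Now()
    fmt.Println(isAvailable(true, now.Add(-20*time.Second), now, 15)) // true  - ready 20s ago, MinReadySeconds=15
    fmt.Println(isAvailable(true, now, now, 15))                      // false - just became ready
}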
@@ -1422,35 +1422,35 @@ func TestAvailableReplicas(t *testing.T) {
} }
var ( var (
imagePullBackOff api.ReplicationControllerConditionType = "ImagePullBackOff" imagePullBackOff v1.ReplicationControllerConditionType = "ImagePullBackOff"
condImagePullBackOff = func() api.ReplicationControllerCondition { condImagePullBackOff = func() v1.ReplicationControllerCondition {
return api.ReplicationControllerCondition{ return v1.ReplicationControllerCondition{
Type: imagePullBackOff, Type: imagePullBackOff,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "NonExistentImage", Reason: "NonExistentImage",
} }
} }
condReplicaFailure = func() api.ReplicationControllerCondition { condReplicaFailure = func() v1.ReplicationControllerCondition {
return api.ReplicationControllerCondition{ return v1.ReplicationControllerCondition{
Type: api.ReplicationControllerReplicaFailure, Type: v1.ReplicationControllerReplicaFailure,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "OtherFailure", Reason: "OtherFailure",
} }
} }
condReplicaFailure2 = func() api.ReplicationControllerCondition { condReplicaFailure2 = func() v1.ReplicationControllerCondition {
return api.ReplicationControllerCondition{ return v1.ReplicationControllerCondition{
Type: api.ReplicationControllerReplicaFailure, Type: v1.ReplicationControllerReplicaFailure,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "AnotherFailure", Reason: "AnotherFailure",
} }
} }
status = func() *api.ReplicationControllerStatus { status = func() *v1.ReplicationControllerStatus {
return &api.ReplicationControllerStatus{ return &v1.ReplicationControllerStatus{
Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}, Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()},
} }
} }
) )
@@ -1461,9 +1461,9 @@ func TestGetCondition(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
status api.ReplicationControllerStatus status v1.ReplicationControllerStatus
condType api.ReplicationControllerConditionType condType v1.ReplicationControllerConditionType
condStatus api.ConditionStatus condStatus v1.ConditionStatus
condReason string condReason string
expected bool expected bool
@@ -1472,7 +1472,7 @@ func TestGetCondition(t *testing.T) {
name: "condition exists", name: "condition exists",
status: *exampleStatus, status: *exampleStatus,
condType: api.ReplicationControllerReplicaFailure, condType: v1.ReplicationControllerReplicaFailure,
expected: true, expected: true,
}, },
@@ -1499,34 +1499,34 @@ func TestSetCondition(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
status *api.ReplicationControllerStatus status *v1.ReplicationControllerStatus
cond api.ReplicationControllerCondition cond v1.ReplicationControllerCondition
expectedStatus *api.ReplicationControllerStatus expectedStatus *v1.ReplicationControllerStatus
}{ }{
{ {
name: "set for the first time", name: "set for the first time",
status: &api.ReplicationControllerStatus{}, status: &v1.ReplicationControllerStatus{},
cond: condReplicaFailure(), cond: condReplicaFailure(),
expectedStatus: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}}, expectedStatus: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()}},
}, },
{ {
name: "simple set", name: "simple set",
status: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condImagePullBackOff()}}, status: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condImagePullBackOff()}},
cond: condReplicaFailure(), cond: condReplicaFailure(),
expectedStatus: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condImagePullBackOff(), condReplicaFailure()}}, expectedStatus: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condImagePullBackOff(), condReplicaFailure()}},
}, },
{ {
name: "overwrite", name: "overwrite",
status: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}}, status: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()}},
cond: condReplicaFailure2(), cond: condReplicaFailure2(),
expectedStatus: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure2()}}, expectedStatus: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure2()}},
}, },
} }
@@ -1542,26 +1542,26 @@ func TestRemoveCondition(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
status *api.ReplicationControllerStatus status *v1.ReplicationControllerStatus
condType api.ReplicationControllerConditionType condType v1.ReplicationControllerConditionType
expectedStatus *api.ReplicationControllerStatus expectedStatus *v1.ReplicationControllerStatus
}{ }{
{ {
name: "remove from empty status", name: "remove from empty status",
status: &api.ReplicationControllerStatus{}, status: &v1.ReplicationControllerStatus{},
condType: api.ReplicationControllerReplicaFailure, condType: v1.ReplicationControllerReplicaFailure,
expectedStatus: &api.ReplicationControllerStatus{}, expectedStatus: &v1.ReplicationControllerStatus{},
}, },
{ {
name: "simple remove", name: "simple remove",
status: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}}, status: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()}},
condType: api.ReplicationControllerReplicaFailure, condType: v1.ReplicationControllerReplicaFailure,
expectedStatus: &api.ReplicationControllerStatus{}, expectedStatus: &v1.ReplicationControllerStatus{},
}, },
{ {
name: "doesn't remove anything", name: "doesn't remove anything",


@@ -23,14 +23,14 @@ import (
"reflect" "reflect"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" "k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/labels"
) )
// updateReplicationControllerStatus attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry. // updateReplicationControllerStatus attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerInterface, rc api.ReplicationController, newStatus api.ReplicationControllerStatus) (updateErr error) { func updateReplicationControllerStatus(c v1core.ReplicationControllerInterface, rc v1.ReplicationController, newStatus v1.ReplicationControllerStatus) (updateErr error) {
// This is the steady state. It happens when the rc doesn't have any expectations, since // This is the steady state. It happens when the rc doesn't have any expectations, since
// we do a periodic relist every 30s. If the generations differ but the replicas are // we do a periodic relist every 30s. If the generations differ but the replicas are
// the same, a caller might've resized to the same replica count. // the same, a caller might've resized to the same replica count.
@@ -51,7 +51,7 @@ func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerIn
var getErr error var getErr error
for i, rc := 0, &rc; ; i++ { for i, rc := 0, &rc; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for rc: %s/%s, ", rc.Namespace, rc.Name) + glog.V(4).Infof(fmt.Sprintf("Updating replica count for rc: %s/%s, ", rc.Namespace, rc.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rc.Status.Replicas, newStatus.Replicas, rc.Spec.Replicas) + fmt.Sprintf("replicas %d->%d (need %d), ", rc.Status.Replicas, newStatus.Replicas, *(rc.Spec.Replicas)) +
fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rc.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) + fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rc.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
fmt.Sprintf("readyReplicas %d->%d, ", rc.Status.ReadyReplicas, newStatus.ReadyReplicas) + fmt.Sprintf("readyReplicas %d->%d, ", rc.Status.ReadyReplicas, newStatus.ReadyReplicas) +
fmt.Sprintf("availableReplicas %d->%d, ", rc.Status.AvailableReplicas, newStatus.AvailableReplicas) + fmt.Sprintf("availableReplicas %d->%d, ", rc.Status.AvailableReplicas, newStatus.AvailableReplicas) +
@@ -72,7 +72,7 @@ func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerIn
} }
// OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker. // OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker.
type OverlappingControllers []*api.ReplicationController type OverlappingControllers []*v1.ReplicationController
func (o OverlappingControllers) Len() int { return len(o) } func (o OverlappingControllers) Len() int { return len(o) }
func (o OverlappingControllers) Swap(i, j int) { o[i], o[j] = o[j], o[i] } func (o OverlappingControllers) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@@ -84,7 +84,7 @@ func (o OverlappingControllers) Less(i, j int) bool {
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp) return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
} }
func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, manageReplicasErr error) api.ReplicationControllerStatus { func calculateStatus(rc v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus {
newStatus := rc.Status newStatus := rc.Status
// Count the number of pods that have labels matching the labels of the pod // Count the number of pods that have labels matching the labels of the pod
// template of the replication controller, the matching pods may have more // template of the replication controller, the matching pods may have more
@@ -99,26 +99,26 @@ func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, mana
if templateLabel.Matches(labels.Set(pod.Labels)) { if templateLabel.Matches(labels.Set(pod.Labels)) {
fullyLabeledReplicasCount++ fullyLabeledReplicasCount++
} }
if api.IsPodReady(pod) { if v1.IsPodReady(pod) {
readyReplicasCount++ readyReplicasCount++
if api.IsPodAvailable(pod, rc.Spec.MinReadySeconds, unversioned.Now()) { if v1.IsPodAvailable(pod, rc.Spec.MinReadySeconds, unversioned.Now()) {
availableReplicasCount++ availableReplicasCount++
} }
} }
} }
failureCond := GetCondition(rc.Status, api.ReplicationControllerReplicaFailure) failureCond := GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
if manageReplicasErr != nil && failureCond == nil { if manageReplicasErr != nil && failureCond == nil {
var reason string var reason string
if diff := len(filteredPods) - int(rc.Spec.Replicas); diff < 0 { if diff := len(filteredPods) - int(*(rc.Spec.Replicas)); diff < 0 {
reason = "FailedCreate" reason = "FailedCreate"
} else if diff > 0 { } else if diff > 0 {
reason = "FailedDelete" reason = "FailedDelete"
} }
cond := NewReplicationControllerCondition(api.ReplicationControllerReplicaFailure, api.ConditionTrue, reason, manageReplicasErr.Error()) cond := NewReplicationControllerCondition(v1.ReplicationControllerReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
SetCondition(&newStatus, cond) SetCondition(&newStatus, cond)
} else if manageReplicasErr == nil && failureCond != nil { } else if manageReplicasErr == nil && failureCond != nil {
RemoveCondition(&newStatus, api.ReplicationControllerReplicaFailure) RemoveCondition(&newStatus, v1.ReplicationControllerReplicaFailure)
} }
newStatus.Replicas = int32(len(filteredPods)) newStatus.Replicas = int32(len(filteredPods))
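The counting in calculateStatus can be restated without the API types: every filtered pod bumps Replicas, fully-labeled/ready/available pods bump their own counters, and a manage-replicas error attaches a ReplicaFailure condition whose reason is FailedCreate when the controller is under-replicated and FailedDelete when it is over-replicated. A stand-in sketch under those assumptions:

package main

import "fmt"

type podInfo struct {
    fullyLabeled, ready, available bool
}

type status struct {
    replicas, fullyLabeled, ready, available int32
    failureReason                            string
}

// calcStatus mirrors the counting above: every filtered pod bumps replicas,
// and the failure reason is FailedCreate when there are fewer pods than
// desired, FailedDelete when there are more.
func calcStatus(pods []podInfo, desired int32, manageErr error) status {
    s := status{replicas: int32(len(pods))}
    for _, p := range pods {
        if p.fullyLabeled {
            s.fullyLabeled++
        }
        if p.ready {
            s.ready++
            if p.available {
                s.available++
            }
        }
    }
    if manageErr != nil {
        if int32(len(pods)) < desired {
            s.failureReason = "FailedCreate"
        } else if int32(len(pods)) > desired {
            s.failureReason = "FailedDelete"
        }
    }
    return s
}

func main() {
    pods := []podInfo{{true, true, true}, {true, true, false}}
    fmt.Printf("%+v\n", calcStatus(pods, 2, nil)) // {replicas:2 fullyLabeled:2 ready:2 available:1 failureReason:}
}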
@@ -129,8 +129,8 @@ func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, mana
} }
// NewReplicationControllerCondition creates a new replication controller condition. // NewReplicationControllerCondition creates a new replication controller condition.
func NewReplicationControllerCondition(condType api.ReplicationControllerConditionType, status api.ConditionStatus, reason, msg string) api.ReplicationControllerCondition { func NewReplicationControllerCondition(condType v1.ReplicationControllerConditionType, status v1.ConditionStatus, reason, msg string) v1.ReplicationControllerCondition {
return api.ReplicationControllerCondition{ return v1.ReplicationControllerCondition{
Type: condType, Type: condType,
Status: status, Status: status,
LastTransitionTime: unversioned.Now(), LastTransitionTime: unversioned.Now(),
@@ -140,7 +140,7 @@ func NewReplicationControllerCondition(condType api.ReplicationControllerConditi
} }
// GetCondition returns a replication controller condition with the provided type if it exists. // GetCondition returns a replication controller condition with the provided type if it exists.
func GetCondition(status api.ReplicationControllerStatus, condType api.ReplicationControllerConditionType) *api.ReplicationControllerCondition { func GetCondition(status v1.ReplicationControllerStatus, condType v1.ReplicationControllerConditionType) *v1.ReplicationControllerCondition {
for i := range status.Conditions { for i := range status.Conditions {
c := status.Conditions[i] c := status.Conditions[i]
if c.Type == condType { if c.Type == condType {
@@ -151,7 +151,7 @@ func GetCondition(status api.ReplicationControllerStatus, condType api.Replicati
} }
// SetCondition adds/replaces the given condition in the replication controller status. // SetCondition adds/replaces the given condition in the replication controller status.
func SetCondition(status *api.ReplicationControllerStatus, condition api.ReplicationControllerCondition) { func SetCondition(status *v1.ReplicationControllerStatus, condition v1.ReplicationControllerCondition) {
currentCond := GetCondition(*status, condition.Type) currentCond := GetCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason { if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return return
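SetCondition above is a no-op when a condition of the same type already has the same status and reason; otherwise it drops the old condition of that type and appends the new one, and RemoveCondition below filters a type out entirely. A stand-in sketch of those two operations over a plain slice:

package main

import "fmt"

type condition struct {
    condType, status, reason string
}

// setCondition appends cond, first dropping any existing condition of the same
// type, unless an identical type+status+reason is already present (then it is a no-op).
func setCondition(conds []condition, cond condition) []condition {
    for _, c := range conds {
        if c.condType == cond.condType && c.status == cond.status && c.reason == cond.reason {
            return conds
        }
    }
    return append(removeCondition(conds, cond.condType), cond)
}

// removeCondition drops every condition of the given type.
func removeCondition(conds []condition, condType string) []condition {
    var out []condition
    for _, c := range conds {
        if c.condType != condType {
            out = append(out, c)
        }
    }
    return out
}

func main() {
    conds := setCondition(nil, condition{"ReplicaFailure", "True", "OtherFailure"})
    conds = setCondition(conds, condition{"ReplicaFailure", "True", "AnotherFailure"}) // overwrite
    fmt.Println(conds)                                    // [{ReplicaFailure True AnotherFailure}]
    fmt.Println(removeCondition(conds, "ReplicaFailure")) // []
}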
@@ -161,13 +161,13 @@ func SetCondition(status *api.ReplicationControllerStatus, condition api.Replica
} }
// RemoveCondition removes the condition with the provided type from the replication controller status. // RemoveCondition removes the condition with the provided type from the replication controller status.
func RemoveCondition(status *api.ReplicationControllerStatus, condType api.ReplicationControllerConditionType) { func RemoveCondition(status *v1.ReplicationControllerStatus, condType v1.ReplicationControllerConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType) status.Conditions = filterOutCondition(status.Conditions, condType)
} }
// filterOutCondition returns a new slice of replication controller conditions without conditions with the provided type. // filterOutCondition returns a new slice of replication controller conditions without conditions with the provided type.
func filterOutCondition(conditions []api.ReplicationControllerCondition, condType api.ReplicationControllerConditionType) []api.ReplicationControllerCondition { func filterOutCondition(conditions []v1.ReplicationControllerCondition, condType v1.ReplicationControllerConditionType) []v1.ReplicationControllerCondition {
var newConditions []api.ReplicationControllerCondition var newConditions []v1.ReplicationControllerCondition
for _, c := range conditions { for _, c := range conditions {
if c.Type == condType { if c.Type == condType {
continue continue


@@ -24,8 +24,9 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/quota/evaluator/core" "k8s.io/kubernetes/pkg/quota/evaluator/core"
@@ -55,9 +56,9 @@ type ReplenishmentControllerOptions struct {
// PodReplenishmentUpdateFunc will replenish if the old pod was quota tracked but the new is not // PodReplenishmentUpdateFunc will replenish if the old pod was quota tracked but the new is not
func PodReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) { func PodReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
return func(oldObj, newObj interface{}) { return func(oldObj, newObj interface{}) {
oldPod := oldObj.(*api.Pod) oldPod := oldObj.(*v1.Pod)
newPod := newObj.(*api.Pod) newPod := newObj.(*v1.Pod)
if core.QuotaPod(oldPod) && !core.QuotaPod(newPod) { if core.QuotaV1Pod(oldPod) && !core.QuotaV1Pod(newPod) {
options.ReplenishmentFunc(options.GroupKind, newPod.Namespace, oldPod) options.ReplenishmentFunc(options.GroupKind, newPod.Namespace, oldPod)
} }
} }
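PodReplenishmentUpdateFunc fires only on the transition from quota-tracked to not-tracked, i.e. when a pod stops counting against the pod quota, so usage is recalculated exactly when it can drop. A stand-in sketch of that trigger; a bare phase check stands in for core.QuotaV1Pod here:

package main

import "fmt"

// quotaTracked is a stand-in for core.QuotaV1Pod: running/pending pods count
// against quota, terminal pods do not.
func quotaTracked(phase string) bool {
    return phase != "Succeeded" && phase != "Failed"
}

// onPodUpdate calls replenish only when the old pod was tracked and the new
// one no longer is.
func onPodUpdate(oldPhase, newPhase string, replenish func()) {
    if quotaTracked(oldPhase) && !quotaTracked(newPhase) {
        replenish()
    }
}

func main() {
    onPodUpdate("Running", "Succeeded", func() { fmt.Println("replenish pod quota") }) // fires
    onPodUpdate("Running", "Running", func() { fmt.Println("replenish pod quota") })   // no-op
}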
@@ -146,14 +147,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined // TODO move to informer when defined
_, result = cache.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Services(api.NamespaceAll).List(options) return r.kubeClient.Core().Services(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().Services(api.NamespaceAll).Watch(options) return r.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.Service{}, &v1.Service{},
options.ResyncPeriod(), options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
UpdateFunc: ServiceReplenishmentUpdateFunc(options), UpdateFunc: ServiceReplenishmentUpdateFunc(options),
@@ -164,14 +165,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined // TODO move to informer when defined
_, result = cache.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options) return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options) return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ReplicationController{}, &v1.ReplicationController{},
options.ResyncPeriod(), options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
@@ -187,14 +188,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths... // TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths...
_, result = cache.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options) return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options) return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.PersistentVolumeClaim{}, &v1.PersistentVolumeClaim{},
options.ResyncPeriod(), options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
@@ -204,14 +205,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined // TODO move to informer when defined
_, result = cache.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options) return r.kubeClient.Core().Secrets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().Secrets(api.NamespaceAll).Watch(options) return r.kubeClient.Core().Secrets(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.Secret{}, &v1.Secret{},
options.ResyncPeriod(), options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
@@ -221,14 +222,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined // TODO move to informer when defined
_, result = cache.NewInformer( _, result = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options) return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).Watch(options) return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ConfigMap{}, &v1.ConfigMap{},
options.ResyncPeriod(), options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
DeleteFunc: ObjectReplenishmentDeleteFunc(options), DeleteFunc: ObjectReplenishmentDeleteFunc(options),
@@ -243,8 +244,8 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// ServiceReplenishmentUpdateFunc will replenish if a quota-tracked service has changed its service type // ServiceReplenishmentUpdateFunc will replenish if a quota-tracked service has changed its service type
func ServiceReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) { func ServiceReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
return func(oldObj, newObj interface{}) { return func(oldObj, newObj interface{}) {
oldService := oldObj.(*api.Service) oldService := oldObj.(*v1.Service)
newService := newObj.(*api.Service) newService := newObj.(*v1.Service)
if core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService) { if core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService) {
options.ReplenishmentFunc(options.GroupKind, newService.Namespace, nil) options.ReplenishmentFunc(options.GroupKind, newService.Namespace, nil)
} }

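The replenishment update funcs above share one shape: decode the old/new objects, then call ReplenishmentFunc only when the object leaves (or changes) the state the quota evaluator tracks — QuotaPod became QuotaV1Pod here, but the control flow is identical. A minimal sketch of that shape, with local stand-in types instead of the real *v1.Pod and ReplenishmentControllerOptions:

package main

import "fmt"

// pod is a simplified stand-in for *v1.Pod; only the fields the check needs.
type pod struct {
	Namespace string
	Phase     string
}

// quotaTracked mirrors the idea behind core.QuotaV1Pod: a pod counts against
// quota only while it is not in a terminal phase.
func quotaTracked(p *pod) bool {
	return p.Phase != "Succeeded" && p.Phase != "Failed"
}

// replenishFunc is a stand-in for ReplenishmentFunc(groupKind, namespace, obj).
type replenishFunc func(namespace string)

// podUpdateFunc returns an informer UpdateFunc that replenishes quota only
// when the old pod was tracked and the new one no longer is.
func podUpdateFunc(replenish replenishFunc) func(oldObj, newObj interface{}) {
	return func(oldObj, newObj interface{}) {
		oldPod, ok1 := oldObj.(*pod)
		newPod, ok2 := newObj.(*pod)
		if !ok1 || !ok2 {
			return
		}
		if quotaTracked(oldPod) && !quotaTracked(newPod) {
			replenish(newPod.Namespace)
		}
	}
}

func main() {
	update := podUpdateFunc(func(ns string) { fmt.Println("replenish quota in", ns) })
	update(&pod{Namespace: "testing", Phase: "Running"}, &pod{Namespace: "testing", Phase: "Failed"})
}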
View File

@@ -21,6 +21,7 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr" "k8s.io/kubernetes/pkg/util/intstr"
@@ -45,13 +46,13 @@ func TestPodReplenishmentUpdateFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish, ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc, ResyncPeriod: controller.NoResyncPeriodFunc,
} }
oldPod := &api.Pod{ oldPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: api.PodStatus{Phase: api.PodRunning}, Status: v1.PodStatus{Phase: v1.PodRunning},
} }
newPod := &api.Pod{ newPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: api.PodStatus{Phase: api.PodFailed}, Status: v1.PodStatus{Phase: v1.PodFailed},
} }
updateFunc := PodReplenishmentUpdateFunc(&options) updateFunc := PodReplenishmentUpdateFunc(&options)
updateFunc(oldPod, newPod) updateFunc(oldPod, newPod)
@@ -70,9 +71,9 @@ func TestObjectReplenishmentDeleteFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish, ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc, ResyncPeriod: controller.NoResyncPeriodFunc,
} }
oldPod := &api.Pod{ oldPod := &v1.Pod{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: api.PodStatus{Phase: api.PodRunning}, Status: v1.PodStatus{Phase: v1.PodRunning},
} }
deleteFunc := ObjectReplenishmentDeleteFunc(&options) deleteFunc := ObjectReplenishmentDeleteFunc(&options)
deleteFunc(oldPod) deleteFunc(oldPod)
@@ -91,21 +92,21 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish, ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc, ResyncPeriod: controller.NoResyncPeriodFunc,
} }
oldService := &api.Service{ oldService := &v1.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeNodePort, Type: v1.ServiceTypeNodePort,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
TargetPort: intstr.FromInt(80), TargetPort: intstr.FromInt(80),
}}, }},
}, },
} }
newService := &api.Service{ newService := &v1.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeClusterIP, Type: v1.ServiceTypeClusterIP,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
TargetPort: intstr.FromInt(80), TargetPort: intstr.FromInt(80),
}}}, }}},
@@ -125,21 +126,21 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish, ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc, ResyncPeriod: controller.NoResyncPeriodFunc,
} }
oldService = &api.Service{ oldService = &v1.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeNodePort, Type: v1.ServiceTypeNodePort,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
TargetPort: intstr.FromInt(80), TargetPort: intstr.FromInt(80),
}}, }},
}, },
} }
newService = &api.Service{ newService = &v1.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"}, ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeNodePort, Type: v1.ServiceTypeNodePort,
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 81, Port: 81,
TargetPort: intstr.FromInt(81), TargetPort: intstr.FromInt(81),
}}}, }}},

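These tests all follow the same recipe: plug a mock replenishment recorder into the options, hand the update/delete func an old/new pair, and assert whether the mock fired. A stripped-down, self-contained version of that recipe (plain strings in place of *v1.Pod, and a closure standing in for PodReplenishmentUpdateFunc) is roughly:

package main

import "testing"

// replenished records calls to the replenishment callback, standing in for the
// mock replenishment helper these tests use.
type replenished struct {
	called    bool
	namespace string
}

// TestUpdateFuncReplenishesOnTermination mirrors the shape of
// TestPodReplenishmentUpdateFunc: an old "running" object paired with a new
// "failed" one should trigger exactly one replenish call for the namespace.
func TestUpdateFuncReplenishesOnTermination(t *testing.T) {
	rec := &replenished{}
	updateFunc := func(oldPhase, newPhase, namespace string) {
		terminal := func(p string) bool { return p == "Failed" || p == "Succeeded" }
		if !terminal(oldPhase) && terminal(newPhase) {
			rec.called = true
			rec.namespace = namespace
		}
	}
	updateFunc("Running", "Failed", "test")
	if !rec.called || rec.namespace != "test" {
		t.Errorf("expected replenishment for namespace %q, got %+v", "test", rec)
	}
}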
View File

@@ -23,8 +23,9 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota" "k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -93,14 +94,14 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
// build the controller that observes quota // build the controller that observes quota
rq.rqIndexer, rq.rqController = cache.NewIndexerInformer( rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options) return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).Watch(options) return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ResourceQuota{}, &v1.ResourceQuota{},
rq.resyncPeriod(), rq.resyncPeriod(),
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: rq.addQuota, AddFunc: rq.addQuota,
@@ -113,9 +114,9 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
// that cannot be backed by a cache and result in a full query of a namespace's content, we do not // that cannot be backed by a cache and result in a full query of a namespace's content, we do not
// want to pay the price on spurious status updates. As a result, we have a separate routine that is // want to pay the price on spurious status updates. As a result, we have a separate routine that is
// responsible for enqueue of all resource quotas when doing a full resync (enqueueAll) // responsible for enqueue of all resource quotas when doing a full resync (enqueueAll)
oldResourceQuota := old.(*api.ResourceQuota) oldResourceQuota := old.(*v1.ResourceQuota)
curResourceQuota := cur.(*api.ResourceQuota) curResourceQuota := cur.(*v1.ResourceQuota)
if quota.Equals(curResourceQuota.Spec.Hard, oldResourceQuota.Spec.Hard) { if quota.V1Equals(oldResourceQuota.Spec.Hard, curResourceQuota.Spec.Hard) {
return return
} }
rq.addQuota(curResourceQuota) rq.addQuota(curResourceQuota)
@@ -152,7 +153,7 @@ func (rq *ResourceQuotaController) enqueueAll() {
} }
} }
// obj could be an *api.ResourceQuota, or a DeletionFinalStateUnknown marker item. // obj could be a *v1.ResourceQuota, or a DeletionFinalStateUnknown marker item.
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) { func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
key, err := controller.KeyFunc(obj) key, err := controller.KeyFunc(obj)
if err != nil { if err != nil {
@@ -169,10 +170,10 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
return return
} }
resourceQuota := obj.(*api.ResourceQuota) resourceQuota := obj.(*v1.ResourceQuota)
// if we declared an intent that is not yet captured in status (prioritize it) // if we declared an intent that is not yet captured in status (prioritize it)
if !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) { if !v1.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) {
rq.missingUsageQueue.Add(key) rq.missingUsageQueue.Add(key)
return return
} }
@@ -180,7 +181,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
// if we declared a constraint that has no usage (which this controller can calculate, prioritize it) // if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
for constraint := range resourceQuota.Status.Hard { for constraint := range resourceQuota.Status.Hard {
if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound { if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
matchedResources := []api.ResourceName{constraint} matchedResources := []api.ResourceName{api.ResourceName(constraint)}
for _, evaluator := range rq.registry.Evaluators() { for _, evaluator := range rq.registry.Evaluators() {
if intersection := quota.Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) != 0 { if intersection := quota.Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) != 0 {
@@ -260,14 +261,19 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
rq.queue.Add(key) rq.queue.Add(key)
return err return err
} }
quota := *obj.(*api.ResourceQuota) quota := *obj.(*v1.ResourceQuota)
return rq.syncResourceQuota(quota) return rq.syncResourceQuota(quota)
} }
// syncResourceQuota runs a complete sync of resource quota status across all known kinds // syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQuota) (err error) { func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota v1.ResourceQuota) (err error) {
// quota is dirty if any part of spec hard limits differs from the status hard limits // quota is dirty if any part of spec hard limits differs from the status hard limits
dirty := !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) dirty := !v1.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)
resourceQuota := api.ResourceQuota{}
if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(&v1ResourceQuota, &resourceQuota, nil); err != nil {
return err
}
// dirty tracks if the usage status differs from the previous sync, // dirty tracks if the usage status differs from the previous sync,
// if so, we send a new usage with latest status // if so, we send a new usage with latest status
@@ -311,7 +317,11 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQ
// there was a change observed by this controller that requires we update quota // there was a change observed by this controller that requires we update quota
if dirty { if dirty {
_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage) v1Usage := &v1.ResourceQuota{}
if err := v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
return err
}
_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
return err return err
} }
return nil return nil
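The hunk above is the heart of the controller's write path: usage is recomputed against the internal type, converted back to v1 with Convert_api_ResourceQuota_To_v1_ResourceQuota, and UpdateStatus is only called when something is actually dirty (spec hard differs from status hard, or the recalculated usage differs from what was last published). A self-contained sketch of that persist-only-if-dirty idea, with plain string maps standing in for v1.ResourceList and a stub in place of the real UpdateStatus call:

package main

import (
	"fmt"
	"reflect"
)

// resourceList is a stand-in for v1.ResourceList (quantities flattened to strings).
type resourceList map[string]string

type quotaStatus struct {
	Hard resourceList
	Used resourceList
}

// syncQuota recomputes usage and publishes a status update only when either the
// hard limits or the usage actually changed, mirroring the controller's dirty check.
func syncQuota(specHard resourceList, prev quotaStatus, newUsed resourceList, updateStatus func(quotaStatus)) {
	dirty := !reflect.DeepEqual(specHard, prev.Hard) || !reflect.DeepEqual(newUsed, prev.Used)
	if !dirty {
		return // nothing changed; skip the API write
	}
	updateStatus(quotaStatus{Hard: specHard, Used: newUsed})
}

func main() {
	prev := quotaStatus{Hard: resourceList{"cpu": "3"}, Used: resourceList{"cpu": "200m"}}
	syncQuota(resourceList{"cpu": "4"}, prev, resourceList{"cpu": "200m"}, func(s quotaStatus) {
		fmt.Println("UpdateStatus:", s) // fires because spec hard changed from 3 to 4
	})
}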
@@ -327,7 +337,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind unversioned.GroupKin
} }
// check if this namespace even has a quota... // check if this namespace even has a quota...
indexKey := &api.ResourceQuota{} indexKey := &v1.ResourceQuota{}
indexKey.Namespace = namespace indexKey.Namespace = namespace
resourceQuotas, err := rq.rqIndexer.Index("namespace", indexKey) resourceQuotas, err := rq.rqIndexer.Index("namespace", indexKey)
if err != nil { if err != nil {
@@ -340,8 +350,13 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind unversioned.GroupKin
// only queue those quotas that are tracking a resource associated with this kind. // only queue those quotas that are tracking a resource associated with this kind.
matchedResources := evaluator.MatchesResources() matchedResources := evaluator.MatchesResources()
for i := range resourceQuotas { for i := range resourceQuotas {
resourceQuota := resourceQuotas[i].(*api.ResourceQuota) resourceQuota := resourceQuotas[i].(*v1.ResourceQuota)
resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard) internalResourceQuota := &api.ResourceQuota{}
if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil {
glog.Error(err)
continue
}
resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
if len(quota.Intersection(matchedResources, resourceQuotaResources)) > 0 { if len(quota.Intersection(matchedResources, resourceQuotaResources)) > 0 {
// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota. // TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
rq.enqueueResourceQuota(resourceQuota) rq.enqueueResourceQuota(resourceQuota)

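replenishQuota ends the file with an intersection filter: after converting the cached v1 quota back to the internal type, it re-enqueues only the quotas whose hard limits overlap the resources matched by the evaluator for the replenished group/kind. A minimal sketch of that filter with plain string slices in place of []api.ResourceName:

package main

import "fmt"

// intersects reports whether any resource name appears in both lists,
// the same test that quota.Intersection(...) length > 0 performs.
func intersects(a, b []string) bool {
	seen := map[string]bool{}
	for _, x := range a {
		seen[x] = true
	}
	for _, y := range b {
		if seen[y] {
			return true
		}
	}
	return false
}

// quotasToEnqueue filters a namespace's quotas down to those that actually
// track one of the resources matched by the evaluator for the replenished kind.
func quotasToEnqueue(quotaHardResources map[string][]string, matched []string) []string {
	var names []string
	for name, hard := range quotaHardResources {
		if intersects(hard, matched) {
			names = append(names, name)
		}
	}
	return names
}

func main() {
	quotas := map[string][]string{
		"compute": {"cpu", "memory", "pods"},
		"objects": {"services", "configmaps"},
	}
	// A pod was deleted: only quotas tracking pod-related resources need a resync.
	fmt.Println(quotasToEnqueue(quotas, []string{"pods", "cpu", "memory"}))
}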
View File

@@ -23,7 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota/generic" "k8s.io/kubernetes/pkg/quota/generic"
@@ -31,74 +32,74 @@ import (
"k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/sets"
) )
func getResourceList(cpu, memory string) api.ResourceList { func getResourceList(cpu, memory string) v1.ResourceList {
res := api.ResourceList{} res := v1.ResourceList{}
if cpu != "" { if cpu != "" {
res[api.ResourceCPU] = resource.MustParse(cpu) res[v1.ResourceCPU] = resource.MustParse(cpu)
} }
if memory != "" { if memory != "" {
res[api.ResourceMemory] = resource.MustParse(memory) res[v1.ResourceMemory] = resource.MustParse(memory)
} }
return res return res
} }
func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements { func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := api.ResourceRequirements{} res := v1.ResourceRequirements{}
res.Requests = requests res.Requests = requests
res.Limits = limits res.Limits = limits
return res return res
} }
func TestSyncResourceQuota(t *testing.T) { func TestSyncResourceQuota(t *testing.T) {
podList := api.PodList{ podList := v1.PodList{
Items: []api.Pod{ Items: []v1.Pod{
{ {
ObjectMeta: api.ObjectMeta{Name: "pod-running", Namespace: "testing"}, ObjectMeta: v1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
Status: api.PodStatus{Phase: api.PodRunning}, Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{{Name: "vol"}}, Volumes: []v1.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
}, },
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "pod-running-2", Namespace: "testing"}, ObjectMeta: v1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
Status: api.PodStatus{Phase: api.PodRunning}, Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{{Name: "vol"}}, Volumes: []v1.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
}, },
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "pod-failed", Namespace: "testing"}, ObjectMeta: v1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
Status: api.PodStatus{Phase: api.PodFailed}, Status: v1.PodStatus{Phase: v1.PodFailed},
Spec: api.PodSpec{ Spec: v1.PodSpec{
Volumes: []api.Volume{{Name: "vol"}}, Volumes: []v1.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}}, Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
}, },
}, },
}, },
} }
resourceQuota := api.ResourceQuota{ resourceQuota := v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "testing"}, ObjectMeta: v1.ObjectMeta{Name: "quota", Namespace: "testing"},
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("3"), v1.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"), v1.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"), v1.ResourcePods: resource.MustParse("5"),
}, },
}, },
} }
expectedUsage := api.ResourceQuota{ expectedUsage := v1.ResourceQuota{
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("3"), v1.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"), v1.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"), v1.ResourcePods: resource.MustParse("5"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("200m"), v1.ResourceCPU: resource.MustParse("200m"),
api.ResourceMemory: resource.MustParse("2Gi"), v1.ResourceMemory: resource.MustParse("2Gi"),
api.ResourcePods: resource.MustParse("2"), v1.ResourcePods: resource.MustParse("2"),
}, },
}, },
} }
@@ -135,7 +136,7 @@ func TestSyncResourceQuota(t *testing.T) {
} }
lastActionIndex := len(kubeClient.Actions()) - 1 lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota) usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)
// ensure hard and used limits are what we expected // ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard { for k, v := range expectedUsage.Status.Hard {
@@ -157,33 +158,33 @@ func TestSyncResourceQuota(t *testing.T) {
} }
func TestSyncResourceQuotaSpecChange(t *testing.T) { func TestSyncResourceQuotaSpecChange(t *testing.T) {
resourceQuota := api.ResourceQuota{ resourceQuota := v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("3"), v1.ResourceCPU: resource.MustParse("3"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
}, },
}, },
} }
expectedUsage := api.ResourceQuota{ expectedUsage := v1.ResourceQuota{
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
}, },
}, },
} }
@@ -221,7 +222,7 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
} }
lastActionIndex := len(kubeClient.Actions()) - 1 lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota) usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)
// ensure hard and used limits are what we expected // ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard { for k, v := range expectedUsage.Status.Hard {
@@ -243,35 +244,35 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
} }
func TestSyncResourceQuotaSpecHardChange(t *testing.T) { func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
resourceQuota := api.ResourceQuota{ resourceQuota := v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("3"), v1.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("1Gi"), v1.ResourceMemory: resource.MustParse("1Gi"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
api.ResourceMemory: resource.MustParse("0"), v1.ResourceMemory: resource.MustParse("0"),
}, },
}, },
} }
expectedUsage := api.ResourceQuota{ expectedUsage := v1.ResourceQuota{
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
}, },
}, },
} }
@@ -309,7 +310,7 @@ func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
} }
lastActionIndex := len(kubeClient.Actions()) - 1 lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota) usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)
// ensure hard and used limits are what we expected // ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard { for k, v := range expectedUsage.Status.Hard {
@@ -331,40 +332,40 @@ func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
// ensure usage hard and used are synced with spec hard and no longer carry the removed resource // ensure usage hard and used are synced with spec hard and no longer carry the removed resource
for k, v := range usage.Status.Hard { for k, v := range usage.Status.Hard {
if k == api.ResourceMemory { if k == v1.ResourceMemory {
t.Errorf("Unexpected Usage Hard: Key: %v, Value: %v", k, v.String()) t.Errorf("Unexpected Usage Hard: Key: %v, Value: %v", k, v.String())
} }
} }
for k, v := range usage.Status.Used { for k, v := range usage.Status.Used {
if k == api.ResourceMemory { if k == v1.ResourceMemory {
t.Errorf("Unexpected Usage Used: Key: %v, Value: %v", k, v.String()) t.Errorf("Unexpected Usage Used: Key: %v, Value: %v", k, v.String())
} }
} }
} }
func TestSyncResourceQuotaNoChange(t *testing.T) { func TestSyncResourceQuotaNoChange(t *testing.T) {
resourceQuota := api.ResourceQuota{ resourceQuota := v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
}, },
}, },
} }
kubeClient := fake.NewSimpleClientset(&api.PodList{}, &resourceQuota) kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{ resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient, KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc, ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -416,20 +417,20 @@ func TestAddQuota(t *testing.T) {
testCases := []struct { testCases := []struct {
name string name string
quota *api.ResourceQuota quota *v1.ResourceQuota
expectedPriority bool expectedPriority bool
}{ }{
{ {
name: "no status", name: "no status",
expectedPriority: true, expectedPriority: true,
quota: &api.ResourceQuota{ quota: &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
}, },
@@ -437,19 +438,19 @@ func TestAddQuota(t *testing.T) {
{ {
name: "status, no usage", name: "status, no usage",
expectedPriority: true, expectedPriority: true,
quota: &api.ResourceQuota{ quota: &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
}, },
@@ -457,22 +458,22 @@ func TestAddQuota(t *testing.T) {
{ {
name: "status, mismatch", name: "status, mismatch",
expectedPriority: true, expectedPriority: true,
quota: &api.ResourceQuota{ quota: &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("6"), v1.ResourceCPU: resource.MustParse("6"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
}, },
}, },
}, },
@@ -480,19 +481,19 @@ func TestAddQuota(t *testing.T) {
{ {
name: "status, missing usage, but don't care", name: "status, missing usage, but don't care",
expectedPriority: false, expectedPriority: false,
quota: &api.ResourceQuota{ quota: &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceServices: resource.MustParse("4"), v1.ResourceServices: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceServices: resource.MustParse("4"), v1.ResourceServices: resource.MustParse("4"),
}, },
}, },
}, },
@@ -500,22 +501,22 @@ func TestAddQuota(t *testing.T) {
{ {
name: "ready", name: "ready",
expectedPriority: false, expectedPriority: false,
quota: &api.ResourceQuota{ quota: &v1.ResourceQuota{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Namespace: "default", Namespace: "default",
Name: "rq", Name: "rq",
}, },
Spec: api.ResourceQuotaSpec{ Spec: v1.ResourceQuotaSpec{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
}, },
Status: api.ResourceQuotaStatus{ Status: v1.ResourceQuotaStatus{
Hard: api.ResourceList{ Hard: v1.ResourceList{
api.ResourceCPU: resource.MustParse("4"), v1.ResourceCPU: resource.MustParse("4"),
}, },
Used: api.ResourceList{ Used: v1.ResourceList{
api.ResourceCPU: resource.MustParse("0"), v1.ResourceCPU: resource.MustParse("0"),
}, },
}, },
}, },

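A quick sanity check on TestSyncResourceQuota's fixtures: the two running pods each request 100m CPU and 1Gi of memory and count against quota, the failed pod does not, so expectedUsage comes out to 200m CPU, 2Gi memory and 2 pods. The same arithmetic as a tiny self-contained sketch (integer millicores and bytes instead of resource.Quantity):

package main

import "fmt"

// podFixture is a stand-in for the test pods: phase plus per-pod requests.
type podFixture struct {
	Phase       string
	CPUMilli    int64
	MemoryBytes int64
}

// expectedUsed sums requests over pods that still count against quota
// (running pods count; failed or succeeded pods do not).
func expectedUsed(pods []podFixture) (cpuMilli, memBytes, count int64) {
	for _, p := range pods {
		if p.Phase == "Failed" || p.Phase == "Succeeded" {
			continue
		}
		cpuMilli += p.CPUMilli
		memBytes += p.MemoryBytes
		count++
	}
	return
}

func main() {
	gi := int64(1) << 30
	pods := []podFixture{
		{Phase: "Running", CPUMilli: 100, MemoryBytes: gi},
		{Phase: "Running", CPUMilli: 100, MemoryBytes: gi},
		{Phase: "Failed", CPUMilli: 100, MemoryBytes: gi},
	}
	cpu, mem, n := expectedUsed(pods)
	fmt.Printf("used: cpu=%dm memory=%dGi pods=%d\n", cpu, mem/gi, n) // cpu=200m memory=2Gi pods=2
}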
View File

@@ -23,11 +23,11 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
@@ -71,14 +71,14 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterNam
rc.nodeStore.Store, rc.nodeController = cache.NewInformer( rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rc.kubeClient.Core().Nodes().List(options) return rc.kubeClient.Core().Nodes().List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rc.kubeClient.Core().Nodes().Watch(options) return rc.kubeClient.Core().Nodes().Watch(options)
}, },
}, },
&api.Node{}, &v1.Node{},
controller.NoResyncPeriodFunc(), controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{}, cache.ResourceEventHandlerFuncs{},
) )
@@ -116,7 +116,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
return rc.reconcile(nodeList.Items, routeList) return rc.reconcile(nodeList.Items, routeList)
} }
func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error { func (rc *RouteController) reconcile(nodes []v1.Node, routes []*cloudprovider.Route) error {
// nodeCIDRs maps nodeName->nodeCIDR // nodeCIDRs maps nodeName->nodeCIDR
nodeCIDRs := make(map[types.NodeName]string) nodeCIDRs := make(map[types.NodeName]string)
// routeMap maps routeTargetNode->route // routeMap maps routeTargetNode->route
@@ -166,8 +166,8 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
}(nodeName, nameHint, route) }(nodeName, nameHint, route)
} else { } else {
// Update condition only if it doesn't reflect the current state. // Update condition only if it doesn't reflect the current state.
_, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable) _, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
if condition == nil || condition.Status != api.ConditionFalse { if condition == nil || condition.Status != v1.ConditionFalse {
rc.updateNetworkingCondition(types.NodeName(node.Name), true) rc.updateNetworkingCondition(types.NodeName(node.Name), true)
} }
} }
@@ -203,17 +203,17 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
// patch in the retry loop. // patch in the retry loop.
currentTime := unversioned.Now() currentTime := unversioned.Now()
if routeCreated { if routeCreated {
err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, api.NodeCondition{ err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
Type: api.NodeNetworkUnavailable, Type: v1.NodeNetworkUnavailable,
Status: api.ConditionFalse, Status: v1.ConditionFalse,
Reason: "RouteCreated", Reason: "RouteCreated",
Message: "RouteController created a route", Message: "RouteController created a route",
LastTransitionTime: currentTime, LastTransitionTime: currentTime,
}) })
} else { } else {
err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, api.NodeCondition{ err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
Type: api.NodeNetworkUnavailable, Type: v1.NodeNetworkUnavailable,
Status: api.ConditionTrue, Status: v1.ConditionTrue,
Reason: "NoRouteCreated", Reason: "NoRouteCreated",
Message: "RouteController failed to create a route", Message: "RouteController failed to create a route",
LastTransitionTime: currentTime, LastTransitionTime: currentTime,

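The route controller only touches node status when the NodeNetworkUnavailable condition is missing or disagrees with what it just observed ("Update condition only if it doesn't reflect the current state"), which avoids rewriting every node on every sync. A self-contained sketch of that check-before-update guard with simplified condition types (not the real v1.NodeCondition or SetNodeCondition):

package main

import "fmt"

// nodeCondition is a simplified stand-in for v1.NodeCondition.
type nodeCondition struct {
	Type   string
	Status string // "True" or "False"
	Reason string
}

// findCondition plays the role of v1.GetNodeCondition: return the condition of
// the requested type, or nil if the node does not carry it yet.
func findCondition(conds []nodeCondition, condType string) *nodeCondition {
	for i := range conds {
		if conds[i].Type == condType {
			return &conds[i]
		}
	}
	return nil
}

// ensureNetworkingCondition sets the condition only if it does not already
// reflect the current state, mirroring the controller's guard.
func ensureNetworkingCondition(conds []nodeCondition, routeCreated bool, set func(nodeCondition)) {
	want, reason := "True", "NoRouteCreated" // NetworkUnavailable=True when no route exists
	if routeCreated {
		want, reason = "False", "RouteCreated"
	}
	cur := findCondition(conds, "NetworkUnavailable")
	if cur != nil && cur.Status == want {
		return // already correct; skip the API write
	}
	set(nodeCondition{Type: "NetworkUnavailable", Status: want, Reason: reason})
}

func main() {
	conds := []nodeCondition{{Type: "NetworkUnavailable", Status: "True", Reason: "NoRouteCreated"}}
	ensureNetworkingCondition(conds, true, func(c nodeCondition) { fmt.Println("set condition:", c) })
}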
View File

@@ -21,8 +21,8 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
@@ -70,12 +70,12 @@ func TestIsResponsibleForRoute(t *testing.T) {
func TestReconcile(t *testing.T) { func TestReconcile(t *testing.T) {
cluster := "my-k8s" cluster := "my-k8s"
node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}} node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-1", UID: "01"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24"}}
node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}} node2 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24"}}
nodeNoCidr := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: ""}} nodeNoCidr := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: ""}}
testCases := []struct { testCases := []struct {
nodes []api.Node nodes []v1.Node
initialRoutes []*cloudprovider.Route initialRoutes []*cloudprovider.Route
expectedRoutes []*cloudprovider.Route expectedRoutes []*cloudprovider.Route
expectedNetworkUnavailable []bool expectedNetworkUnavailable []bool
@@ -83,7 +83,7 @@ func TestReconcile(t *testing.T) {
}{ }{
// 2 nodes, routes already there // 2 nodes, routes already there
{ {
nodes: []api.Node{ nodes: []v1.Node{
node1, node1,
node2, node2,
}, },
@@ -96,11 +96,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"}, {cluster + "-02", "node-2", "10.120.1.0/24"},
}, },
expectedNetworkUnavailable: []bool{true, true}, expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
}, },
// 2 nodes, one route already there // 2 nodes, one route already there
{ {
nodes: []api.Node{ nodes: []v1.Node{
node1, node1,
node2, node2,
}, },
@@ -112,11 +112,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"}, {cluster + "-02", "node-2", "10.120.1.0/24"},
}, },
expectedNetworkUnavailable: []bool{true, true}, expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
}, },
// 2 nodes, no routes yet // 2 nodes, no routes yet
{ {
nodes: []api.Node{ nodes: []v1.Node{
node1, node1,
node2, node2,
}, },
@@ -126,11 +126,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"}, {cluster + "-02", "node-2", "10.120.1.0/24"},
}, },
expectedNetworkUnavailable: []bool{true, true}, expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
}, },
// 2 nodes, a few too many routes // 2 nodes, a few too many routes
{ {
nodes: []api.Node{ nodes: []v1.Node{
node1, node1,
node2, node2,
}, },
@@ -145,11 +145,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"}, {cluster + "-02", "node-2", "10.120.1.0/24"},
}, },
expectedNetworkUnavailable: []bool{true, true}, expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
}, },
// 2 nodes, 2 routes, but only 1 is right // 2 nodes, 2 routes, but only 1 is right
{ {
nodes: []api.Node{ nodes: []v1.Node{
node1, node1,
node2, node2,
}, },
@@ -162,11 +162,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"}, {cluster + "-02", "node-2", "10.120.1.0/24"},
}, },
expectedNetworkUnavailable: []bool{true, true}, expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}), clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
}, },
// 2 nodes, one node without CIDR assigned. // 2 nodes, one node without CIDR assigned.
{ {
nodes: []api.Node{ nodes: []v1.Node{
node1, node1,
nodeNoCidr, nodeNoCidr,
}, },
@@ -175,7 +175,7 @@ func TestReconcile(t *testing.T) {
{cluster + "-01", "node-1", "10.120.0.0/24"}, {cluster + "-01", "node-1", "10.120.0.0/24"},
}, },
expectedNetworkUnavailable: []bool{true, false}, expectedNetworkUnavailable: []bool{true, false},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, nodeNoCidr}}), clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, nodeNoCidr}}),
}, },
} }
for i, testCase := range testCases { for i, testCase := range testCases {
@@ -197,13 +197,13 @@ func TestReconcile(t *testing.T) {
} }
for _, action := range testCase.clientset.Actions() { for _, action := range testCase.clientset.Actions() {
if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" { if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
node := action.(core.UpdateAction).GetObject().(*api.Node) node := action.(core.UpdateAction).GetObject().(*v1.Node)
_, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable) _, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
if condition == nil { if condition == nil {
t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name) t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
} else { } else {
check := func(index int) bool { check := func(index int) bool {
return (condition.Status == api.ConditionFalse) == testCase.expectedNetworkUnavailable[index] return (condition.Status == v1.ConditionFalse) == testCase.expectedNetworkUnavailable[index]
} }
index := -1 index := -1
for j := range testCase.nodes { for j := range testCase.nodes {
@@ -217,7 +217,7 @@ func TestReconcile(t *testing.T) {
} }
if !check(index) { if !check(index) {
t.Errorf("%d. Invalid NodeNetworkUnavailable condition for Node %v, expected %v, got %v", t.Errorf("%d. Invalid NodeNetworkUnavailable condition for Node %v, expected %v, got %v",
i, node.Name, testCase.expectedNetworkUnavailable[index], (condition.Status == api.ConditionFalse)) i, node.Name, testCase.expectedNetworkUnavailable[index], (condition.Status == v1.ConditionFalse))
} }
} }
} }

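The reconcile test cases above all reduce to one invariant: every node with a PodCIDR ends up with exactly one matching route, routes that no longer match get removed, and NodeNetworkUnavailable is flipped accordingly. A compact sketch of just the route diffing (simplified node and route records, no cloud provider calls and no condition handling):

package main

import "fmt"

type node struct {
	Name    string
	PodCIDR string
}

type route struct {
	TargetNode string
	CIDR       string
}

// reconcileRoutes computes which routes to create and which to delete so the
// route table matches the nodes' PodCIDRs, the same shape the controller's
// reconcile loop has once the cloud-provider plumbing is stripped away.
func reconcileRoutes(nodes []node, routes []route) (toCreate, toDelete []route) {
	want := map[string]string{} // node name -> desired CIDR
	for _, n := range nodes {
		if n.PodCIDR != "" {
			want[n.Name] = n.PodCIDR
		}
	}
	have := map[string]route{}
	for _, r := range routes {
		if cidr, ok := want[r.TargetNode]; ok && cidr == r.CIDR {
			have[r.TargetNode] = r
			continue
		}
		toDelete = append(toDelete, r) // stale target or CIDR mismatch
	}
	for name, cidr := range want {
		if _, ok := have[name]; !ok {
			toCreate = append(toCreate, route{TargetNode: name, CIDR: cidr})
		}
	}
	return
}

func main() {
	nodes := []node{{"node-1", "10.120.0.0/24"}, {"node-2", "10.120.1.0/24"}}
	routes := []route{{"node-1", "10.120.0.0/24"}, {"node-3", "10.120.2.0/24"}}
	c, d := reconcileRoutes(nodes, routes)
	fmt.Println("create:", c, "delete:", d)
}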
View File

@@ -25,11 +25,11 @@ import (
"reflect" "reflect"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors" "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
@@ -65,7 +65,7 @@ const (
type cachedService struct { type cachedService struct {
// The cached state of the service // The cached state of the service
state *api.Service state *v1.Service
// Controls error back-off // Controls error back-off
lastRetryDelay time.Duration lastRetryDelay time.Duration
} }
@@ -78,7 +78,7 @@ type serviceCache struct {
type ServiceController struct { type ServiceController struct {
cloud cloudprovider.Interface cloud cloudprovider.Interface
knownHosts []string knownHosts []string
servicesToUpdate []*api.Service servicesToUpdate []*v1.Service
kubeClient clientset.Interface kubeClient clientset.Interface
clusterName string clusterName string
balancer cloudprovider.LoadBalancer balancer cloudprovider.LoadBalancer
@@ -100,7 +100,7 @@ type ServiceController struct {
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) { func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) {
broadcaster := record.NewBroadcaster() broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")}) broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"}) recorder := broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"})
if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil { if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter()) metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter())
@@ -121,20 +121,20 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
} }
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer( s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) { ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
return s.kubeClient.Core().Services(api.NamespaceAll).List(options) return s.kubeClient.Core().Services(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return s.kubeClient.Core().Services(api.NamespaceAll).Watch(options) return s.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.Service{}, &v1.Service{},
serviceSyncPeriod, serviceSyncPeriod,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: s.enqueueService, AddFunc: s.enqueueService,
UpdateFunc: func(old, cur interface{}) { UpdateFunc: func(old, cur interface{}) {
oldSvc, ok1 := old.(*api.Service) oldSvc, ok1 := old.(*v1.Service)
curSvc, ok2 := cur.(*api.Service) curSvc, ok2 := cur.(*v1.Service)
if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) { if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
s.enqueueService(cur) s.enqueueService(cur)
} }
@@ -149,7 +149,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
return s, nil return s, nil
} }
// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item. // obj could be a *v1.Service, or a DeletionFinalStateUnknown marker item.
func (s *ServiceController) enqueueService(obj interface{}) { func (s *ServiceController) enqueueService(obj interface{}) {
key, err := controller.KeyFunc(obj) key, err := controller.KeyFunc(obj)
if err != nil { if err != nil {
@@ -175,8 +175,8 @@ func (s *ServiceController) Run(workers int) {
for i := 0; i < workers; i++ { for i := 0; i < workers; i++ {
go wait.Until(s.worker, time.Second, wait.NeverStop) go wait.Until(s.worker, time.Second, wait.NeverStop)
} }
nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything()) nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run() cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run()
go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop) go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop)
} }
@@ -224,7 +224,7 @@ func (s *ServiceController) init() error {
// Returns an error if processing the service update failed, along with a time.Duration // Returns an error if processing the service update failed, along with a time.Duration
// indicating whether processing should be retried; zero means no-retry; otherwise // indicating whether processing should be retried; zero means no-retry; otherwise
// we should retry in that Duration. // we should retry in that Duration.
func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *api.Service, key string) (error, time.Duration) { func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) (error, time.Duration) {
// cache the service, we need the info for service deletion // cache the service, we need the info for service deletion
cachedService.state = service cachedService.state = service
@@ -237,7 +237,7 @@ func (s *ServiceController) processServiceUpdate(cachedService *cachedService, s
message += " (will not retry): " message += " (will not retry): "
} }
message += err.Error() message += err.Error()
s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message) s.eventRecorder.Event(service, v1.EventTypeWarning, "CreatingLoadBalancerFailed", message)
return err, cachedService.nextRetryDelay() return err, cachedService.nextRetryDelay()
} }
@@ -253,13 +253,13 @@ func (s *ServiceController) processServiceUpdate(cachedService *cachedService, s
// Returns whatever error occurred along with a boolean indicator of whether it // Returns whatever error occurred along with a boolean indicator of whether it
// should be retried. // should be retried.
func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *api.Service) (error, bool) { func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.Service) (error, bool) {
// Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create, // Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create,
// which may involve service interruption. Also, we would like user-friendly events. // which may involve service interruption. Also, we would like user-friendly events.
// Save the state so we can avoid a write if it doesn't change // Save the state so we can avoid a write if it doesn't change
previousState := api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer) previousState := v1.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
if !wantsLoadBalancer(service) { if !wantsLoadBalancer(service) {
needDelete := true needDelete := true
@@ -273,31 +273,31 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *api.
if needDelete { if needDelete {
glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key) glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
if err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service); err != nil { if err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service); err != nil {
return err, retryable return err, retryable
} }
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
} }
service.Status.LoadBalancer = api.LoadBalancerStatus{} service.Status.LoadBalancer = v1.LoadBalancerStatus{}
} else { } else {
glog.V(2).Infof("Ensuring LB for service %s", key) glog.V(2).Infof("Ensuring LB for service %s", key)
// TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart // TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart
// The load balancer doesn't exist yet, so create it. // The load balancer doesn't exist yet, so create it.
s.eventRecorder.Event(service, api.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer") s.eventRecorder.Event(service, v1.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer")
err := s.createLoadBalancer(service) err := s.createLoadBalancer(service)
if err != nil { if err != nil {
return fmt.Errorf("Failed to create load balancer for service %s: %v", key, err), retryable return fmt.Errorf("Failed to create load balancer for service %s: %v", key, err), retryable
} }
s.eventRecorder.Event(service, api.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer") s.eventRecorder.Event(service, v1.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer")
} }
// Write the state if changed // Write the state if changed
// TODO: Be careful here ... what if there were other changes to the service? // TODO: Be careful here ... what if there were other changes to the service?
if !api.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) { if !v1.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) {
if err := s.persistUpdate(service); err != nil { if err := s.persistUpdate(service); err != nil {
return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
} }
@@ -308,7 +308,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *api.
return nil, notRetryable return nil, notRetryable
} }
func (s *ServiceController) persistUpdate(service *api.Service) error { func (s *ServiceController) persistUpdate(service *v1.Service) error {
var err error var err error
for i := 0; i < clientRetryCount; i++ { for i := 0; i < clientRetryCount; i++ {
_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service) _, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)
@@ -338,7 +338,7 @@ func (s *ServiceController) persistUpdate(service *api.Service) error {
return err return err
} }
func (s *ServiceController) createLoadBalancer(service *api.Service) error { func (s *ServiceController) createLoadBalancer(service *v1.Service) error {
nodes, err := s.nodeLister.List() nodes, err := s.nodeLister.List()
if err != nil { if err != nil {
return err return err
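
persistUpdate, whose tail is shown in the hunk above, wraps the UpdateStatus call in a bounded retry loop. A hedged sketch of that shape; the retry count, interval, and the NotFound/Conflict short-circuits here are stand-ins for the controller's own constants and error handling:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Assumed values for the sketch; the controller defines its own retry count
// and waits longer between attempts.
const (
	clientRetryCount    = 5
	clientRetryInterval = 10 * time.Millisecond
)

var calls int

// updateStatus stands in for kubeClient.Core().Services(ns).UpdateStatus(service);
// here it fails transiently twice before succeeding.
func updateStatus() error {
	calls++
	if calls < 3 {
		return errors.New("transient apiserver error")
	}
	return nil
}

func persistUpdate() error {
	var err error
	for i := 0; i < clientRetryCount; i++ {
		if err = updateStatus(); err == nil {
			return nil
		}
		// The real loop gives up early on NotFound (object deleted) and on
		// Conflict (someone else wrote first); everything else is retried.
		time.Sleep(clientRetryInterval)
	}
	return err
}

func main() {
	fmt.Println("persistUpdate:", persistUpdate(), "after", calls, "attempts")
}
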
@@ -381,10 +381,10 @@ func (s *serviceCache) GetByKey(key string) (interface{}, bool, error) {
// ListKeys implements the interface required by DeltaFIFO to list the keys we // ListKeys implements the interface required by DeltaFIFO to list the keys we
// already know about. // already know about.
func (s *serviceCache) allServices() []*api.Service { func (s *serviceCache) allServices() []*v1.Service {
s.mu.Lock() s.mu.Lock()
defer s.mu.Unlock() defer s.mu.Unlock()
services := make([]*api.Service, 0, len(s.serviceMap)) services := make([]*v1.Service, 0, len(s.serviceMap))
for _, v := range s.serviceMap { for _, v := range s.serviceMap {
services = append(services, v.state) services = append(services, v.state)
} }
@@ -421,12 +421,12 @@ func (s *serviceCache) delete(serviceName string) {
delete(s.serviceMap, serviceName) delete(s.serviceMap, serviceName)
} }
func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api.Service) bool { func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) { if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) {
return false return false
} }
if wantsLoadBalancer(oldService) != wantsLoadBalancer(newService) { if wantsLoadBalancer(oldService) != wantsLoadBalancer(newService) {
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "Type", "%v -> %v", s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "Type", "%v -> %v",
oldService.Spec.Type, newService.Spec.Type) oldService.Spec.Type, newService.Spec.Type)
return true return true
} }
@@ -434,18 +434,18 @@ func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api
return true return true
} }
if !loadBalancerIPsAreEqual(oldService, newService) { if !loadBalancerIPsAreEqual(oldService, newService) {
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "LoadbalancerIP", "%v -> %v", s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "LoadbalancerIP", "%v -> %v",
oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP) oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP)
return true return true
} }
if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) { if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) {
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "ExternalIP", "Count: %v -> %v", s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Count: %v -> %v",
len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs)) len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs))
return true return true
} }
for i := range oldService.Spec.ExternalIPs { for i := range oldService.Spec.ExternalIPs {
if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] { if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] {
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "ExternalIP", "Added: %v", s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Added: %v",
newService.Spec.ExternalIPs[i]) newService.Spec.ExternalIPs[i])
return true return true
} }
@@ -454,7 +454,7 @@ func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api
return true return true
} }
if oldService.UID != newService.UID { if oldService.UID != newService.UID {
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "UID", "%v -> %v", s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "UID", "%v -> %v",
oldService.UID, newService.UID) oldService.UID, newService.UID)
return true return true
} }
@@ -462,14 +462,14 @@ func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api
return false return false
} }
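
needsUpdate, rewritten above to take *v1.Service, is a series of early returns over the fields that matter to the cloud load balancer. A simplified sketch of the same comparison order (port comparison and the per-change Events are omitted):

package main

import "fmt"

// Simplified stand-in for the handful of v1.Service fields inspected above.
type serviceSpec struct {
	Type           string
	LoadBalancerIP string
	ExternalIPs    []string
	UID            string
}

func wantsLB(s serviceSpec) bool { return s.Type == "LoadBalancer" }

// needsUpdate mirrors the early-return style above: any relevant difference
// means the cloud load balancer must be reconciled.
func needsUpdate(oldSvc, newSvc serviceSpec) bool {
	if !wantsLB(oldSvc) && !wantsLB(newSvc) {
		return false
	}
	if wantsLB(oldSvc) != wantsLB(newSvc) {
		return true
	}
	if oldSvc.LoadBalancerIP != newSvc.LoadBalancerIP {
		return true
	}
	if len(oldSvc.ExternalIPs) != len(newSvc.ExternalIPs) {
		return true
	}
	for i := range oldSvc.ExternalIPs {
		if oldSvc.ExternalIPs[i] != newSvc.ExternalIPs[i] {
			return true
		}
	}
	return oldSvc.UID != newSvc.UID
}

func main() {
	a := serviceSpec{Type: "LoadBalancer", UID: "111"}
	b := serviceSpec{Type: "LoadBalancer", UID: "111", ExternalIPs: []string{"10.0.0.1"}}
	fmt.Println(needsUpdate(a, a), needsUpdate(a, b)) // false true
}
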
func (s *ServiceController) loadBalancerName(service *api.Service) string { func (s *ServiceController) loadBalancerName(service *v1.Service) string {
return cloudprovider.GetLoadBalancerName(service) return cloudprovider.GetLoadBalancerName(service)
} }
func getPortsForLB(service *api.Service) ([]*api.ServicePort, error) { func getPortsForLB(service *v1.Service) ([]*v1.ServicePort, error) {
var protocol api.Protocol var protocol v1.Protocol
ports := []*api.ServicePort{} ports := []*v1.ServicePort{}
for i := range service.Spec.Ports { for i := range service.Spec.Ports {
sp := &service.Spec.Ports[i] sp := &service.Spec.Ports[i]
// The check on protocol was removed here. The cloud provider itself is now responsible for all protocol validation // The check on protocol was removed here. The cloud provider itself is now responsible for all protocol validation
@@ -484,7 +484,7 @@ func getPortsForLB(service *api.Service) ([]*api.ServicePort, error) {
return ports, nil return ports, nil
} }
func portsEqualForLB(x, y *api.Service) bool { func portsEqualForLB(x, y *v1.Service) bool {
xPorts, err := getPortsForLB(x) xPorts, err := getPortsForLB(x)
if err != nil { if err != nil {
return false return false
@@ -496,7 +496,7 @@ func portsEqualForLB(x, y *api.Service) bool {
return portSlicesEqualForLB(xPorts, yPorts) return portSlicesEqualForLB(xPorts, yPorts)
} }
func portSlicesEqualForLB(x, y []*api.ServicePort) bool { func portSlicesEqualForLB(x, y []*v1.ServicePort) bool {
if len(x) != len(y) { if len(x) != len(y) {
return false return false
} }
@@ -509,7 +509,7 @@ func portSlicesEqualForLB(x, y []*api.ServicePort) bool {
return true return true
} }
func portEqualForLB(x, y *api.ServicePort) bool { func portEqualForLB(x, y *v1.ServicePort) bool {
// TODO: Should we check name? (In theory, an LB could expose it) // TODO: Should we check name? (In theory, an LB could expose it)
if x.Name != y.Name { if x.Name != y.Name {
return false return false
@@ -569,11 +569,11 @@ func stringSlicesEqual(x, y []string) bool {
return true return true
} }
func includeNodeFromNodeList(node *api.Node) bool { func includeNodeFromNodeList(node *v1.Node) bool {
return !node.Spec.Unschedulable return !node.Spec.Unschedulable
} }
func hostsFromNodeList(list *api.NodeList) []string { func hostsFromNodeList(list *v1.NodeList) []string {
result := []string{} result := []string{}
for ix := range list.Items { for ix := range list.Items {
if includeNodeFromNodeList(&list.Items[ix]) { if includeNodeFromNodeList(&list.Items[ix]) {
@@ -583,7 +583,7 @@ func hostsFromNodeList(list *api.NodeList) []string {
return result return result
} }
func hostsFromNodeSlice(nodes []*api.Node) []string { func hostsFromNodeSlice(nodes []*v1.Node) []string {
result := []string{} result := []string{}
for _, node := range nodes { for _, node := range nodes {
if includeNodeFromNodeList(node) { if includeNodeFromNodeList(node) {
@@ -594,7 +594,7 @@ func hostsFromNodeSlice(nodes []*api.Node) []string {
} }
func getNodeConditionPredicate() cache.NodeConditionPredicate { func getNodeConditionPredicate() cache.NodeConditionPredicate {
return func(node *api.Node) bool { return func(node *v1.Node) bool {
// We add the master to the node list, but it's unschedulable. So we use this to filter // We add the master to the node list, but it's unschedulable. So we use this to filter
// the master. // the master.
// TODO: Use a node annotation to indicate the master // TODO: Use a node annotation to indicate the master
@@ -608,7 +608,7 @@ func getNodeConditionPredicate() cache.NodeConditionPredicate {
for _, cond := range node.Status.Conditions { for _, cond := range node.Status.Conditions {
// We consider the node for load balancing only when its NodeReady condition status // We consider the node for load balancing only when its NodeReady condition status
// is ConditionTrue // is ConditionTrue
if cond.Type == api.NodeReady && cond.Status != api.ConditionTrue { if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status) glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
return false return false
} }
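
The predicate above admits a node only if it is schedulable and its NodeReady condition is true; the test table later in this section also expects a node with no conditions to be rejected. A self-contained sketch of that filter with toy types instead of v1.Node:

package main

import "fmt"

// Simplified stand-ins for v1.Node and v1.NodeCondition.
type NodeCondition struct {
	Type   string
	Status string
}

type Node struct {
	Name          string
	Unschedulable bool
	Conditions    []NodeCondition
}

// nodeIsReadyAndSchedulable mirrors the shape of getNodeConditionPredicate:
// skip unschedulable nodes (the master is registered that way here), skip
// nodes we have no condition info for, and skip nodes whose NodeReady
// condition is not ConditionTrue.
func nodeIsReadyAndSchedulable(n *Node) bool {
	if n.Unschedulable {
		return false
	}
	if len(n.Conditions) == 0 {
		return false
	}
	for _, cond := range n.Conditions {
		if cond.Type == "Ready" && cond.Status != "True" {
			return false
		}
	}
	return true
}

func main() {
	nodes := []*Node{
		{Name: "node0", Conditions: []NodeCondition{{Type: "Ready", Status: "True"}}},
		{Name: "master", Unschedulable: true},
		{Name: "node1", Conditions: []NodeCondition{{Type: "Ready", Status: "False"}}},
	}
	for _, n := range nodes {
		fmt.Println(n.Name, nodeIsReadyAndSchedulable(n))
	}
}
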
@@ -648,7 +648,7 @@ func (s *ServiceController) nodeSyncLoop() {
// updateLoadBalancerHosts updates all existing load balancers so that // updateLoadBalancerHosts updates all existing load balancers so that
// they will match the list of hosts provided. // they will match the list of hosts provided.
// Returns the list of services that couldn't be updated. // Returns the list of services that couldn't be updated.
func (s *ServiceController) updateLoadBalancerHosts(services []*api.Service, hosts []string) (servicesToRetry []*api.Service) { func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, hosts []string) (servicesToRetry []*v1.Service) {
for _, service := range services { for _, service := range services {
func() { func() {
if service == nil { if service == nil {
@@ -665,7 +665,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*api.Service, hos
// Updates the load balancer of a service, assuming we hold the mutex // Updates the load balancer of a service, assuming we hold the mutex
// associated with the service. // associated with the service.
func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, hosts []string) error { func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, hosts []string) error {
if !wantsLoadBalancer(service) { if !wantsLoadBalancer(service) {
return nil return nil
} }
@@ -673,7 +673,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
// This operation doesn't normally take very long (and happens pretty often), so we only record the final event // This operation doesn't normally take very long (and happens pretty often), so we only record the final event
err := s.balancer.UpdateLoadBalancer(s.clusterName, service, hosts) err := s.balancer.UpdateLoadBalancer(s.clusterName, service, hosts)
if err == nil { if err == nil {
s.eventRecorder.Event(service, api.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts") s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
return nil return nil
} }
@@ -684,15 +684,15 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
return nil return nil
} }
s.eventRecorder.Eventf(service, api.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err) s.eventRecorder.Eventf(service, v1.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
return err return err
} }
func wantsLoadBalancer(service *api.Service) bool { func wantsLoadBalancer(service *v1.Service) bool {
return service.Spec.Type == api.ServiceTypeLoadBalancer return service.Spec.Type == v1.ServiceTypeLoadBalancer
} }
func loadBalancerIPsAreEqual(oldService, newService *api.Service) bool { func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool {
return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP
} }
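
updateLoadBalancerHosts, shown above, walks every cached service, skips nils, and hands back the services whose cloud update failed so the node-sync loop can retry them later. A sketch of that collect-the-failures shape with stand-in types and a fake per-service update:

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-in for v1.Service.
type Service struct {
	Name    string
	WantsLB bool
}

// updateOne stands in for lockedUpdateLoadBalancerHosts: it is a no-op for
// services that don't want a load balancer and may fail for the rest.
func updateOne(s *Service, hosts []string) error {
	if !s.WantsLB {
		return nil
	}
	if s.Name == "flaky" {
		return errors.New("cloud provider error")
	}
	fmt.Printf("updated %s with hosts %v\n", s.Name, hosts)
	return nil
}

// updateLoadBalancerHosts mirrors the controller's shape: try every service,
// skip nils, and return the ones that failed so the caller can retry them.
func updateLoadBalancerHosts(services []*Service, hosts []string) (servicesToRetry []*Service) {
	for _, service := range services {
		if service == nil {
			continue
		}
		if err := updateOne(service, hosts); err != nil {
			servicesToRetry = append(servicesToRetry, service)
		}
	}
	return servicesToRetry
}

func main() {
	hosts := []string{"node0", "node1"}
	svcs := []*Service{{Name: "web", WantsLB: true}, nil, {Name: "flaky", WantsLB: true}, {Name: "internal"}}
	retry := updateLoadBalancerHosts(svcs, hosts)
	fmt.Println("to retry:", len(retry))
}
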
@@ -736,7 +736,7 @@ func (s *ServiceController) syncService(key string) error {
glog.Infof("Service has been deleted %v", key) glog.Infof("Service has been deleted %v", key)
err, retryDelay = s.processServiceDeletion(key) err, retryDelay = s.processServiceDeletion(key)
} else { } else {
service, ok := obj.(*api.Service) service, ok := obj.(*v1.Service)
if ok { if ok {
cachedService = s.cache.getOrCreate(key) cachedService = s.cache.getOrCreate(key)
err, retryDelay = s.processServiceUpdate(cachedService, service, key) err, retryDelay = s.processServiceUpdate(cachedService, service, key)
@@ -778,14 +778,14 @@ func (s *ServiceController) processServiceDeletion(key string) (error, time.Dura
if !wantsLoadBalancer(service) { if !wantsLoadBalancer(service) {
return nil, doNotRetry return nil, doNotRetry
} }
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer") s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service) err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service)
if err != nil { if err != nil {
message := "Error deleting load balancer (will retry): " + err.Error() message := "Error deleting load balancer (will retry): " + err.Error()
s.eventRecorder.Event(service, api.EventTypeWarning, "DeletingLoadBalancerFailed", message) s.eventRecorder.Event(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", message)
return err, cachedService.nextRetryDelay() return err, cachedService.nextRetryDelay()
} }
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer") s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
s.cache.delete(key) s.cache.delete(key)
cachedService.resetRetryDelay() cachedService.resetRetryDelay()
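
processServiceDeletion returns cachedService.nextRetryDelay() on failure and calls resetRetryDelay() once the delete sticks. The backoff policy itself is defined elsewhere in this file; a plausible sketch, assuming a doubling delay clamped between a minimum and a maximum:

package main

import (
	"fmt"
	"time"
)

// Assumed bounds for the sketch; the controller defines its own limits.
const (
	minRetryDelay = 5 * time.Second
	maxRetryDelay = 5 * time.Minute
)

type cachedService struct {
	lastRetryDelay time.Duration
}

// nextRetryDelay doubles the previous delay, clamped to [minRetryDelay, maxRetryDelay].
func (s *cachedService) nextRetryDelay() time.Duration {
	s.lastRetryDelay = s.lastRetryDelay * 2
	if s.lastRetryDelay < minRetryDelay {
		s.lastRetryDelay = minRetryDelay
	}
	if s.lastRetryDelay > maxRetryDelay {
		s.lastRetryDelay = maxRetryDelay
	}
	return s.lastRetryDelay
}

// resetRetryDelay clears the backoff after a successful sync.
func (s *cachedService) resetRetryDelay() {
	s.lastRetryDelay = 0
}

func main() {
	c := &cachedService{}
	for i := 0; i < 4; i++ {
		fmt.Println(c.nextRetryDelay())
	}
	c.resetRetryDelay()
	fmt.Println(c.nextRetryDelay())
}
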


@@ -20,69 +20,69 @@ import (
"reflect" "reflect"
"testing" "testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi" "k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake" fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
) )
const region = "us-central" const region = "us-central"
func newService(name string, uid types.UID, serviceType api.ServiceType) *api.Service { func newService(name string, uid types.UID, serviceType v1.ServiceType) *v1.Service {
return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: api.ServiceSpec{Type: serviceType}} return &v1.Service{ObjectMeta: v1.ObjectMeta{Name: name, Namespace: "namespace", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: v1.ServiceSpec{Type: serviceType}}
} }
func TestCreateExternalLoadBalancer(t *testing.T) { func TestCreateExternalLoadBalancer(t *testing.T) {
table := []struct { table := []struct {
service *api.Service service *v1.Service
expectErr bool expectErr bool
expectCreateAttempt bool expectCreateAttempt bool
}{ }{
{ {
service: &api.Service{ service: &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "no-external-balancer", Name: "no-external-balancer",
Namespace: "default", Namespace: "default",
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Type: api.ServiceTypeClusterIP, Type: v1.ServiceTypeClusterIP,
}, },
}, },
expectErr: false, expectErr: false,
expectCreateAttempt: false, expectCreateAttempt: false,
}, },
{ {
service: &api.Service{ service: &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "udp-service", Name: "udp-service",
Namespace: "default", Namespace: "default",
SelfLink: testapi.Default.SelfLink("services", "udp-service"), SelfLink: testapi.Default.SelfLink("services", "udp-service"),
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
Protocol: api.ProtocolUDP, Protocol: v1.ProtocolUDP,
}}, }},
Type: api.ServiceTypeLoadBalancer, Type: v1.ServiceTypeLoadBalancer,
}, },
}, },
expectErr: false, expectErr: false,
expectCreateAttempt: true, expectCreateAttempt: true,
}, },
{ {
service: &api.Service{ service: &v1.Service{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "basic-service1", Name: "basic-service1",
Namespace: "default", Namespace: "default",
SelfLink: testapi.Default.SelfLink("services", "basic-service1"), SelfLink: testapi.Default.SelfLink("services", "basic-service1"),
}, },
Spec: api.ServiceSpec{ Spec: v1.ServiceSpec{
Ports: []api.ServicePort{{ Ports: []v1.ServicePort{{
Port: 80, Port: 80,
Protocol: api.ProtocolTCP, Protocol: v1.ProtocolTCP,
}}, }},
Type: api.ServiceTypeLoadBalancer, Type: v1.ServiceTypeLoadBalancer,
}, },
}, },
expectErr: false, expectErr: false,
@@ -147,65 +147,65 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
func TestUpdateNodesInExternalLoadBalancer(t *testing.T) { func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
hosts := []string{"node0", "node1", "node73"} hosts := []string{"node0", "node1", "node73"}
table := []struct { table := []struct {
services []*api.Service services []*v1.Service
expectedUpdateCalls []fakecloud.FakeUpdateBalancerCall expectedUpdateCalls []fakecloud.FakeUpdateBalancerCall
}{ }{
{ {
// No services present: no calls should be made. // No services present: no calls should be made.
services: []*api.Service{}, services: []*v1.Service{},
expectedUpdateCalls: nil, expectedUpdateCalls: nil,
}, },
{ {
// Services do not have external load balancers: no calls should be made. // Services do not have external load balancers: no calls should be made.
services: []*api.Service{ services: []*v1.Service{
newService("s0", "111", api.ServiceTypeClusterIP), newService("s0", "111", v1.ServiceTypeClusterIP),
newService("s1", "222", api.ServiceTypeNodePort), newService("s1", "222", v1.ServiceTypeNodePort),
}, },
expectedUpdateCalls: nil, expectedUpdateCalls: nil,
}, },
{ {
// One service has an external load balancer: one call should be made. // One service has an external load balancer: one call should be made.
services: []*api.Service{ services: []*v1.Service{
newService("s0", "333", api.ServiceTypeLoadBalancer), newService("s0", "333", v1.ServiceTypeLoadBalancer),
}, },
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s0", "333", api.ServiceTypeLoadBalancer), hosts}, {newService("s0", "333", v1.ServiceTypeLoadBalancer), hosts},
}, },
}, },
{ {
// Three services have an external load balancer: three calls. // Three services have an external load balancer: three calls.
services: []*api.Service{ services: []*v1.Service{
newService("s0", "444", api.ServiceTypeLoadBalancer), newService("s0", "444", v1.ServiceTypeLoadBalancer),
newService("s1", "555", api.ServiceTypeLoadBalancer), newService("s1", "555", v1.ServiceTypeLoadBalancer),
newService("s2", "666", api.ServiceTypeLoadBalancer), newService("s2", "666", v1.ServiceTypeLoadBalancer),
}, },
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s0", "444", api.ServiceTypeLoadBalancer), hosts}, {newService("s0", "444", v1.ServiceTypeLoadBalancer), hosts},
{newService("s1", "555", api.ServiceTypeLoadBalancer), hosts}, {newService("s1", "555", v1.ServiceTypeLoadBalancer), hosts},
{newService("s2", "666", api.ServiceTypeLoadBalancer), hosts}, {newService("s2", "666", v1.ServiceTypeLoadBalancer), hosts},
}, },
}, },
{ {
// Two services have an external load balancer and two don't: two calls. // Two services have an external load balancer and two don't: two calls.
services: []*api.Service{ services: []*v1.Service{
newService("s0", "777", api.ServiceTypeNodePort), newService("s0", "777", v1.ServiceTypeNodePort),
newService("s1", "888", api.ServiceTypeLoadBalancer), newService("s1", "888", v1.ServiceTypeLoadBalancer),
newService("s3", "999", api.ServiceTypeLoadBalancer), newService("s3", "999", v1.ServiceTypeLoadBalancer),
newService("s4", "123", api.ServiceTypeClusterIP), newService("s4", "123", v1.ServiceTypeClusterIP),
}, },
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s1", "888", api.ServiceTypeLoadBalancer), hosts}, {newService("s1", "888", v1.ServiceTypeLoadBalancer), hosts},
{newService("s3", "999", api.ServiceTypeLoadBalancer), hosts}, {newService("s3", "999", v1.ServiceTypeLoadBalancer), hosts},
}, },
}, },
{ {
// One service has an external load balancer and one is nil: one call. // One service has an external load balancer and one is nil: one call.
services: []*api.Service{ services: []*v1.Service{
newService("s0", "234", api.ServiceTypeLoadBalancer), newService("s0", "234", v1.ServiceTypeLoadBalancer),
nil, nil,
}, },
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{ expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s0", "234", api.ServiceTypeLoadBalancer), hosts}, {newService("s0", "234", v1.ServiceTypeLoadBalancer), hosts},
}, },
}, },
} }
@@ -218,7 +218,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
controller.init() controller.init()
cloud.Calls = nil // ignore any cloud calls made in init() cloud.Calls = nil // ignore any cloud calls made in init()
var services []*api.Service var services []*v1.Service
for _, service := range item.services { for _, service := range item.services {
services = append(services, service) services = append(services, service)
} }
@@ -233,43 +233,43 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
func TestHostsFromNodeList(t *testing.T) { func TestHostsFromNodeList(t *testing.T) {
tests := []struct { tests := []struct {
nodes *api.NodeList nodes *v1.NodeList
expectedHosts []string expectedHosts []string
}{ }{
{ {
nodes: &api.NodeList{}, nodes: &v1.NodeList{},
expectedHosts: []string{}, expectedHosts: []string{},
}, },
{ {
nodes: &api.NodeList{ nodes: &v1.NodeList{
Items: []api.Node{ Items: []v1.Node{
{ {
ObjectMeta: api.ObjectMeta{Name: "foo"}, ObjectMeta: v1.ObjectMeta{Name: "foo"},
Status: api.NodeStatus{Phase: api.NodeRunning}, Status: v1.NodeStatus{Phase: v1.NodeRunning},
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "bar"}, ObjectMeta: v1.ObjectMeta{Name: "bar"},
Status: api.NodeStatus{Phase: api.NodeRunning}, Status: v1.NodeStatus{Phase: v1.NodeRunning},
}, },
}, },
}, },
expectedHosts: []string{"foo", "bar"}, expectedHosts: []string{"foo", "bar"},
}, },
{ {
nodes: &api.NodeList{ nodes: &v1.NodeList{
Items: []api.Node{ Items: []v1.Node{
{ {
ObjectMeta: api.ObjectMeta{Name: "foo"}, ObjectMeta: v1.ObjectMeta{Name: "foo"},
Status: api.NodeStatus{Phase: api.NodeRunning}, Status: v1.NodeStatus{Phase: v1.NodeRunning},
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "bar"}, ObjectMeta: v1.ObjectMeta{Name: "bar"},
Status: api.NodeStatus{Phase: api.NodeRunning}, Status: v1.NodeStatus{Phase: v1.NodeRunning},
}, },
{ {
ObjectMeta: api.ObjectMeta{Name: "unschedulable"}, ObjectMeta: v1.ObjectMeta{Name: "unschedulable"},
Spec: api.NodeSpec{Unschedulable: true}, Spec: v1.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{Phase: api.NodeRunning}, Status: v1.NodeStatus{Phase: v1.NodeRunning},
}, },
}, },
}, },
@@ -287,20 +287,20 @@ func TestHostsFromNodeList(t *testing.T) {
func TestGetNodeConditionPredicate(t *testing.T) { func TestGetNodeConditionPredicate(t *testing.T) {
tests := []struct { tests := []struct {
node api.Node node v1.Node
expectAccept bool expectAccept bool
name string name string
}{ }{
{ {
node: api.Node{}, node: v1.Node{},
expectAccept: false, expectAccept: false,
name: "empty", name: "empty",
}, },
{ {
node: api.Node{ node: v1.Node{
Status: api.NodeStatus{ Status: v1.NodeStatus{
Conditions: []api.NodeCondition{ Conditions: []v1.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: v1.NodeReady, Status: v1.ConditionTrue},
}, },
}, },
}, },
@@ -308,11 +308,11 @@ func TestGetNodeConditionPredicate(t *testing.T) {
name: "basic", name: "basic",
}, },
{ {
node: api.Node{ node: v1.Node{
Spec: api.NodeSpec{Unschedulable: true}, Spec: v1.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{ Status: v1.NodeStatus{
Conditions: []api.NodeCondition{ Conditions: []v1.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: v1.NodeReady, Status: v1.ConditionTrue},
}, },
}, },
}, },


@@ -21,11 +21,11 @@ import (
"time" "time"
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors" apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/meta" "k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
utilerrors "k8s.io/kubernetes/pkg/util/errors" utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/metrics" "k8s.io/kubernetes/pkg/util/metrics"
@@ -46,7 +46,7 @@ func nameIndexFunc(obj interface{}) ([]string, error) {
// ServiceAccountsControllerOptions contains options for running a ServiceAccountsController // ServiceAccountsControllerOptions contains options for running a ServiceAccountsController
type ServiceAccountsControllerOptions struct { type ServiceAccountsControllerOptions struct {
// ServiceAccounts is the list of service accounts to ensure exist in every namespace // ServiceAccounts is the list of service accounts to ensure exist in every namespace
ServiceAccounts []api.ServiceAccount ServiceAccounts []v1.ServiceAccount
// ServiceAccountResync is the interval between full resyncs of ServiceAccounts. // ServiceAccountResync is the interval between full resyncs of ServiceAccounts.
// If non-zero, all service accounts will be re-listed this often. // If non-zero, all service accounts will be re-listed this often.
@@ -61,8 +61,8 @@ type ServiceAccountsControllerOptions struct {
func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions { func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions {
return ServiceAccountsControllerOptions{ return ServiceAccountsControllerOptions{
ServiceAccounts: []api.ServiceAccount{ ServiceAccounts: []v1.ServiceAccount{
{ObjectMeta: api.ObjectMeta{Name: "default"}}, {ObjectMeta: v1.ObjectMeta{Name: "default"}},
}, },
} }
} }
@@ -99,7 +99,7 @@ func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, n
// ServiceAccountsController manages ServiceAccount objects inside Namespaces // ServiceAccountsController manages ServiceAccount objects inside Namespaces
type ServiceAccountsController struct { type ServiceAccountsController struct {
client clientset.Interface client clientset.Interface
serviceAccountsToEnsure []api.ServiceAccount serviceAccountsToEnsure []v1.ServiceAccount
// To allow injection for testing. // To allow injection for testing.
syncHandler func(key string) error syncHandler func(key string) error
@@ -133,14 +133,14 @@ func (c *ServiceAccountsController) Run(workers int, stopCh <-chan struct{}) {
// serviceAccountDeleted reacts to a ServiceAccount deletion by recreating a default ServiceAccount in the namespace if needed // serviceAccountDeleted reacts to a ServiceAccount deletion by recreating a default ServiceAccount in the namespace if needed
func (c *ServiceAccountsController) serviceAccountDeleted(obj interface{}) { func (c *ServiceAccountsController) serviceAccountDeleted(obj interface{}) {
sa, ok := obj.(*api.ServiceAccount) sa, ok := obj.(*v1.ServiceAccount)
if !ok { if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown) tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok { if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return return
} }
sa, ok = tombstone.Obj.(*api.ServiceAccount) sa, ok = tombstone.Obj.(*v1.ServiceAccount)
if !ok { if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ServiceAccount %#v", obj)) utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ServiceAccount %#v", obj))
return return
@@ -151,13 +151,13 @@ func (c *ServiceAccountsController) serviceAccountDeleted(obj interface{}) {
// namespaceAdded reacts to a Namespace creation by creating a default ServiceAccount object // namespaceAdded reacts to a Namespace creation by creating a default ServiceAccount object
func (c *ServiceAccountsController) namespaceAdded(obj interface{}) { func (c *ServiceAccountsController) namespaceAdded(obj interface{}) {
namespace := obj.(*api.Namespace) namespace := obj.(*v1.Namespace)
c.queue.Add(namespace.Name) c.queue.Add(namespace.Name)
} }
// namespaceUpdated reacts to a Namespace update (or re-list) by creating a default ServiceAccount in the namespace if needed // namespaceUpdated reacts to a Namespace update (or re-list) by creating a default ServiceAccount in the namespace if needed
func (c *ServiceAccountsController) namespaceUpdated(oldObj interface{}, newObj interface{}) { func (c *ServiceAccountsController) namespaceUpdated(oldObj interface{}, newObj interface{}) {
newNamespace := newObj.(*api.Namespace) newNamespace := newObj.(*v1.Namespace)
c.queue.Add(newNamespace.Name) c.queue.Add(newNamespace.Name)
} }
@@ -198,7 +198,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
if err != nil { if err != nil {
return err return err
} }
if ns.Status.Phase != api.NamespaceActive { if ns.Status.Phase != v1.NamespaceActive {
// If namespace is not active, we shouldn't try to create anything // If namespace is not active, we shouldn't try to create anything
return nil return nil
} }
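
syncNamespace, whose guard is shown above, only ensures the configured default ServiceAccounts when the namespace is Active. A self-contained sketch of that reconciliation step; the cache lookup and the tolerance for AlreadyExists errors in the real controller are reduced to a plain map here:

package main

import "fmt"

type Namespace struct {
	Name  string
	Phase string // "Active" or "Terminating"
}

type ServiceAccount struct {
	Namespace string
	Name      string
}

// existing indexes service accounts by namespace/name, standing in for the lister cache.
var existing = map[string]bool{"default/default": true}

// create stands in for client.Core().ServiceAccounts(ns).Create(&sa).
func create(sa ServiceAccount) {
	existing[sa.Namespace+"/"+sa.Name] = true
	fmt.Printf("created %s/%s\n", sa.Namespace, sa.Name)
}

// syncNamespace mirrors the controller's shape: skip non-active namespaces,
// then create any of the configured accounts that are missing.
func syncNamespace(ns Namespace, want []string) {
	if ns.Phase != "Active" {
		// If the namespace is not active, we shouldn't try to create anything.
		return
	}
	for _, name := range want {
		if !existing[ns.Name+"/"+name] {
			create(ServiceAccount{Namespace: ns.Name, Name: name})
		}
	}
}

func main() {
	syncNamespace(Namespace{Name: "default", Phase: "Active"}, []string{"default"})
	syncNamespace(Namespace{Name: "dev", Phase: "Active"}, []string{"default"})
	syncNamespace(Namespace{Name: "old", Phase: "Terminating"}, []string{"default"})
}
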


@@ -20,9 +20,9 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller" "k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
@@ -35,39 +35,39 @@ type serverResponse struct {
} }
func TestServiceAccountCreation(t *testing.T) { func TestServiceAccountCreation(t *testing.T) {
ns := api.NamespaceDefault ns := v1.NamespaceDefault
defaultName := "default" defaultName := "default"
managedName := "managed" managedName := "managed"
activeNS := &api.Namespace{ activeNS := &v1.Namespace{
ObjectMeta: api.ObjectMeta{Name: ns}, ObjectMeta: v1.ObjectMeta{Name: ns},
Status: api.NamespaceStatus{ Status: v1.NamespaceStatus{
Phase: api.NamespaceActive, Phase: v1.NamespaceActive,
}, },
} }
terminatingNS := &api.Namespace{ terminatingNS := &v1.Namespace{
ObjectMeta: api.ObjectMeta{Name: ns}, ObjectMeta: v1.ObjectMeta{Name: ns},
Status: api.NamespaceStatus{ Status: v1.NamespaceStatus{
Phase: api.NamespaceTerminating, Phase: v1.NamespaceTerminating,
}, },
} }
defaultServiceAccount := &api.ServiceAccount{ defaultServiceAccount := &v1.ServiceAccount{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: defaultName, Name: defaultName,
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
} }
managedServiceAccount := &api.ServiceAccount{ managedServiceAccount := &v1.ServiceAccount{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: managedName, Name: managedName,
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
}, },
} }
unmanagedServiceAccount := &api.ServiceAccount{ unmanagedServiceAccount := &v1.ServiceAccount{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "other-unmanaged", Name: "other-unmanaged",
Namespace: ns, Namespace: ns,
ResourceVersion: "1", ResourceVersion: "1",
@@ -75,54 +75,54 @@ func TestServiceAccountCreation(t *testing.T) {
} }
testcases := map[string]struct { testcases := map[string]struct {
ExistingNamespace *api.Namespace ExistingNamespace *v1.Namespace
ExistingServiceAccounts []*api.ServiceAccount ExistingServiceAccounts []*v1.ServiceAccount
AddedNamespace *api.Namespace AddedNamespace *v1.Namespace
UpdatedNamespace *api.Namespace UpdatedNamespace *v1.Namespace
DeletedServiceAccount *api.ServiceAccount DeletedServiceAccount *v1.ServiceAccount
ExpectCreatedServiceAccounts []string ExpectCreatedServiceAccounts []string
}{ }{
"new active namespace missing serviceaccounts": { "new active namespace missing serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{}, ExistingServiceAccounts: []*v1.ServiceAccount{},
AddedNamespace: activeNS, AddedNamespace: activeNS,
ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(), ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(),
}, },
"new active namespace missing serviceaccount": { "new active namespace missing serviceaccount": {
ExistingServiceAccounts: []*api.ServiceAccount{managedServiceAccount}, ExistingServiceAccounts: []*v1.ServiceAccount{managedServiceAccount},
AddedNamespace: activeNS, AddedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{defaultName}, ExpectCreatedServiceAccounts: []string{defaultName},
}, },
"new active namespace with serviceaccounts": { "new active namespace with serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount}, ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount, managedServiceAccount},
AddedNamespace: activeNS, AddedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
}, },
"new terminating namespace": { "new terminating namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{}, ExistingServiceAccounts: []*v1.ServiceAccount{},
AddedNamespace: terminatingNS, AddedNamespace: terminatingNS,
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
}, },
"updated active namespace missing serviceaccounts": { "updated active namespace missing serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{}, ExistingServiceAccounts: []*v1.ServiceAccount{},
UpdatedNamespace: activeNS, UpdatedNamespace: activeNS,
ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(), ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(),
}, },
"updated active namespace missing serviceaccount": { "updated active namespace missing serviceaccount": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount}, ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount},
UpdatedNamespace: activeNS, UpdatedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{managedName}, ExpectCreatedServiceAccounts: []string{managedName},
}, },
"updated active namespace with serviceaccounts": { "updated active namespace with serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount}, ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount, managedServiceAccount},
UpdatedNamespace: activeNS, UpdatedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
}, },
"updated terminating namespace": { "updated terminating namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{}, ExistingServiceAccounts: []*v1.ServiceAccount{},
UpdatedNamespace: terminatingNS, UpdatedNamespace: terminatingNS,
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
}, },
@@ -132,7 +132,7 @@ func TestServiceAccountCreation(t *testing.T) {
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
}, },
"deleted serviceaccount with active namespace": { "deleted serviceaccount with active namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{managedServiceAccount}, ExistingServiceAccounts: []*v1.ServiceAccount{managedServiceAccount},
ExistingNamespace: activeNS, ExistingNamespace: activeNS,
DeletedServiceAccount: defaultServiceAccount, DeletedServiceAccount: defaultServiceAccount,
ExpectCreatedServiceAccounts: []string{defaultName}, ExpectCreatedServiceAccounts: []string{defaultName},
@@ -143,7 +143,7 @@ func TestServiceAccountCreation(t *testing.T) {
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
}, },
"deleted unmanaged serviceaccount with active namespace": { "deleted unmanaged serviceaccount with active namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount}, ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount, managedServiceAccount},
ExistingNamespace: activeNS, ExistingNamespace: activeNS,
DeletedServiceAccount: unmanagedServiceAccount, DeletedServiceAccount: unmanagedServiceAccount,
ExpectCreatedServiceAccounts: []string{}, ExpectCreatedServiceAccounts: []string{},
@@ -157,11 +157,11 @@ func TestServiceAccountCreation(t *testing.T) {
for k, tc := range testcases { for k, tc := range testcases {
client := fake.NewSimpleClientset(defaultServiceAccount, managedServiceAccount) client := fake.NewSimpleClientset(defaultServiceAccount, managedServiceAccount)
informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc()) informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), nil, controller.NoResyncPeriodFunc())
options := DefaultServiceAccountsControllerOptions() options := DefaultServiceAccountsControllerOptions()
options.ServiceAccounts = []api.ServiceAccount{ options.ServiceAccounts = []v1.ServiceAccount{
{ObjectMeta: api.ObjectMeta{Name: defaultName}}, {ObjectMeta: v1.ObjectMeta{Name: defaultName}},
{ObjectMeta: api.ObjectMeta{Name: managedName}}, {ObjectMeta: v1.ObjectMeta{Name: managedName}},
} }
controller := NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), client, options) controller := NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), client, options)
controller.saLister = &cache.StoreToServiceAccountLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})} controller.saLister = &cache.StoreToServiceAccountLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
@@ -220,7 +220,7 @@ func TestServiceAccountCreation(t *testing.T) {
t.Errorf("%s: Unexpected action %s", k, action) t.Errorf("%s: Unexpected action %s", k, action)
break break
} }
createdAccount := action.(core.CreateAction).GetObject().(*api.ServiceAccount) createdAccount := action.(core.CreateAction).GetObject().(*v1.ServiceAccount)
if createdAccount.Name != expectedName { if createdAccount.Name != expectedName {
t.Errorf("%s: Expected %s to be created, got %s", k, expectedName, createdAccount.Name) t.Errorf("%s: Expected %s to be created, got %s", k, expectedName, createdAccount.Name)
} }


@@ -18,7 +18,8 @@ package serviceaccount
import ( import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/registry/core/secret" "k8s.io/kubernetes/pkg/registry/core/secret"
secretetcd "k8s.io/kubernetes/pkg/registry/core/secret/etcd" secretetcd "k8s.io/kubernetes/pkg/registry/core/secret/etcd"
serviceaccountregistry "k8s.io/kubernetes/pkg/registry/core/serviceaccount" serviceaccountregistry "k8s.io/kubernetes/pkg/registry/core/serviceaccount"
@@ -40,10 +41,10 @@ type clientGetter struct {
func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTokenGetter { func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTokenGetter {
return clientGetter{c} return clientGetter{c}
} }
func (c clientGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) { func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) {
return c.client.Core().ServiceAccounts(namespace).Get(name) return c.client.Core().ServiceAccounts(namespace).Get(name)
} }
func (c clientGetter) GetSecret(namespace, name string) (*api.Secret, error) { func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
return c.client.Core().Secrets(namespace).Get(name) return c.client.Core().Secrets(namespace).Get(name)
} }
@@ -58,13 +59,27 @@ type registryGetter struct {
func NewGetterFromRegistries(serviceAccounts serviceaccountregistry.Registry, secrets secret.Registry) serviceaccount.ServiceAccountTokenGetter { func NewGetterFromRegistries(serviceAccounts serviceaccountregistry.Registry, secrets secret.Registry) serviceaccount.ServiceAccountTokenGetter {
return &registryGetter{serviceAccounts, secrets} return &registryGetter{serviceAccounts, secrets}
} }
func (r *registryGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) { func (r *registryGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) {
ctx := api.WithNamespace(api.NewContext(), namespace) ctx := api.WithNamespace(api.NewContext(), namespace)
return r.serviceAccounts.GetServiceAccount(ctx, name) internalServiceAccount, err := r.serviceAccounts.GetServiceAccount(ctx, name)
if err != nil {
return nil, err
}
v1ServiceAccount := v1.ServiceAccount{}
err = v1.Convert_api_ServiceAccount_To_v1_ServiceAccount(internalServiceAccount, &v1ServiceAccount, nil)
return &v1ServiceAccount, err
} }
func (r *registryGetter) GetSecret(namespace, name string) (*api.Secret, error) { func (r *registryGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
ctx := api.WithNamespace(api.NewContext(), namespace) ctx := api.WithNamespace(api.NewContext(), namespace)
return r.secrets.GetSecret(ctx, name) internalSecret, err := r.secrets.GetSecret(ctx, name)
if err != nil {
return nil, err
}
v1Secret := v1.Secret{}
err = v1.Convert_api_Secret_To_v1_Secret(internalSecret, &v1Secret, nil)
return &v1Secret, err
} }
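
The registry getter above now fetches the internal object and runs it through the generated converter (v1.Convert_api_ServiceAccount_To_v1_ServiceAccount, v1.Convert_api_Secret_To_v1_Secret) before returning the versioned type. A toy sketch of that wrap-and-convert shape, with hand-written stand-ins for the generated functions:

package main

import "fmt"

// Toy stand-ins for the internal and versioned types.
type internalSecret struct{ Name string }
type v1Secret struct{ Name string }

// convertSecret stands in for the generated v1.Convert_api_Secret_To_v1_Secret.
func convertSecret(in *internalSecret, out *v1Secret) error {
	out.Name = in.Name
	return nil
}

// getInternal stands in for the registry call that still speaks internal types.
func getInternal(name string) (*internalSecret, error) {
	return &internalSecret{Name: name}, nil
}

// GetSecret mirrors the getter's shape: fetch internal, convert, return versioned.
func GetSecret(name string) (*v1Secret, error) {
	internal, err := getInternal(name)
	if err != nil {
		return nil, err
	}
	out := v1Secret{}
	err = convertSecret(internal, &out)
	return &out, err
}

func main() {
	s, err := GetSecret("token-abc")
	fmt.Println(s.Name, err)
}
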
// NewGetterFromStorageInterface returns a ServiceAccountTokenGetter that // NewGetterFromStorageInterface returns a ServiceAccountTokenGetter that


@@ -24,8 +24,9 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors" apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
clientretry "k8s.io/kubernetes/pkg/client/retry" clientretry "k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/registry/core/secret" "k8s.io/kubernetes/pkg/registry/core/secret"
@@ -91,14 +92,14 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
e.serviceAccounts, e.serviceAccountController = cache.NewInformer( e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options) return e.client.Core().ServiceAccounts(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options) return e.client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.ServiceAccount{}, &v1.ServiceAccount{},
options.ServiceAccountResync, options.ServiceAccountResync,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: e.queueServiceAccountSync, AddFunc: e.queueServiceAccountSync,
@@ -107,19 +108,19 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
}, },
) )
tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)}) tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)})
e.secrets, e.secretController = cache.NewIndexerInformer( e.secrets, e.secretController = cache.NewIndexerInformer(
&cache.ListWatch{ &cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) { ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector options.FieldSelector = tokenSelector.String()
return e.client.Core().Secrets(api.NamespaceAll).List(options) return e.client.Core().Secrets(v1.NamespaceAll).List(options)
}, },
WatchFunc: func(options api.ListOptions) (watch.Interface, error) { WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.FieldSelector = tokenSelector options.FieldSelector = tokenSelector.String()
return e.client.Core().Secrets(api.NamespaceAll).Watch(options) return e.client.Core().Secrets(v1.NamespaceAll).Watch(options)
}, },
}, },
&api.Secret{}, &v1.Secret{},
options.SecretResync, options.SecretResync,
cache.ResourceEventHandlerFuncs{ cache.ResourceEventHandlerFuncs{
AddFunc: e.queueSecretSync, AddFunc: e.queueSecretSync,
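
One behavioural detail of the client switch is visible above: the internal api.ListOptions carried a typed fields.Selector, while v1.ListOptions.FieldSelector is a plain string, hence the added .String() calls. A small sketch of building that selector; it assumes the in-tree pkg/fields package at this commit, and the literal secret type mirrors v1.SecretTypeServiceAccountToken:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fields"
)

func main() {
	// Select only ServiceAccountToken secrets, as the tokens controller does.
	// "type" is the secret type field (api.SecretTypeField); the value mirrors
	// v1.SecretTypeServiceAccountToken.
	sel := fields.SelectorFromSet(fields.Set{"type": "kubernetes.io/service-account-token"})

	// The versioned ListOptions wants the string form of the selector.
	fieldSelector := sel.String()
	fmt.Println(fieldSelector) // type=kubernetes.io/service-account-token
}
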
@@ -190,13 +191,13 @@ func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
} }
func (e *TokensController) queueServiceAccountSync(obj interface{}) { func (e *TokensController) queueServiceAccountSync(obj interface{}) {
if serviceAccount, ok := obj.(*api.ServiceAccount); ok { if serviceAccount, ok := obj.(*v1.ServiceAccount); ok {
e.syncServiceAccountQueue.Add(makeServiceAccountKey(serviceAccount)) e.syncServiceAccountQueue.Add(makeServiceAccountKey(serviceAccount))
} }
} }
func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, newObj interface{}) { func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, newObj interface{}) {
if serviceAccount, ok := newObj.(*api.ServiceAccount); ok { if serviceAccount, ok := newObj.(*v1.ServiceAccount); ok {
e.syncServiceAccountQueue.Add(makeServiceAccountKey(serviceAccount)) e.syncServiceAccountQueue.Add(makeServiceAccountKey(serviceAccount))
} }
} }
@@ -219,13 +220,13 @@ func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface,
} }
func (e *TokensController) queueSecretSync(obj interface{}) { func (e *TokensController) queueSecretSync(obj interface{}) {
if secret, ok := obj.(*api.Secret); ok { if secret, ok := obj.(*v1.Secret); ok {
e.syncSecretQueue.Add(makeSecretQueueKey(secret)) e.syncSecretQueue.Add(makeSecretQueueKey(secret))
} }
} }
func (e *TokensController) queueSecretUpdateSync(oldObj interface{}, newObj interface{}) { func (e *TokensController) queueSecretUpdateSync(oldObj interface{}, newObj interface{}) {
if secret, ok := newObj.(*api.Secret); ok { if secret, ok := newObj.(*v1.Secret); ok {
e.syncSecretQueue.Add(makeSecretQueueKey(secret)) e.syncSecretQueue.Add(makeSecretQueueKey(secret))
} }
} }
@@ -256,7 +257,7 @@ func (e *TokensController) syncServiceAccount() {
case sa == nil: case sa == nil:
// service account no longer exists, so delete related tokens // service account no longer exists, so delete related tokens
glog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name) glog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name)
sa = &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}} sa = &v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}}
if retriable, err := e.deleteTokens(sa); err != nil { if retriable, err := e.deleteTokens(sa); err != nil {
glog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err) glog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err)
retry = retriable retry = retriable
@@ -328,7 +329,7 @@ func (e *TokensController) syncSecret() {
} }
} }
func (e *TokensController) deleteTokens(serviceAccount *api.ServiceAccount) ( /*retry*/ bool, error) { func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*retry*/ bool, error) {
tokens, err := e.listTokenSecrets(serviceAccount) tokens, err := e.listTokenSecrets(serviceAccount)
if err != nil { if err != nil {
// don't retry on cache lookup errors // don't retry on cache lookup errors
@@ -349,9 +350,9 @@ func (e *TokensController) deleteTokens(serviceAccount *api.ServiceAccount) ( /*
} }
func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) { func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
var opts *api.DeleteOptions var opts *v1.DeleteOptions
if len(uid) > 0 { if len(uid) > 0 {
opts = &api.DeleteOptions{Preconditions: &api.Preconditions{UID: &uid}} opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
} }
err := e.client.Core().Secrets(ns).Delete(name, opts) err := e.client.Core().Secrets(ns).Delete(name, opts)
// NotFound doesn't need a retry (it's already been deleted) // NotFound doesn't need a retry (it's already been deleted)
@@ -364,7 +365,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry
} }
// ensureReferencedToken makes sure at least one ServiceAccountToken secret exists, and is included in the serviceAccount's Secrets list // ensureReferencedToken makes sure at least one ServiceAccountToken secret exists, and is included in the serviceAccount's Secrets list
func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAccount) ( /* retry */ bool, error) { func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccount) ( /* retry */ bool, error) {
if len(serviceAccount.Secrets) > 0 { if len(serviceAccount.Secrets) > 0 {
allSecrets, err := e.listTokenSecrets(serviceAccount) allSecrets, err := e.listTokenSecrets(serviceAccount)
if err != nil { if err != nil {
@@ -396,16 +397,16 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
} }
// Build the secret // Build the secret
secret := &api.Secret{ secret := &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: secret.Strategy.GenerateName(fmt.Sprintf("%s-token-", serviceAccount.Name)), Name: secret.Strategy.GenerateName(fmt.Sprintf("%s-token-", serviceAccount.Name)),
Namespace: serviceAccount.Namespace, Namespace: serviceAccount.Namespace,
Annotations: map[string]string{ Annotations: map[string]string{
api.ServiceAccountNameKey: serviceAccount.Name, v1.ServiceAccountNameKey: serviceAccount.Name,
api.ServiceAccountUIDKey: string(serviceAccount.UID), v1.ServiceAccountUIDKey: string(serviceAccount.UID),
}, },
}, },
Type: api.SecretTypeServiceAccountToken, Type: v1.SecretTypeServiceAccountToken,
Data: map[string][]byte{}, Data: map[string][]byte{},
} }
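
The secret assembled above is typed as a ServiceAccountToken secret, annotated with the owning ServiceAccount's name and UID, and then has the token, namespace, and optional root CA written into Data. A self-contained sketch of that assembly, with the well-known v1.ServiceAccount*Key constants written out as string literals (assumed to match the real values):

package main

import "fmt"

// Literal forms of the v1.ServiceAccount*Key constants used above (assumed).
const (
	serviceAccountNameKey      = "kubernetes.io/service-account.name"
	serviceAccountUIDKey       = "kubernetes.io/service-account.uid"
	serviceAccountTokenKey     = "token"
	serviceAccountNamespaceKey = "namespace"
	serviceAccountRootCAKey    = "ca.crt"
)

type secret struct {
	Name        string
	Namespace   string
	Annotations map[string]string
	Type        string
	Data        map[string][]byte
}

// buildTokenSecret mirrors the assembly step of ensureReferencedToken.
func buildTokenSecret(saName, saNamespace, saUID, token string, rootCA []byte) *secret {
	s := &secret{
		Name:      saName + "-token-xxxxx", // the real name comes from the secret name generator
		Namespace: saNamespace,
		Annotations: map[string]string{
			serviceAccountNameKey: saName,
			serviceAccountUIDKey:  saUID,
		},
		Type: "kubernetes.io/service-account-token",
		Data: map[string][]byte{},
	}
	s.Data[serviceAccountTokenKey] = []byte(token)
	s.Data[serviceAccountNamespaceKey] = []byte(saNamespace)
	if len(rootCA) > 0 {
		s.Data[serviceAccountRootCAKey] = rootCA
	}
	return s
}

func main() {
	s := buildTokenSecret("default", "kube-system", "1234", "jwt-token-here", []byte("PEM"))
	fmt.Println(s.Name, s.Type, string(s.Data[serviceAccountTokenKey]))
}
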
@@ -415,10 +416,10 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
// retriable error // retriable error
return true, err return true, err
} }
secret.Data[api.ServiceAccountTokenKey] = []byte(token) secret.Data[v1.ServiceAccountTokenKey] = []byte(token)
secret.Data[api.ServiceAccountNamespaceKey] = []byte(serviceAccount.Namespace) secret.Data[v1.ServiceAccountNamespaceKey] = []byte(serviceAccount.Namespace)
if e.rootCA != nil && len(e.rootCA) > 0 { if e.rootCA != nil && len(e.rootCA) > 0 {
secret.Data[api.ServiceAccountRootCAKey] = e.rootCA secret.Data[v1.ServiceAccountRootCAKey] = e.rootCA
} }
// Save the secret // Save the secret
@@ -431,12 +432,12 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
// This prevents the service account update (below) from triggering another token creation, if the referenced token couldn't be found in the store // This prevents the service account update (below) from triggering another token creation, if the referenced token couldn't be found in the store
e.secrets.Add(createdToken) e.secrets.Add(createdToken)
liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, api.ObjectReference{Name: secret.Name}) liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name})
if _, err = serviceAccounts.Update(liveServiceAccount); err != nil { if _, err = serviceAccounts.Update(liveServiceAccount); err != nil {
// we weren't able to use the token, try to clean it up. // we weren't able to use the token, try to clean it up.
glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err) glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &api.DeleteOptions{Preconditions: &api.Preconditions{UID: &createdToken.UID}} deleteOpts := &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil { if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
glog.Error(deleteErr) // if we fail, just log it glog.Error(deleteErr) // if we fail, just log it
} }
@@ -454,20 +455,20 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
return false, nil return false, nil
} }
func (e *TokensController) secretUpdateNeeded(secret *api.Secret) (bool, bool, bool) { func (e *TokensController) secretUpdateNeeded(secret *v1.Secret) (bool, bool, bool) {
caData := secret.Data[api.ServiceAccountRootCAKey] caData := secret.Data[v1.ServiceAccountRootCAKey]
needsCA := len(e.rootCA) > 0 && bytes.Compare(caData, e.rootCA) != 0 needsCA := len(e.rootCA) > 0 && bytes.Compare(caData, e.rootCA) != 0
needsNamespace := len(secret.Data[api.ServiceAccountNamespaceKey]) == 0 needsNamespace := len(secret.Data[v1.ServiceAccountNamespaceKey]) == 0
tokenData := secret.Data[api.ServiceAccountTokenKey] tokenData := secret.Data[v1.ServiceAccountTokenKey]
needsToken := len(tokenData) == 0 needsToken := len(tokenData) == 0
return needsCA, needsNamespace, needsToken return needsCA, needsNamespace, needsToken
} }
// generateTokenIfNeeded populates the token data for the given Secret if not already set // generateTokenIfNeeded populates the token data for the given Secret if not already set
func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAccount, cachedSecret *api.Secret) ( /* retry */ bool, error) { func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccount, cachedSecret *v1.Secret) ( /* retry */ bool, error) {
// Check the cached secret to see if changes are needed // Check the cached secret to see if changes are needed
if needsCA, needsNamespace, needsToken := e.secretUpdateNeeded(cachedSecret); !needsCA && !needsToken && !needsNamespace { if needsCA, needsNamespace, needsToken := e.secretUpdateNeeded(cachedSecret); !needsCA && !needsToken && !needsNamespace {
return false, nil return false, nil
@@ -502,11 +503,11 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco
// Set the CA // Set the CA
if needsCA { if needsCA {
liveSecret.Data[api.ServiceAccountRootCAKey] = e.rootCA liveSecret.Data[v1.ServiceAccountRootCAKey] = e.rootCA
} }
// Set the namespace // Set the namespace
if needsNamespace { if needsNamespace {
liveSecret.Data[api.ServiceAccountNamespaceKey] = []byte(liveSecret.Namespace) liveSecret.Data[v1.ServiceAccountNamespaceKey] = []byte(liveSecret.Namespace)
} }
// Generate the token // Generate the token
@@ -515,12 +516,12 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco
if err != nil { if err != nil {
return false, err return false, err
} }
liveSecret.Data[api.ServiceAccountTokenKey] = []byte(token) liveSecret.Data[v1.ServiceAccountTokenKey] = []byte(token)
} }
// Set annotations // Set annotations
liveSecret.Annotations[api.ServiceAccountNameKey] = serviceAccount.Name liveSecret.Annotations[v1.ServiceAccountNameKey] = serviceAccount.Name
liveSecret.Annotations[api.ServiceAccountUIDKey] = string(serviceAccount.UID) liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID)
// Save the secret // Save the secret
_, err = secrets.Update(liveSecret) _, err = secrets.Update(liveSecret)
@@ -560,7 +561,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
} }
// Remove the secret // Remove the secret
secrets := []api.ObjectReference{} secrets := []v1.ObjectReference{}
for _, s := range serviceAccount.Secrets { for _, s := range serviceAccount.Secrets {
if s.Name != secretName { if s.Name != secretName {
secrets = append(secrets, s) secrets = append(secrets, s)
@@ -575,16 +576,16 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
return err return err
} }
func (e *TokensController) getServiceAccount(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*api.ServiceAccount, error) { func (e *TokensController) getServiceAccount(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*v1.ServiceAccount, error) {
// Look up in cache // Look up in cache
obj, exists, err := e.serviceAccounts.GetByKey(makeCacheKey(ns, name)) obj, exists, err := e.serviceAccounts.GetByKey(makeCacheKey(ns, name))
if err != nil { if err != nil {
return nil, err return nil, err
} }
if exists { if exists {
sa, ok := obj.(*api.ServiceAccount) sa, ok := obj.(*v1.ServiceAccount)
if !ok { if !ok {
return nil, fmt.Errorf("expected *api.ServiceAccount, got %#v", sa) return nil, fmt.Errorf("expected *v1.ServiceAccount, got %#v", sa)
} }
// Ensure UID matches if given // Ensure UID matches if given
if len(uid) == 0 || uid == sa.UID { if len(uid) == 0 || uid == sa.UID {
@@ -611,16 +612,16 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U
return nil, nil return nil, nil
} }
func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*api.Secret, error) { func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*v1.Secret, error) {
// Look up in cache // Look up in cache
obj, exists, err := e.secrets.GetByKey(makeCacheKey(ns, name)) obj, exists, err := e.secrets.GetByKey(makeCacheKey(ns, name))
if err != nil { if err != nil {
return nil, err return nil, err
} }
if exists { if exists {
secret, ok := obj.(*api.Secret) secret, ok := obj.(*v1.Secret)
if !ok { if !ok {
return nil, fmt.Errorf("expected *api.Secret, got %#v", secret) return nil, fmt.Errorf("expected *v1.Secret, got %#v", secret)
} }
// Ensure UID matches if given // Ensure UID matches if given
if len(uid) == 0 || uid == secret.UID { if len(uid) == 0 || uid == secret.UID {
@@ -649,15 +650,15 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc
// listTokenSecrets returns a list of all of the ServiceAccountToken secrets that // listTokenSecrets returns a list of all of the ServiceAccountToken secrets that
// reference the given service account's name and uid // reference the given service account's name and uid
func (e *TokensController) listTokenSecrets(serviceAccount *api.ServiceAccount) ([]*api.Secret, error) { func (e *TokensController) listTokenSecrets(serviceAccount *v1.ServiceAccount) ([]*v1.Secret, error) {
namespaceSecrets, err := e.secrets.ByIndex("namespace", serviceAccount.Namespace) namespaceSecrets, err := e.secrets.ByIndex("namespace", serviceAccount.Namespace)
if err != nil { if err != nil {
return nil, err return nil, err
} }
items := []*api.Secret{} items := []*v1.Secret{}
for _, obj := range namespaceSecrets { for _, obj := range namespaceSecrets {
secret := obj.(*api.Secret) secret := obj.(*v1.Secret)
if serviceaccount.IsServiceAccountToken(secret, serviceAccount) { if serviceaccount.IsServiceAccountToken(secret, serviceAccount) {
items = append(items, secret) items = append(items, secret)
@@ -669,14 +670,14 @@ func (e *TokensController) listTokenSecrets(serviceAccount *api.ServiceAccount)
// serviceAccountNameAndUID is a helper method to get the ServiceAccount Name and UID from the given secret // serviceAccountNameAndUID is a helper method to get the ServiceAccount Name and UID from the given secret
// Returns "","" if the secret is not a ServiceAccountToken secret // Returns "","" if the secret is not a ServiceAccountToken secret
// If the name or uid annotation is missing, "" is returned instead // If the name or uid annotation is missing, "" is returned instead
func serviceAccountNameAndUID(secret *api.Secret) (string, string) { func serviceAccountNameAndUID(secret *v1.Secret) (string, string) {
if secret.Type != api.SecretTypeServiceAccountToken { if secret.Type != v1.SecretTypeServiceAccountToken {
return "", "" return "", ""
} }
return secret.Annotations[api.ServiceAccountNameKey], secret.Annotations[api.ServiceAccountUIDKey] return secret.Annotations[v1.ServiceAccountNameKey], secret.Annotations[v1.ServiceAccountUIDKey]
} }
func getSecretReferences(serviceAccount *api.ServiceAccount) sets.String { func getSecretReferences(serviceAccount *v1.ServiceAccount) sets.String {
references := sets.NewString() references := sets.NewString()
for _, secret := range serviceAccount.Secrets { for _, secret := range serviceAccount.Secrets {
references.Insert(secret.Name) references.Insert(secret.Name)
@@ -693,7 +694,7 @@ type serviceAccountQueueKey struct {
uid types.UID uid types.UID
} }
func makeServiceAccountKey(sa *api.ServiceAccount) interface{} { func makeServiceAccountKey(sa *v1.ServiceAccount) interface{} {
return serviceAccountQueueKey{ return serviceAccountQueueKey{
namespace: sa.Namespace, namespace: sa.Namespace,
name: sa.Name, name: sa.Name,
@@ -721,13 +722,13 @@ type secretQueueKey struct {
saUID types.UID saUID types.UID
} }
func makeSecretQueueKey(secret *api.Secret) interface{} { func makeSecretQueueKey(secret *v1.Secret) interface{} {
return secretQueueKey{ return secretQueueKey{
namespace: secret.Namespace, namespace: secret.Namespace,
name: secret.Name, name: secret.Name,
uid: secret.UID, uid: secret.UID,
saName: secret.Annotations[api.ServiceAccountNameKey], saName: secret.Annotations[v1.ServiceAccountNameKey],
saUID: types.UID(secret.Annotations[api.ServiceAccountUIDKey]), saUID: types.UID(secret.Annotations[v1.ServiceAccountUIDKey]),
} }
} }
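Both queue key builders copy the owning service account's name and UID out of the secret's annotations, so a worker can later decide whether the object it dequeued still belongs to the same service account without re-reading the secret. A sketch of that consumption side (illustrative helper; the controller's actual workqueue plumbing sits outside this hunk):

// ownerFromSecretKey recovers the owning service account coordinates that
// makeSecretQueueKey embedded in the key. Illustrative helper only.
func ownerFromSecretKey(key interface{}) (saName string, saUID types.UID, ok bool) {
	k, ok := key.(secretQueueKey)
	if !ok {
		return "", "", false
	}
	return k.saName, k.saUID, true
}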

View File

@@ -27,59 +27,60 @@ import (
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors" apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned" "k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
utilrand "k8s.io/kubernetes/pkg/util/rand" utilrand "k8s.io/kubernetes/pkg/util/rand"
) )
type testGenerator struct { type testGenerator struct {
GeneratedServiceAccounts []api.ServiceAccount GeneratedServiceAccounts []v1.ServiceAccount
GeneratedSecrets []api.Secret GeneratedSecrets []v1.Secret
Token string Token string
Err error Err error
} }
func (t *testGenerator) GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) { func (t *testGenerator) GenerateToken(serviceAccount v1.ServiceAccount, secret v1.Secret) (string, error) {
t.GeneratedSecrets = append(t.GeneratedSecrets, secret) t.GeneratedSecrets = append(t.GeneratedSecrets, secret)
t.GeneratedServiceAccounts = append(t.GeneratedServiceAccounts, serviceAccount) t.GeneratedServiceAccounts = append(t.GeneratedServiceAccounts, serviceAccount)
return t.Token, t.Err return t.Token, t.Err
} }
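testGenerator is a recording stub: it remembers every service account and secret it was asked to sign and hands back a canned token or error. Once a test case has driven the controller, assertions against those recorded slices are straightforward; a hedged sketch of such an assertion (this helper is not part of the file):

// assertTokenRequested fails the test unless the stub generator was asked to
// sign a token for the named service account at least once.
func assertTokenRequested(t *testing.T, g *testGenerator, saName string) {
	for _, sa := range g.GeneratedServiceAccounts {
		if sa.Name == saName {
			return
		}
	}
	t.Errorf("expected a token to be generated for service account %q, got %d generations",
		saName, len(g.GeneratedServiceAccounts))
}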
// emptySecretReferences is used by a service account without any secrets // emptySecretReferences is used by a service account without any secrets
func emptySecretReferences() []api.ObjectReference { func emptySecretReferences() []v1.ObjectReference {
return []api.ObjectReference{} return []v1.ObjectReference{}
} }
// missingSecretReferences is used by a service account that references secrets which do not exist // missingSecretReferences is used by a service account that references secrets which do not exist
func missingSecretReferences() []api.ObjectReference { func missingSecretReferences() []v1.ObjectReference {
return []api.ObjectReference{{Name: "missing-secret-1"}} return []v1.ObjectReference{{Name: "missing-secret-1"}}
} }
// regularSecretReferences is used by a service account that references secrets which are not ServiceAccountTokens // regularSecretReferences is used by a service account that references secrets which are not ServiceAccountTokens
func regularSecretReferences() []api.ObjectReference { func regularSecretReferences() []v1.ObjectReference {
return []api.ObjectReference{{Name: "regular-secret-1"}} return []v1.ObjectReference{{Name: "regular-secret-1"}}
} }
// tokenSecretReferences is used by a service account that references a ServiceAccountToken secret // tokenSecretReferences is used by a service account that references a ServiceAccountToken secret
func tokenSecretReferences() []api.ObjectReference { func tokenSecretReferences() []v1.ObjectReference {
return []api.ObjectReference{{Name: "token-secret-1"}} return []v1.ObjectReference{{Name: "token-secret-1"}}
} }
// addTokenSecretReference adds a reference to the ServiceAccountToken that will be created // addTokenSecretReference adds a reference to the ServiceAccountToken that will be created
func addTokenSecretReference(refs []api.ObjectReference) []api.ObjectReference { func addTokenSecretReference(refs []v1.ObjectReference) []v1.ObjectReference {
return addNamedTokenSecretReference(refs, "default-token-fplln") return addNamedTokenSecretReference(refs, "default-token-fplln")
} }
// addNamedTokenSecretReference adds a reference to the named ServiceAccountToken // addNamedTokenSecretReference adds a reference to the named ServiceAccountToken
func addNamedTokenSecretReference(refs []api.ObjectReference, name string) []api.ObjectReference { func addNamedTokenSecretReference(refs []v1.ObjectReference, name string) []v1.ObjectReference {
return append(refs, api.ObjectReference{Name: name}) return append(refs, v1.ObjectReference{Name: name})
} }
// serviceAccount returns a service account with the given secret refs // serviceAccount returns a service account with the given secret refs
func serviceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount { func serviceAccount(secretRefs []v1.ObjectReference) *v1.ServiceAccount {
return &api.ServiceAccount{ return &v1.ServiceAccount{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "default", Name: "default",
UID: "12345", UID: "12345",
Namespace: "default", Namespace: "default",
@@ -90,16 +91,16 @@ func serviceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
} }
// updatedServiceAccount returns a service account with the resource version modified // updatedServiceAccount returns a service account with the resource version modified
func updatedServiceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount { func updatedServiceAccount(secretRefs []v1.ObjectReference) *v1.ServiceAccount {
sa := serviceAccount(secretRefs) sa := serviceAccount(secretRefs)
sa.ResourceVersion = "2" sa.ResourceVersion = "2"
return sa return sa
} }
// opaqueSecret returns a persisted non-ServiceAccountToken secret named "regular-secret-1" // opaqueSecret returns a persisted non-ServiceAccountToken secret named "regular-secret-1"
func opaqueSecret() *api.Secret { func opaqueSecret() *v1.Secret {
return &api.Secret{ return &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "regular-secret-1", Name: "regular-secret-1",
Namespace: "default", Namespace: "default",
UID: "23456", UID: "23456",
@@ -114,22 +115,22 @@ func opaqueSecret() *api.Secret {
// createdTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret. // createdTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret.
// Named "default-token-fplln", since that is the first generated name after rand.Seed(1) // Named "default-token-fplln", since that is the first generated name after rand.Seed(1)
func createdTokenSecret(overrideName ...string) *api.Secret { func createdTokenSecret(overrideName ...string) *v1.Secret {
return namedCreatedTokenSecret("default-token-fplln") return namedCreatedTokenSecret("default-token-fplln")
} }
// namedCreatedTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret with the given name. // namedCreatedTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret with the given name.
func namedCreatedTokenSecret(name string) *api.Secret { func namedCreatedTokenSecret(name string) *v1.Secret {
return &api.Secret{ return &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: name, Name: name,
Namespace: "default", Namespace: "default",
Annotations: map[string]string{ Annotations: map[string]string{
api.ServiceAccountNameKey: "default", v1.ServiceAccountNameKey: "default",
api.ServiceAccountUIDKey: "12345", v1.ServiceAccountUIDKey: "12345",
}, },
}, },
Type: api.SecretTypeServiceAccountToken, Type: v1.SecretTypeServiceAccountToken,
Data: map[string][]byte{ Data: map[string][]byte{
"token": []byte("ABC"), "token": []byte("ABC"),
"ca.crt": []byte("CA Data"), "ca.crt": []byte("CA Data"),
@@ -139,19 +140,19 @@ func namedCreatedTokenSecret(name string) *api.Secret {
} }
// serviceAccountTokenSecret returns an existing ServiceAccountToken secret named "token-secret-1" // serviceAccountTokenSecret returns an existing ServiceAccountToken secret named "token-secret-1"
func serviceAccountTokenSecret() *api.Secret { func serviceAccountTokenSecret() *v1.Secret {
return &api.Secret{ return &v1.Secret{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: "token-secret-1", Name: "token-secret-1",
Namespace: "default", Namespace: "default",
UID: "23456", UID: "23456",
ResourceVersion: "1", ResourceVersion: "1",
Annotations: map[string]string{ Annotations: map[string]string{
api.ServiceAccountNameKey: "default", v1.ServiceAccountNameKey: "default",
api.ServiceAccountUIDKey: "12345", v1.ServiceAccountUIDKey: "12345",
}, },
}, },
Type: api.SecretTypeServiceAccountToken, Type: v1.SecretTypeServiceAccountToken,
Data: map[string][]byte{ Data: map[string][]byte{
"token": []byte("ABC"), "token": []byte("ABC"),
"ca.crt": []byte("CA Data"), "ca.crt": []byte("CA Data"),
@@ -161,37 +162,37 @@ func serviceAccountTokenSecret() *api.Secret {
} }
// serviceAccountTokenSecretWithoutTokenData returns an existing ServiceAccountToken secret that lacks token data // serviceAccountTokenSecretWithoutTokenData returns an existing ServiceAccountToken secret that lacks token data
func serviceAccountTokenSecretWithoutTokenData() *api.Secret { func serviceAccountTokenSecretWithoutTokenData() *v1.Secret {
secret := serviceAccountTokenSecret() secret := serviceAccountTokenSecret()
delete(secret.Data, api.ServiceAccountTokenKey) delete(secret.Data, v1.ServiceAccountTokenKey)
return secret return secret
} }
// serviceAccountTokenSecretWithoutCAData returns an existing ServiceAccountToken secret that lacks ca data // serviceAccountTokenSecretWithoutCAData returns an existing ServiceAccountToken secret that lacks ca data
func serviceAccountTokenSecretWithoutCAData() *api.Secret { func serviceAccountTokenSecretWithoutCAData() *v1.Secret {
secret := serviceAccountTokenSecret() secret := serviceAccountTokenSecret()
delete(secret.Data, api.ServiceAccountRootCAKey) delete(secret.Data, v1.ServiceAccountRootCAKey)
return secret return secret
} }
// serviceAccountTokenSecretWithCAData returns an existing ServiceAccountToken secret with the specified ca data // serviceAccountTokenSecretWithCAData returns an existing ServiceAccountToken secret with the specified ca data
func serviceAccountTokenSecretWithCAData(data []byte) *api.Secret { func serviceAccountTokenSecretWithCAData(data []byte) *v1.Secret {
secret := serviceAccountTokenSecret() secret := serviceAccountTokenSecret()
secret.Data[api.ServiceAccountRootCAKey] = data secret.Data[v1.ServiceAccountRootCAKey] = data
return secret return secret
} }
// serviceAccountTokenSecretWithoutNamespaceData returns an existing ServiceAccountToken secret that lacks namespace data // serviceAccountTokenSecretWithoutNamespaceData returns an existing ServiceAccountToken secret that lacks namespace data
func serviceAccountTokenSecretWithoutNamespaceData() *api.Secret { func serviceAccountTokenSecretWithoutNamespaceData() *v1.Secret {
secret := serviceAccountTokenSecret() secret := serviceAccountTokenSecret()
delete(secret.Data, api.ServiceAccountNamespaceKey) delete(secret.Data, v1.ServiceAccountNamespaceKey)
return secret return secret
} }
// serviceAccountTokenSecretWithNamespaceData returns an existing ServiceAccountToken secret with the specified namespace data // serviceAccountTokenSecretWithNamespaceData returns an existing ServiceAccountToken secret with the specified namespace data
func serviceAccountTokenSecretWithNamespaceData(data []byte) *api.Secret { func serviceAccountTokenSecretWithNamespaceData(data []byte) *v1.Secret {
secret := serviceAccountTokenSecret() secret := serviceAccountTokenSecret()
secret.Data[api.ServiceAccountNamespaceKey] = data secret.Data[v1.ServiceAccountNamespaceKey] = data
return secret return secret
} }
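Each of these helpers starts from serviceAccountTokenSecret() and strips or overrides exactly one field, so every test case below can name the specific degradation it exercises. Seeding the generated release_1_5 fake clientset with them is then a one-liner; a minimal sketch (the fixture combination is chosen for illustration only):

// newFixtureClient returns a fake clientset preloaded with a service account
// that references a token secret whose token payload is missing.
func newFixtureClient() *fake.Clientset {
	return fake.NewSimpleClientset(
		serviceAccount(tokenSecretReferences()),
		serviceAccountTokenSecretWithoutTokenData(),
	)
}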
@@ -210,15 +211,15 @@ func TestTokenCreation(t *testing.T) {
Reactors []reaction Reactors []reaction
ExistingServiceAccount *api.ServiceAccount ExistingServiceAccount *v1.ServiceAccount
ExistingSecrets []*api.Secret ExistingSecrets []*v1.Secret
AddedServiceAccount *api.ServiceAccount AddedServiceAccount *v1.ServiceAccount
UpdatedServiceAccount *api.ServiceAccount UpdatedServiceAccount *v1.ServiceAccount
DeletedServiceAccount *api.ServiceAccount DeletedServiceAccount *v1.ServiceAccount
AddedSecret *api.Secret AddedSecret *v1.Secret
UpdatedSecret *api.Secret UpdatedSecret *v1.Secret
DeletedSecret *api.Secret DeletedSecret *v1.Secret
ExpectedActions []core.Action ExpectedActions []core.Action
}{ }{
@@ -227,9 +228,9 @@ func TestTokenCreation(t *testing.T) {
AddedServiceAccount: serviceAccount(emptySecretReferences()), AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
}, },
}, },
"new serviceaccount with no secrets encountering create error": { "new serviceaccount with no secrets encountering create error": {
@@ -253,17 +254,17 @@ func TestTokenCreation(t *testing.T) {
AddedServiceAccount: serviceAccount(emptySecretReferences()), AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
// Attempt 1 // Attempt 1
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
// Attempt 2 // Attempt 2
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")),
// Attempt 3 // Attempt 3
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addNamedTokenSecretReference(emptySecretReferences(), "default-token-oh43e"))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addNamedTokenSecretReference(emptySecretReferences(), "default-token-oh43e"))),
}, },
}, },
"new serviceaccount with no secrets encountering unending create error": { "new serviceaccount with no secrets encountering unending create error": {
@@ -283,14 +284,14 @@ func TestTokenCreation(t *testing.T) {
AddedServiceAccount: serviceAccount(emptySecretReferences()), AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
// Attempt // Attempt
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
// Retry 1 // Retry 1
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")),
// Retry 2 // Retry 2
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")),
}, },
}, },
"new serviceaccount with missing secrets": { "new serviceaccount with missing secrets": {
@@ -298,9 +299,9 @@ func TestTokenCreation(t *testing.T) {
AddedServiceAccount: serviceAccount(missingSecretReferences()), AddedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
}, },
}, },
"new serviceaccount with non-token secrets": { "new serviceaccount with non-token secrets": {
@@ -308,14 +309,14 @@ func TestTokenCreation(t *testing.T) {
AddedServiceAccount: serviceAccount(regularSecretReferences()), AddedServiceAccount: serviceAccount(regularSecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
}, },
}, },
"new serviceaccount with token secrets": { "new serviceaccount with token secrets": {
ClientObjects: []runtime.Object{serviceAccount(tokenSecretReferences()), serviceAccountTokenSecret()}, ClientObjects: []runtime.Object{serviceAccount(tokenSecretReferences()), serviceAccountTokenSecret()},
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()}, ExistingSecrets: []*v1.Secret{serviceAccountTokenSecret()},
AddedServiceAccount: serviceAccount(tokenSecretReferences()), AddedServiceAccount: serviceAccount(tokenSecretReferences()),
ExpectedActions: []core.Action{}, ExpectedActions: []core.Action{},
@@ -325,7 +326,7 @@ func TestTokenCreation(t *testing.T) {
AddedServiceAccount: serviceAccount(emptySecretReferences()), AddedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
}, },
}, },
"updated serviceaccount with no secrets": { "updated serviceaccount with no secrets": {
@@ -333,9 +334,9 @@ func TestTokenCreation(t *testing.T) {
UpdatedServiceAccount: serviceAccount(emptySecretReferences()), UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
}, },
}, },
"updated serviceaccount with missing secrets": { "updated serviceaccount with missing secrets": {
@@ -343,9 +344,9 @@ func TestTokenCreation(t *testing.T) {
UpdatedServiceAccount: serviceAccount(missingSecretReferences()), UpdatedServiceAccount: serviceAccount(missingSecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
}, },
}, },
"updated serviceaccount with non-token secrets": { "updated serviceaccount with non-token secrets": {
@@ -353,13 +354,13 @@ func TestTokenCreation(t *testing.T) {
UpdatedServiceAccount: serviceAccount(regularSecretReferences()), UpdatedServiceAccount: serviceAccount(regularSecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()), core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
}, },
}, },
"updated serviceaccount with token secrets": { "updated serviceaccount with token secrets": {
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()}, ExistingSecrets: []*v1.Secret{serviceAccountTokenSecret()},
UpdatedServiceAccount: serviceAccount(tokenSecretReferences()), UpdatedServiceAccount: serviceAccount(tokenSecretReferences()),
ExpectedActions: []core.Action{}, ExpectedActions: []core.Action{},
@@ -369,7 +370,7 @@ func TestTokenCreation(t *testing.T) {
UpdatedServiceAccount: serviceAccount(emptySecretReferences()), UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
}, },
}, },
@@ -389,11 +390,11 @@ func TestTokenCreation(t *testing.T) {
}, },
"deleted serviceaccount with token secrets": { "deleted serviceaccount with token secrets": {
ClientObjects: []runtime.Object{serviceAccountTokenSecret()}, ClientObjects: []runtime.Object{serviceAccountTokenSecret()},
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()}, ExistingSecrets: []*v1.Secret{serviceAccountTokenSecret()},
DeletedServiceAccount: serviceAccount(tokenSecretReferences()), DeletedServiceAccount: serviceAccount(tokenSecretReferences()),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewDeleteAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
}, },
}, },
@@ -402,8 +403,8 @@ func TestTokenCreation(t *testing.T) {
AddedSecret: serviceAccountTokenSecret(), AddedSecret: serviceAccountTokenSecret(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewDeleteAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
}, },
}, },
"added secret with serviceaccount": { "added secret with serviceaccount": {
@@ -418,8 +419,8 @@ func TestTokenCreation(t *testing.T) {
AddedSecret: serviceAccountTokenSecretWithoutTokenData(), AddedSecret: serviceAccountTokenSecretWithoutTokenData(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"added token secret without ca data": { "added token secret without ca data": {
@@ -428,8 +429,8 @@ func TestTokenCreation(t *testing.T) {
AddedSecret: serviceAccountTokenSecretWithoutCAData(), AddedSecret: serviceAccountTokenSecretWithoutCAData(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"added token secret with mismatched ca data": { "added token secret with mismatched ca data": {
@@ -438,8 +439,8 @@ func TestTokenCreation(t *testing.T) {
AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")), AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"added token secret without namespace data": { "added token secret without namespace data": {
@@ -448,8 +449,8 @@ func TestTokenCreation(t *testing.T) {
AddedSecret: serviceAccountTokenSecretWithoutNamespaceData(), AddedSecret: serviceAccountTokenSecretWithoutNamespaceData(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"added token secret with custom namespace data": { "added token secret with custom namespace data": {
@@ -467,8 +468,8 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret: serviceAccountTokenSecret(), UpdatedSecret: serviceAccountTokenSecret(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewDeleteAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
}, },
}, },
"updated secret with serviceaccount": { "updated secret with serviceaccount": {
@@ -483,8 +484,8 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(), UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"updated token secret without ca data": { "updated token secret without ca data": {
@@ -493,8 +494,8 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret: serviceAccountTokenSecretWithoutCAData(), UpdatedSecret: serviceAccountTokenSecretWithoutCAData(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"updated token secret with mismatched ca data": { "updated token secret with mismatched ca data": {
@@ -503,8 +504,8 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")), UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"updated token secret without namespace data": { "updated token secret without namespace data": {
@@ -513,8 +514,8 @@ func TestTokenCreation(t *testing.T) {
UpdatedSecret: serviceAccountTokenSecretWithoutNamespaceData(), UpdatedSecret: serviceAccountTokenSecretWithoutNamespaceData(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
}, },
}, },
"updated token secret with custom namespace data": { "updated token secret with custom namespace data": {
@@ -537,8 +538,8 @@ func TestTokenCreation(t *testing.T) {
DeletedSecret: serviceAccountTokenSecret(), DeletedSecret: serviceAccountTokenSecret(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(emptySecretReferences())), core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(emptySecretReferences())),
}, },
}, },
"deleted secret with serviceaccount without reference": { "deleted secret with serviceaccount without reference": {
@@ -546,7 +547,7 @@ func TestTokenCreation(t *testing.T) {
DeletedSecret: serviceAccountTokenSecret(), DeletedSecret: serviceAccountTokenSecret(),
ExpectedActions: []core.Action{ ExpectedActions: []core.Action{
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"), core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
}, },
}, },
} }
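Every case in this table is ultimately judged by replaying the fake client's recorded actions against ExpectedActions. The comparison loop in the real test body sits below this excerpt; a hedged sketch of the usual shape of such a check (the details of the real loop may differ):

// checkActions compares the actions recorded by the fake clientset against the
// expected list, reporting extras, mismatches, and omissions.
func checkActions(t *testing.T, name string, expected []core.Action, client *fake.Clientset) {
	actual := client.Actions()
	for i, action := range actual {
		if i >= len(expected) {
			t.Errorf("%s: %d unexpected actions: %+v", name, len(actual)-len(expected), actual[i:])
			break
		}
		if !reflect.DeepEqual(expected[i], action) {
			t.Errorf("%s: action %d:\nexpected %+v\ngot      %+v", name, i, expected[i], action)
		}
	}
	if len(expected) > len(actual) {
		t.Errorf("%s: missing %d expected actions: %+v", name, len(expected)-len(actual), expected[len(actual):])
	}
}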

View File

@@ -25,8 +25,9 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kcache "k8s.io/kubernetes/pkg/client/cache" kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider" "k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -69,7 +70,7 @@ type AttachDetachController interface {
// NewAttachDetachController returns a new instance of AttachDetachController. // NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController( func NewAttachDetachController(
kubeClient internalclientset.Interface, kubeClient clientset.Interface,
podInformer kcache.SharedInformer, podInformer kcache.SharedInformer,
nodeInformer kcache.SharedInformer, nodeInformer kcache.SharedInformer,
pvcInformer kcache.SharedInformer, pvcInformer kcache.SharedInformer,
@@ -144,7 +145,7 @@ func NewAttachDetachController(
type attachDetachController struct { type attachDetachController struct {
// kubeClient is the kube API client used by volumehost to communicate with // kubeClient is the kube API client used by volumehost to communicate with
// the API server. // the API server.
kubeClient internalclientset.Interface kubeClient clientset.Interface
// pvcInformer is the shared PVC informer used to fetch and store PVC // pvcInformer is the shared PVC informer used to fetch and store PVC
// objects from the API server. It is shared with other controllers and // objects from the API server. It is shared with other controllers and
@@ -210,7 +211,7 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
} }
func (adc *attachDetachController) podAdd(obj interface{}) { func (adc *attachDetachController) podAdd(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
if pod == nil || !ok { if pod == nil || !ok {
return return
} }
@@ -229,7 +230,7 @@ func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
} }
func (adc *attachDetachController) podDelete(obj interface{}) { func (adc *attachDetachController) podDelete(obj interface{}) {
pod, ok := obj.(*api.Pod) pod, ok := obj.(*v1.Pod)
if pod == nil || !ok { if pod == nil || !ok {
return return
} }
@@ -238,7 +239,7 @@ func (adc *attachDetachController) podDelete(obj interface{}) {
} }
func (adc *attachDetachController) nodeAdd(obj interface{}) { func (adc *attachDetachController) nodeAdd(obj interface{}) {
node, ok := obj.(*api.Node) node, ok := obj.(*v1.Node)
if node == nil || !ok { if node == nil || !ok {
return return
} }
@@ -259,7 +260,7 @@ func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
} }
func (adc *attachDetachController) nodeDelete(obj interface{}) { func (adc *attachDetachController) nodeDelete(obj interface{}) {
node, ok := obj.(*api.Node) node, ok := obj.(*v1.Node)
if node == nil || !ok { if node == nil || !ok {
return return
} }
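The pod and node handlers above all follow the same pattern: type-assert the informer object to the versioned type and silently drop anything else. Deletions can also arrive wrapped in a kcache.DeletedFinalStateUnknown tombstone; the controller shown here simply drops those, but a handler that wants the final observed state would unwrap the tombstone first. Illustrative sketch only:

// nodeFromDeleteEvent returns the *v1.Node behind a delete notification,
// unwrapping a DeletedFinalStateUnknown tombstone if necessary.
func nodeFromDeleteEvent(obj interface{}) *v1.Node {
	if node, ok := obj.(*v1.Node); ok {
		return node
	}
	tombstone, ok := obj.(kcache.DeletedFinalStateUnknown)
	if !ok {
		return nil
	}
	node, _ := tombstone.Obj.(*v1.Node)
	return node
}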
@@ -275,7 +276,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
// processPodVolumes processes the volumes in the given pod and adds them to the // processPodVolumes processes the volumes in the given pod and adds them to the
// desired state of the world if addVolumes is true, otherwise it removes them. // desired state of the world if addVolumes is true, otherwise it removes them.
func (adc *attachDetachController) processPodVolumes( func (adc *attachDetachController) processPodVolumes(
pod *api.Pod, addVolumes bool) { pod *v1.Pod, addVolumes bool) {
if pod == nil { if pod == nil {
return return
} }
@@ -363,7 +364,7 @@ func (adc *attachDetachController) processPodVolumes(
// createVolumeSpec creates and returns a mutable volume.Spec object for the // createVolumeSpec creates and returns a mutable volume.Spec object for the
// specified volume. It dereferences any PVC to get PV objects, if needed. // specified volume. It dereferences any PVC to get PV objects, if needed.
func (adc *attachDetachController) createVolumeSpec( func (adc *attachDetachController) createVolumeSpec(
podVolume api.Volume, podNamespace string) (*volume.Spec, error) { podVolume v1.Volume, podNamespace string) (*volume.Spec, error) {
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil { if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
glog.V(10).Infof( glog.V(10).Infof(
"Found PVC, ClaimName: %q/%q", "Found PVC, ClaimName: %q/%q",
@@ -418,9 +419,9 @@ func (adc *attachDetachController) createVolumeSpec(
"failed to deep copy %q volume object. err=%v", podVolume.Name, err) "failed to deep copy %q volume object. err=%v", podVolume.Name, err)
} }
clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume) clonedPodVolume, ok := clonedPodVolumeObj.(v1.Volume)
if !ok { if !ok {
return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to api.Volume", clonedPodVolumeObj) return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to v1.Volume", clonedPodVolumeObj)
} }
return volume.NewSpecFromVolume(&clonedPodVolume), nil return volume.NewSpecFromVolume(&clonedPodVolume), nil
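The deep copy feeding this cast exists so the controller never hands a pointer into the shared informer cache to a volume plugin; with this commit the cast target simply moves from api.Volume to v1.Volume. Isolated as a sketch (the deep-copy call itself sits just above the visible hunk, so its exact form here is an assumption):

// specFromPodVolume deep copies a pod volume and wraps the copy in a
// volume.Spec, mirroring the clone-then-wrap step above.
func specFromPodVolume(podVolume v1.Volume) (*volume.Spec, error) {
	clonedObj, err := api.Scheme.DeepCopy(podVolume)
	if err != nil {
		return nil, fmt.Errorf("failed to deep copy %q volume object. err=%v", podVolume.Name, err)
	}
	cloned, ok := clonedObj.(v1.Volume)
	if !ok {
		return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to v1.Volume", clonedObj)
	}
	return volume.NewSpecFromVolume(&cloned), nil
}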
@@ -447,7 +448,7 @@ func (adc *attachDetachController) getPVCFromCacheExtractPV(
err) err)
} }
pvc, ok := pvcObj.(*api.PersistentVolumeClaim) pvc, ok := pvcObj.(*v1.PersistentVolumeClaim)
if !ok || pvc == nil { if !ok || pvc == nil {
return "", "", fmt.Errorf( return "", "", fmt.Errorf(
"failed to cast %q object %#v to PersistentVolumeClaim", "failed to cast %q object %#v to PersistentVolumeClaim",
@@ -455,7 +456,7 @@ func (adc *attachDetachController) getPVCFromCacheExtractPV(
pvcObj) pvcObj)
} }
if pvc.Status.Phase != api.ClaimBound || pvc.Spec.VolumeName == "" { if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
return "", "", fmt.Errorf( return "", "", fmt.Errorf(
"PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)", "PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
key, key,
@@ -482,7 +483,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
"failed to find PV %q in PVInformer cache. %v", name, err) "failed to find PV %q in PVInformer cache. %v", name, err)
} }
pv, ok := pvObj.(*api.PersistentVolume) pv, ok := pvObj.(*v1.PersistentVolume)
if !ok || pv == nil { if !ok || pv == nil {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"failed to cast %q object %#v to PersistentVolume", name, pvObj) "failed to cast %q object %#v to PersistentVolume", name, pvObj)
@@ -510,7 +511,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
"failed to deep copy %q PV object. err=%v", name, err) "failed to deep copy %q PV object. err=%v", name, err)
} }
clonedPV, ok := clonedPVObj.(api.PersistentVolume) clonedPV, ok := clonedPVObj.(v1.PersistentVolume)
if !ok { if !ok {
return nil, fmt.Errorf( return nil, fmt.Errorf(
"failed to cast %q clonedPV %#v to PersistentVolume", name, pvObj) "failed to cast %q clonedPV %#v to PersistentVolume", name, pvObj)
@@ -524,7 +525,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
// corresponding volume in the actual state of the world to indicate that it is // corresponding volume in the actual state of the world to indicate that it is
// mounted. // mounted.
func (adc *attachDetachController) processVolumesInUse( func (adc *attachDetachController) processVolumesInUse(
nodeName types.NodeName, volumesInUse []api.UniqueVolumeName) { nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
glog.V(4).Infof("processVolumesInUse for node %q", nodeName) glog.V(4).Infof("processVolumesInUse for node %q", nodeName)
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) { for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
mounted := false mounted := false
@@ -562,11 +563,11 @@ func (adc *attachDetachController) GetPodPluginDir(podUID types.UID, pluginName
return "" return ""
} }
func (adc *attachDetachController) GetKubeClient() internalclientset.Interface { func (adc *attachDetachController) GetKubeClient() clientset.Interface {
return adc.kubeClient return adc.kubeClient
} }
func (adc *attachDetachController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) { func (adc *attachDetachController) NewWrapperMounter(volName string, spec volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation") return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation")
} }
@@ -594,6 +595,6 @@ func (adc *attachDetachController) GetHostIP() (net.IP, error) {
return nil, fmt.Errorf("GetHostIP() not supported by Attach/Detach controller's VolumeHost implementation") return nil, fmt.Errorf("GetHostIP() not supported by Attach/Detach controller's VolumeHost implementation")
} }
func (adc *attachDetachController) GetNodeAllocatable() (api.ResourceList, error) { func (adc *attachDetachController) GetNodeAllocatable() (v1.ResourceList, error) {
return api.ResourceList{}, nil return v1.ResourceList{}, nil
} }

View File

@@ -28,7 +28,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -56,7 +56,7 @@ type ActualStateOfWorld interface {
// added. // added.
// If no node with the name nodeName exists in list of attached nodes for // If no node with the name nodeName exists in list of attached nodes for
// the specified volume, the node is added. // the specified volume, the node is added.
AddVolumeNode(volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error) AddVolumeNode(volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error)
// SetVolumeMountedByNode sets the MountedByNode value for the given volume // SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true this value indicates the volume is mounted by // and node. When set to true this value indicates the volume is mounted by
@@ -65,7 +65,7 @@ type ActualStateOfWorld interface {
// returned. // returned.
// If no node with the name nodeName exists in list of attached nodes for // If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned. // the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified // SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status // node to true indicating the AttachedVolume field in the Node's Status
@@ -76,12 +76,12 @@ type ActualStateOfWorld interface {
// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach // ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
// request any more for the volume // request any more for the volume
ResetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName) ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// SetDetachRequestTime sets the detachRequestedTime to the current time if there is no // SetDetachRequestTime sets the detachRequestedTime to the current time if there is no
// previous request (the previous detachRequestedTime is zero) and returns the time elapsed // previous request (the previous detachRequestedTime is zero) and returns the time elapsed
// since the last request // since the last request
SetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
// DeleteVolumeNode removes the given volume and node from the underlying // DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the // store indicating the specified volume is no longer attached to the
@@ -89,12 +89,12 @@ type ActualStateOfWorld interface {
// If the volume/node combo does not exist, this is a no-op. // If the volume/node combo does not exist, this is a no-op.
// If after deleting the node, the specified volume contains no other child // If after deleting the node, the specified volume contains no other child
// nodes, the volume is also deleted. // nodes, the volume is also deleted.
DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName types.NodeName) DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)
// VolumeNodeExists returns true if the specified volume/node combo exists // VolumeNodeExists returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to // in the underlying store indicating the specified volume is attached to
// the specified node. // the specified node.
VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName types.NodeName) bool VolumeNodeExists(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool
// GetAttachedVolumes generates and returns a list of volume/node pairs // GetAttachedVolumes generates and returns a list of volume/node pairs
// reflecting which volumes are attached to which nodes based on the // reflecting which volumes are attached to which nodes based on the
@@ -115,7 +115,7 @@ type ActualStateOfWorld interface {
// this may differ from the actual list of attached volumes for the node // this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon as a detach operation // since volumes should be removed from this list as soon as a detach operation
// is considered, before the detach operation is triggered). // is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume
} }
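The interface above is easiest to picture as a two-level map: volumes keyed by their unique name, each holding the set of nodes the controller believes it is attached to. Below is a minimal, self-contained Go sketch of that bookkeeping using simplified stand-in types (uniqueVolumeName, nodeName, nodeState) rather than the real v1 and cache packages; it illustrates the add/mount/delete semantics documented above, not the controller's actual implementation.

package main

import "fmt"

type uniqueVolumeName string
type nodeName string

// nodeState tracks what the controller believes about one volume/node pair.
type nodeState struct {
	mountedByNode bool
	devicePath    string
}

// actualState maps each volume to the set of nodes it is attached to.
type actualState struct {
	attached map[uniqueVolumeName]map[nodeName]*nodeState
}

func newActualState() *actualState {
	return &actualState{attached: make(map[uniqueVolumeName]map[nodeName]*nodeState)}
}

// addVolumeNode adds the volume and/or node if either is missing.
func (a *actualState) addVolumeNode(v uniqueVolumeName, n nodeName, devicePath string) {
	nodes, ok := a.attached[v]
	if !ok {
		nodes = make(map[nodeName]*nodeState)
		a.attached[v] = nodes
	}
	if _, ok := nodes[n]; !ok {
		nodes[n] = &nodeState{mountedByNode: true, devicePath: devicePath}
	}
}

// setVolumeMountedByNode errors if the volume/node combo is unknown.
func (a *actualState) setVolumeMountedByNode(v uniqueVolumeName, n nodeName, mounted bool) error {
	nodes, ok := a.attached[v]
	if !ok {
		return fmt.Errorf("volume %q is not attached to any node", v)
	}
	state, ok := nodes[n]
	if !ok {
		return fmt.Errorf("volume %q is not attached to node %q", v, n)
	}
	state.mountedByNode = mounted
	return nil
}

// deleteVolumeNode is a no-op for unknown combos and drops the volume
// entirely once its last node is removed.
func (a *actualState) deleteVolumeNode(v uniqueVolumeName, n nodeName) {
	nodes, ok := a.attached[v]
	if !ok {
		return
	}
	delete(nodes, n)
	if len(nodes) == 0 {
		delete(a.attached, v)
	}
}

func main() {
	a := newActualState()
	a.addVolumeNode("vol-1", "node-1", "/dev/xvdf")
	_ = a.setVolumeMountedByNode("vol-1", "node-1", false)
	a.deleteVolumeNode("vol-1", "node-1")
	fmt.Println(len(a.attached)) // 0: the volume was removed with its last node
}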
// AttachedVolume represents a volume that is attached to a node. // AttachedVolume represents a volume that is attached to a node.
@@ -138,7 +138,7 @@ type AttachedVolume struct {
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld. // NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld { func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{ return &actualStateOfWorld{
attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume), attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor), nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor),
volumePluginMgr: volumePluginMgr, volumePluginMgr: volumePluginMgr,
} }
@@ -149,7 +149,7 @@ type actualStateOfWorld struct {
// controller believes to be successfully attached to the nodes it is // controller believes to be successfully attached to the nodes it is
// managing. The key in this map is the name of the volume and the value is // managing. The key in this map is the name of the volume and the value is
// an object containing more information about the attached volume. // an object containing more information about the attached volume.
attachedVolumes map[api.UniqueVolumeName]attachedVolume attachedVolumes map[v1.UniqueVolumeName]attachedVolume
// nodesToUpdateStatusFor is a map containing the set of nodes for which to // nodesToUpdateStatusFor is a map containing the set of nodes for which to
// update the VolumesAttached Status field. The key in this map is the name // update the VolumesAttached Status field. The key in this map is the name
@@ -168,7 +168,7 @@ type actualStateOfWorld struct {
// believes to be successfully attached to a node it is managing. // believes to be successfully attached to a node it is managing.
type attachedVolume struct { type attachedVolume struct {
// volumeName contains the unique identifier for this volume. // volumeName contains the unique identifier for this volume.
volumeName api.UniqueVolumeName volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume. // spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach // Used to generate the volume plugin object, and passed to attach/detach
@@ -223,36 +223,36 @@ type nodeToUpdateStatusFor struct {
// actual list of attached volumes since volumes should be removed from this // actual list of attached volumes since volumes should be removed from this
// list as soon as a detach operation is considered, before the detach // list as soon as a detach operation is considered, before the detach
// operation is triggered). // operation is triggered).
volumesToReportAsAttached map[api.UniqueVolumeName]api.UniqueVolumeName volumesToReportAsAttached map[v1.UniqueVolumeName]v1.UniqueVolumeName
} }
func (asw *actualStateOfWorld) MarkVolumeAsAttached( func (asw *actualStateOfWorld) MarkVolumeAsAttached(
_ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error { _ v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath) _, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
return err return err
} }
func (asw *actualStateOfWorld) MarkVolumeAsDetached( func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolumeNode(volumeName, nodeName) asw.DeleteVolumeNode(volumeName, nodeName)
} }
func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached( func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) error { volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
return asw.removeVolumeFromReportAsAttached(volumeName, nodeName) return asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
} }
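The wrappers above follow a locking convention the rest of this file relies on: the exported method takes the mutex and delegates to an unexported helper that assumes the lock is already held. A small illustrative sketch, with a hypothetical reportCache type standing in for actualStateOfWorld:

package sketch

import (
	"fmt"
	"sync"
)

// reportCache is a hypothetical stand-in for actualStateOfWorld.
type reportCache struct {
	sync.RWMutex
	reported map[string]bool
}

// RemoveVolume takes the lock, then delegates to the lock-free helper.
func (c *reportCache) RemoveVolume(name string) error {
	c.Lock()
	defer c.Unlock()
	return c.removeVolume(name)
}

// removeVolume assumes the caller already holds the lock.
func (c *reportCache) removeVolume(name string) error {
	if _, ok := c.reported[name]; !ok {
		return fmt.Errorf("volume %q is not being reported as attached", name)
	}
	delete(c.reported, name)
	return nil
}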
func (asw *actualStateOfWorld) AddVolumeToReportAsAttached( func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName) asw.addVolumeToReportAsAttached(volumeName, nodeName)
} }
func (asw *actualStateOfWorld) AddVolumeNode( func (asw *actualStateOfWorld) AddVolumeNode(
volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error) { volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@@ -313,7 +313,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
} }
func (asw *actualStateOfWorld) SetVolumeMountedByNode( func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error { volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@@ -342,7 +342,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
} }
func (asw *actualStateOfWorld) ResetDetachRequestTime( func (asw *actualStateOfWorld) ResetDetachRequestTime(
volumeName api.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@@ -356,7 +356,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
} }
func (asw *actualStateOfWorld) SetDetachRequestTime( func (asw *actualStateOfWorld) SetDetachRequestTime(
volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
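A tiny sketch of the detach-request timing described in the interface comments: the timestamp is recorded only when it is currently zero, and the elapsed time since the first request is returned. The volumeAttachment type here is a simplified stand-in, not the controller's attachedVolume record.

package sketch

import "time"

// volumeAttachment is a simplified stand-in for the per-volume/node record.
type volumeAttachment struct {
	detachRequestedTime time.Time
}

// setDetachRequestTime records the first detach request and returns how long
// a detach has been pending.
func (v *volumeAttachment) setDetachRequestTime() time.Duration {
	if v.detachRequestedTime.IsZero() {
		v.detachRequestedTime = time.Now()
	}
	return time.Since(v.detachRequestedTime)
}

// resetDetachRequestTime clears the pending request.
func (v *volumeAttachment) resetDetachRequestTime() {
	v.detachRequestedTime = time.Time{}
}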
@@ -378,7 +378,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
// Get the volume and node object from actual state of world // Get the volume and node object from actual state of world
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) getNodeAndVolume( func (asw *actualStateOfWorld) getNodeAndVolume(
volumeName api.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {
volumeObj, volumeExists := asw.attachedVolumes[volumeName] volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists { if volumeExists {
@@ -396,7 +396,7 @@ func (asw *actualStateOfWorld) getNodeAndVolume(
// Remove the volumeName from the node's volumesToReportAsAttached list // Remove the volumeName from the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached( func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) error { volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName] nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists { if nodeToUpdateExists {
@@ -418,7 +418,7 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
// Add the volumeName to the node's volumesToReportAsAttached list // Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock // This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached( func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in the attachedVolumes list, skip the rest // In case the volume/node entry is no longer in the attachedVolumes list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil { if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName) glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
@@ -430,7 +430,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
nodeToUpdate = nodeToUpdateStatusFor{ nodeToUpdate = nodeToUpdateStatusFor{
nodeName: nodeName, nodeName: nodeName,
statusUpdateNeeded: true, statusUpdateNeeded: true,
volumesToReportAsAttached: make(map[api.UniqueVolumeName]api.UniqueVolumeName), volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
} }
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName) glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
@@ -470,7 +470,7 @@ func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName
} }
func (asw *actualStateOfWorld) DeleteVolumeNode( func (asw *actualStateOfWorld) DeleteVolumeNode(
volumeName api.UniqueVolumeName, nodeName types.NodeName) { volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock() asw.Lock()
defer asw.Unlock() defer asw.Unlock()
@@ -493,7 +493,7 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
} }
func (asw *actualStateOfWorld) VolumeNodeExists( func (asw *actualStateOfWorld) VolumeNodeExists(
volumeName api.UniqueVolumeName, nodeName types.NodeName) bool { volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool {
asw.RLock() asw.RLock()
defer asw.RUnlock() defer asw.RUnlock()
@@ -562,19 +562,19 @@ func (asw *actualStateOfWorld) GetAttachedVolumesPerNode() map[types.NodeName][]
return attachedVolumesPerNode return attachedVolumesPerNode
} }
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume { func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume {
asw.RLock() asw.RLock()
defer asw.RUnlock() defer asw.RUnlock()
volumesToReportAttached := make(map[types.NodeName][]api.AttachedVolume) volumesToReportAttached := make(map[types.NodeName][]v1.AttachedVolume)
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor { for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded { if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make( attachedVolumes := make(
[]api.AttachedVolume, []v1.AttachedVolume,
len(nodeToUpdateObj.volumesToReportAsAttached) /* len */) len(nodeToUpdateObj.volumesToReportAsAttached) /* len */)
i := 0 i := 0
for _, volume := range nodeToUpdateObj.volumesToReportAsAttached { for _, volume := range nodeToUpdateObj.volumesToReportAsAttached {
attachedVolumes[i] = api.AttachedVolume{ attachedVolumes[i] = v1.AttachedVolume{
Name: volume, Name: volume,
DevicePath: asw.attachedVolumes[volume].devicePath, DevicePath: asw.attachedVolumes[volume].devicePath,
} }
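GetVolumesToReportAttached flattens the per-node sets into the slices that the node-status updater consumes. A self-contained sketch of that conversion, with local stand-ins for v1.AttachedVolume and nodeToUpdateStatusFor (assumed names, not the real API):

package main

import "fmt"

type uniqueVolumeName string
type nodeName string

type attachedVolume struct {
	Name       uniqueVolumeName
	DevicePath string
}

type nodeToUpdate struct {
	statusUpdateNeeded        bool
	volumesToReportAsAttached map[uniqueVolumeName]uniqueVolumeName // used as a set
}

// volumesToReport builds the per-node report for nodes flagged as needing a
// status update, looking up each volume's device path.
func volumesToReport(
	nodes map[nodeName]nodeToUpdate,
	devicePaths map[uniqueVolumeName]string) map[nodeName][]attachedVolume {
	out := make(map[nodeName][]attachedVolume)
	for n, update := range nodes {
		if !update.statusUpdateNeeded {
			continue
		}
		volumes := make([]attachedVolume, 0, len(update.volumesToReportAsAttached))
		for v := range update.volumesToReportAsAttached {
			volumes = append(volumes, attachedVolume{Name: v, DevicePath: devicePaths[v]})
		}
		out[n] = volumes
	}
	return out
}

func main() {
	nodes := map[nodeName]nodeToUpdate{
		"node-1": {
			statusUpdateNeeded:        true,
			volumesToReportAsAttached: map[uniqueVolumeName]uniqueVolumeName{"vol-1": "vol-1"},
		},
	}
	paths := map[uniqueVolumeName]string{"vol-1": "/dev/xvdf"}
	fmt.Println(volumesToReport(nodes, paths)) // map[node-1:[{vol-1 /dev/xvdf}]]
}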

@@ -20,7 +20,7 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@@ -32,7 +32,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
@@ -65,7 +65,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
@@ -115,7 +115,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -159,7 +159,7 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -189,7 +189,7 @@ func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
// Act // Act
@@ -215,7 +215,7 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
@@ -264,7 +264,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -296,7 +296,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
@@ -328,7 +328,7 @@ func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
// Act // Act
@@ -368,7 +368,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -395,7 +395,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -403,7 +403,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath) generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
@@ -430,7 +430,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -469,7 +469,7 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -496,7 +496,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -532,7 +532,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -565,7 +565,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -606,7 +606,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -653,7 +653,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
@@ -680,7 +680,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -716,7 +716,7 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -759,7 +759,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -802,7 +802,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -835,7 +835,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -878,7 +878,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -923,7 +923,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -970,7 +970,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := types.NodeName("node-name") nodeName := types.NodeName("node-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -994,7 +994,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -1002,7 +1002,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
if add1Err != nil { if add1Err != nil {
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err) t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
} }
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
node2Name := types.NodeName("node2-name") node2Name := types.NodeName("node2-name")
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath) generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
@@ -1025,7 +1025,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath := "fake/device/path" devicePath := "fake/device/path"
@@ -1061,7 +1061,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
// Arrange // Arrange
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
asw := NewActualStateOfWorld(volumePluginMgr) asw := NewActualStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := types.NodeName("node1-name") node1Name := types.NodeName("node1-name")
devicePath1 := "fake/device/path1" devicePath1 := "fake/device/path1"
@@ -1097,7 +1097,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
func verifyAttachedVolume( func verifyAttachedVolume(
t *testing.T, t *testing.T,
attachedVolumes []AttachedVolume, attachedVolumes []AttachedVolume,
expectedVolumeName api.UniqueVolumeName, expectedVolumeName v1.UniqueVolumeName,
expectedVolumeSpecName string, expectedVolumeSpecName string,
expectedNodeName types.NodeName, expectedNodeName types.NodeName,
expectedDevicePath string, expectedDevicePath string,

@@ -25,7 +25,7 @@ import (
"fmt" "fmt"
"sync" "sync"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
k8stypes "k8s.io/kubernetes/pkg/types" k8stypes "k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor" "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -58,7 +58,7 @@ type DesiredStateOfWorld interface {
// should be attached to the specified node, the volume is implicitly added. // should be attached to the specified node, the volume is implicitly added.
// If no node with the name nodeName exists in the list of nodes managed by the // If no node with the name nodeName exists in the list of nodes managed by the
// attach/detach controller, an error is returned. // attach/detach controller, an error is returned.
AddPod(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (api.UniqueVolumeName, error) AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)
// DeleteNode removes the given node from the list of nodes managed by the // DeleteNode removes the given node from the list of nodes managed by the
// attach/detach controller. // attach/detach controller.
@@ -76,7 +76,7 @@ type DesiredStateOfWorld interface {
// volumes under the specified node, this is a no-op. // volumes under the specified node, this is a no-op.
// If after deleting the pod, the specified volume contains no other child // If after deleting the pod, the specified volume contains no other child
// pods, the volume is also deleted. // pods, the volume is also deleted.
DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)
// NodeExists returns true if the node with the specified name exists in // NodeExists returns true if the node with the specified name exists in
// the list of nodes managed by the attach/detach controller. // the list of nodes managed by the attach/detach controller.
@@ -85,7 +85,7 @@ type DesiredStateOfWorld interface {
// VolumeExists returns true if the volume with the specified name exists // VolumeExists returns true if the volume with the specified name exists
// in the list of volumes that should be attached to the specified node by // in the list of volumes that should be attached to the specified node by
// the attach detach controller. // the attach detach controller.
VolumeExists(volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool
// GetVolumesToAttach generates and returns a list of volumes to attach // GetVolumesToAttach generates and returns a list of volumes to attach
// and the nodes they should be attached to based on the current desired // and the nodes they should be attached to based on the current desired
@@ -106,10 +106,10 @@ type VolumeToAttach struct {
// scheduled to the underlying node. // scheduled to the underlying node.
type PodToAdd struct { type PodToAdd struct {
// pod contains the api object of pod // pod contains the api object of pod
Pod *api.Pod Pod *v1.Pod
// volumeName contains the unique identifier for this volume. // volumeName contains the unique identifier for this volume.
VolumeName api.UniqueVolumeName VolumeName v1.UniqueVolumeName
// nodeName contains the name of this node. // nodeName contains the name of this node.
NodeName k8stypes.NodeName NodeName k8stypes.NodeName
@@ -143,13 +143,13 @@ type nodeManaged struct {
// volumesToAttach is a map containing the set of volumes that should be // volumesToAttach is a map containing the set of volumes that should be
// attached to this node. The key in the map is the name of the volume and // attached to this node. The key in the map is the name of the volume and
// the value is a volume object containing more information about the volume. // the value is a volume object containing more information about the volume.
volumesToAttach map[api.UniqueVolumeName]volumeToAttach volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
} }
// The volume object represents a volume that should be attached to a node. // The volume object represents a volume that should be attached to a node.
type volumeToAttach struct { type volumeToAttach struct {
// volumeName contains the unique identifier for this volume. // volumeName contains the unique identifier for this volume.
volumeName api.UniqueVolumeName volumeName v1.UniqueVolumeName
// spec is the volume spec containing the specification for this volume. // spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach // Used to generate the volume plugin object, and passed to attach/detach
@@ -170,7 +170,7 @@ type pod struct {
podName types.UniquePodName podName types.UniquePodName
// pod object contains the api object of pod // pod object contains the api object of pod
podObj *api.Pod podObj *v1.Pod
} }
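Taken together, the structs above form a three-level map: managed nodes, the volumes that should be attached to each node, and the pods that reference each volume. A simplified, self-contained sketch of that shape and of the AddPod semantics (implicit volume creation, error on an unmanaged node); all names here are illustrative stand-ins for the real types:

package main

import "fmt"

type uniquePodName string
type uniqueVolumeName string
type nodeName string

type desiredState struct {
	// node -> volume -> set of pods that reference the volume
	nodesManaged map[nodeName]map[uniqueVolumeName]map[uniquePodName]bool
}

func newDesiredState() *desiredState {
	return &desiredState{nodesManaged: make(map[nodeName]map[uniqueVolumeName]map[uniquePodName]bool)}
}

// addNode is idempotent: an already-managed node is left untouched.
func (d *desiredState) addNode(n nodeName) {
	if _, ok := d.nodesManaged[n]; !ok {
		d.nodesManaged[n] = make(map[uniqueVolumeName]map[uniquePodName]bool)
	}
}

// addPod registers that pod p on node n needs volume v attached; the volume
// entry is created implicitly, but the node must already be managed.
func (d *desiredState) addPod(p uniquePodName, v uniqueVolumeName, n nodeName) error {
	volumes, ok := d.nodesManaged[n]
	if !ok {
		return fmt.Errorf("node %q does not exist in the list of managed nodes", n)
	}
	if _, ok := volumes[v]; !ok {
		volumes[v] = make(map[uniquePodName]bool)
	}
	volumes[v][p] = true
	return nil
}

func main() {
	d := newDesiredState()
	d.addNode("node-1")
	if err := d.addPod("pod-1", "vol-1", "node-1"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(len(d.nodesManaged["node-1"])) // 1 volume to attach
}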
func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) { func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
@@ -180,16 +180,16 @@ func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists { if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
dsw.nodesManaged[nodeName] = nodeManaged{ dsw.nodesManaged[nodeName] = nodeManaged{
nodeName: nodeName, nodeName: nodeName,
volumesToAttach: make(map[api.UniqueVolumeName]volumeToAttach), volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
} }
} }
} }
func (dsw *desiredStateOfWorld) AddPod( func (dsw *desiredStateOfWorld) AddPod(
podName types.UniquePodName, podName types.UniquePodName,
podToAdd *api.Pod, podToAdd *v1.Pod,
volumeSpec *volume.Spec, volumeSpec *volume.Spec,
nodeName k8stypes.NodeName) (api.UniqueVolumeName, error) { nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
dsw.Lock() dsw.Lock()
defer dsw.Unlock() defer dsw.Unlock()
@@ -261,7 +261,7 @@ func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {
func (dsw *desiredStateOfWorld) DeletePod( func (dsw *desiredStateOfWorld) DeletePod(
podName types.UniquePodName, podName types.UniquePodName,
volumeName api.UniqueVolumeName, volumeName v1.UniqueVolumeName,
nodeName k8stypes.NodeName) { nodeName k8stypes.NodeName) {
dsw.Lock() dsw.Lock()
defer dsw.Unlock() defer dsw.Unlock()
@@ -299,7 +299,7 @@ func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
} }
func (dsw *desiredStateOfWorld) VolumeExists( func (dsw *desiredStateOfWorld) VolumeExists(
volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool { volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
dsw.RLock() dsw.RLock()
defer dsw.RUnlock() defer dsw.RUnlock()
@@ -334,9 +334,9 @@ func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
return volumesToAttach return volumesToAttach
} }
// Construct a list of api.Pod objects from the given pod map // Construct a list of v1.Pod objects from the given pod map
func getPodsFromMap(podMap map[types.UniquePodName]pod) []*api.Pod { func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
pods := make([]*api.Pod, 0, len(podMap)) pods := make([]*v1.Pod, 0, len(podMap))
for _, pod := range podMap { for _, pod := range podMap {
pods = append(pods, pod.podObj) pods = append(pods, pod.podObj)
} }
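To round out the desired-state semantics, here is a sketch of DeletePod as documented earlier in this file (a no-op for unknown entries, with the volume removed once its last referencing pod is gone), together with the set-to-slice flattening that getPodsFromMap performs. It reuses the simplified nested-map shape from the earlier sketch; none of this is the controller's real code.

package sketch

type uniquePodName string
type uniqueVolumeName string
type nodeName string

type desiredState struct {
	nodesManaged map[nodeName]map[uniqueVolumeName]map[uniquePodName]bool
}

// deletePod removes pod p from volume v on node n; unknown entries are a
// no-op, and the volume entry is dropped when no pods reference it anymore.
func (d *desiredState) deletePod(p uniquePodName, v uniqueVolumeName, n nodeName) {
	volumes, ok := d.nodesManaged[n]
	if !ok {
		return // node not managed: nothing to do
	}
	pods, ok := volumes[v]
	if !ok {
		return // volume not expected on this node: nothing to do
	}
	delete(pods, p)
	if len(pods) == 0 {
		delete(volumes, v) // no pods left reference the volume
	}
}

// podsFromSet flattens a pod set into a slice, mirroring getPodsFromMap.
func podsFromSet(set map[uniquePodName]bool) []uniquePodName {
	pods := make([]uniquePodName, 0, len(set))
	for p := range set {
		pods = append(pods, p)
	}
	return pods
}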

@@ -19,7 +19,7 @@ package cache
import ( import (
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing" controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
k8stypes "k8s.io/kubernetes/pkg/types" k8stypes "k8s.io/kubernetes/pkg/types"
volumetesting "k8s.io/kubernetes/pkg/volume/testing" volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@@ -91,7 +91,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
podName := "pod-uid" podName := "pod-uid"
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -139,7 +139,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -212,7 +212,7 @@ func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -278,7 +278,7 @@ func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
@@ -377,7 +377,7 @@ func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName) generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
if podAddErr != nil { if podAddErr != nil {
@@ -416,7 +416,7 @@ func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -464,7 +464,7 @@ func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -525,7 +525,7 @@ func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -573,7 +573,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
node1Name := k8stypes.NodeName("node1-name") node1Name := k8stypes.NodeName("node1-name")
dsw.AddNode(node1Name) dsw.AddNode(node1Name)
@@ -628,7 +628,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
podName := "pod-uid" podName := "pod-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -647,7 +647,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
generatedVolume1Name, generatedVolume1Name,
nodeName) nodeName)
} }
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
// Act // Act
dsw.DeletePod(types.UniquePodName(podName), volume2Name, nodeName) dsw.DeletePod(types.UniquePodName(podName), volume2Name, nodeName)
@@ -731,7 +731,7 @@ func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
generatedVolumeName, _ := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName) generatedVolumeName, _ := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
@@ -761,7 +761,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
podName := "pod-uid" podName := "pod-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName) generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName)
if podAddErr != nil { if podAddErr != nil {
@@ -770,7 +770,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
podName, podName,
podAddErr) podAddErr)
} }
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
// Act // Act
volumeExists := dsw.VolumeExists(volume2Name, nodeName) volumeExists := dsw.VolumeExists(volume2Name, nodeName)
@@ -795,7 +795,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T)
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t) volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
// Act // Act
volumeExists := dsw.VolumeExists(volumeName, nodeName) volumeExists := dsw.VolumeExists(volumeName, nodeName)
@@ -857,7 +857,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := k8stypes.NodeName("node1-name") node1Name := k8stypes.NodeName("node1-name")
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
dsw.AddNode(node1Name) dsw.AddNode(node1Name)
generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name) generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
@@ -869,7 +869,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
} }
node2Name := k8stypes.NodeName("node2-name") node2Name := k8stypes.NodeName("node2-name")
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
dsw.AddNode(node2Name) dsw.AddNode(node2Name)
generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name) generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
@@ -902,7 +902,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := k8stypes.NodeName("node1-name") node1Name := k8stypes.NodeName("node1-name")
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
dsw.AddNode(node1Name) dsw.AddNode(node1Name)
generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name) generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
@@ -914,7 +914,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
} }
node2Name := k8stypes.NodeName("node2-name") node2Name := k8stypes.NodeName("node2-name")
pod2Name := "pod2-uid" pod2Name := "pod2-uid"
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
dsw.AddNode(node2Name) dsw.AddNode(node2Name)
generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name) generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
@@ -956,7 +956,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
dsw := NewDesiredStateOfWorld(volumePluginMgr) dsw := NewDesiredStateOfWorld(volumePluginMgr)
node1Name := k8stypes.NodeName("node1-name") node1Name := k8stypes.NodeName("node1-name")
pod1Name := "pod1-uid" pod1Name := "pod1-uid"
volume1Name := api.UniqueVolumeName("volume1-name") volume1Name := v1.UniqueVolumeName("volume1-name")
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name) volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
dsw.AddNode(node1Name) dsw.AddNode(node1Name)
generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name) generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
@@ -968,7 +968,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
} }
node2Name := k8stypes.NodeName("node2-name") node2Name := k8stypes.NodeName("node2-name")
pod2aName := "pod2a-name" pod2aName := "pod2a-name"
volume2Name := api.UniqueVolumeName("volume2-name") volume2Name := v1.UniqueVolumeName("volume2-name")
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name) volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
dsw.AddNode(node2Name) dsw.AddNode(node2Name)
generatedVolume2Name1, podAddErr := dsw.AddPod(types.UniquePodName(pod2aName), controllervolumetesting.NewPod(pod2aName, pod2aName), volume2Spec, node2Name) generatedVolume2Name1, podAddErr := dsw.AddPod(types.UniquePodName(pod2aName), controllervolumetesting.NewPod(pod2aName, pod2aName), volume2Spec, node2Name)
@@ -993,7 +993,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
generatedVolume2Name2) generatedVolume2Name2)
} }
pod3Name := "pod3-uid" pod3Name := "pod3-uid"
volume3Name := api.UniqueVolumeName("volume3-name") volume3Name := v1.UniqueVolumeName("volume3-name")
volume3Spec := controllervolumetesting.GetTestVolumeSpec(string(volume3Name), volume3Name) volume3Spec := controllervolumetesting.GetTestVolumeSpec(string(volume3Name), volume3Name)
generatedVolume3Name, podAddErr := dsw.AddPod(types.UniquePodName(pod3Name), controllervolumetesting.NewPod(pod3Name, pod3Name), volume3Spec, node1Name) generatedVolume3Name, podAddErr := dsw.AddPod(types.UniquePodName(pod3Name), controllervolumetesting.NewPod(pod3Name, pod3Name), volume3Spec, node1Name)
if podAddErr != nil { if podAddErr != nil {
@@ -1020,7 +1020,7 @@ func verifyVolumeToAttach(
t *testing.T, t *testing.T,
volumesToAttach []VolumeToAttach, volumesToAttach []VolumeToAttach,
expectedNodeName k8stypes.NodeName, expectedNodeName k8stypes.NodeName,
expectedVolumeName api.UniqueVolumeName, expectedVolumeName v1.UniqueVolumeName,
expectedVolumeSpecName string) { expectedVolumeSpecName string) {
for _, volumeToAttach := range volumesToAttach { for _, volumeToAttach := range volumesToAttach {
if volumeToAttach.NodeName == expectedNodeName && if volumeToAttach.NodeName == expectedNodeName &&

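To make the intent of the desired-state-of-world test hunks above concrete, here is a minimal sketch of the shape these cases take after the move to the versioned types. It assumes the same package and imports as the test file above (v1, k8stypes, types, volumetesting, controllervolumetesting); the test name and failure messages are ours, not part of the commit.

func Test_VolumeExists_Positive_Sketch(t *testing.T) {
    // Arrange: a desired state of world backed by the fake volume plugin manager.
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
    podName := "pod-uid"
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
    if podAddErr != nil {
        t.Fatalf("AddPod failed for pod %q: %v", podName, podAddErr)
    }
    // Act / Assert: the generated volume name must now be reported for the node.
    if !dsw.VolumeExists(generatedVolumeName, nodeName) {
        t.Errorf("expected volume %q to exist on node %q", generatedVolumeName, nodeName)
    }
}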

@@ -23,7 +23,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
kcache "k8s.io/kubernetes/pkg/client/cache" kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/pkg/util/wait"
@@ -102,7 +102,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
} }
if exists { if exists {
informerPod, ok := informerPodObj.(*api.Pod) informerPod, ok := informerPodObj.(*v1.Pod)
if !ok { if !ok {
glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID) glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID)
continue continue

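The populator change above is mechanical: objects handed back by the shared informer are now asserted against the versioned pod type. A small illustration of the pattern (the helper name is ours, not part of the commit):

// podFromInformerObj asserts an informer cache object to *v1.Pod,
// returning false when the object has an unexpected type.
func podFromInformerObj(obj interface{}) (*v1.Pod, bool) {
    pod, ok := obj.(*v1.Pod)
    if !ok || pod == nil {
        return nil, false
    }
    return pod, true
}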

@@ -20,7 +20,7 @@ import (
"testing" "testing"
"time" "time"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -86,7 +86,7 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
reconciler := NewReconciler( reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu) reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -132,7 +132,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
reconciler := NewReconciler( reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu) reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -199,7 +199,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
reconciler := NewReconciler( reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu) reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)
@@ -266,7 +266,7 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
reconciler := NewReconciler( reconciler := NewReconciler(
reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu) reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
podName := "pod-uid" podName := "pod-uid"
volumeName := api.UniqueVolumeName("volume-name") volumeName := v1.UniqueVolumeName("volume-name")
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName) volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
nodeName := k8stypes.NodeName("node-name") nodeName := k8stypes.NodeName("node-name")
dsw.AddNode(nodeName) dsw.AddNode(nodeName)


@@ -24,9 +24,9 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
kcache "k8s.io/kubernetes/pkg/client/cache" kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache" "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
"k8s.io/kubernetes/pkg/conversion" "k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/util/strategicpatch" "k8s.io/kubernetes/pkg/util/strategicpatch"
@@ -42,7 +42,7 @@ type NodeStatusUpdater interface {
// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater. // NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater( func NewNodeStatusUpdater(
kubeClient internalclientset.Interface, kubeClient clientset.Interface,
nodeInformer kcache.SharedInformer, nodeInformer kcache.SharedInformer,
actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater { actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
return &nodeStatusUpdater{ return &nodeStatusUpdater{
@@ -53,7 +53,7 @@ func NewNodeStatusUpdater(
} }
type nodeStatusUpdater struct { type nodeStatusUpdater struct {
kubeClient internalclientset.Interface kubeClient clientset.Interface
nodeInformer kcache.SharedInformer nodeInformer kcache.SharedInformer
actualStateOfWorld cache.ActualStateOfWorld actualStateOfWorld cache.ActualStateOfWorld
} }
@@ -81,7 +81,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
err) err)
} }
node, ok := clonedNode.(*api.Node) node, ok := clonedNode.(*v1.Node)
if !ok || node == nil { if !ok || node == nil {
return fmt.Errorf( return fmt.Errorf(
"failed to cast %q object %#v to Node", "failed to cast %q object %#v to Node",

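Because NewNodeStatusUpdater now takes the versioned clientset interface, test wiring can hand it the fake client from the controller's testing package directly. A hedged sketch, written as if it lived in the same package as the updater; the controllervolumetesting alias is assumed from the test files above, and the nodeInformer and actualStateOfWorld arguments are left to the caller since their construction is not shown in this hunk:

func newStatusUpdaterForTest(nodeInformer kcache.SharedInformer, asw cache.ActualStateOfWorld) NodeStatusUpdater {
    // *fake.Clientset from the release_1_5 fake package satisfies clientset.Interface.
    kubeClient := controllervolumetesting.CreateTestClient()
    return NewNodeStatusUpdater(kubeClient, nodeInformer, asw)
}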

@@ -19,8 +19,8 @@ package testing
import ( import (
"fmt" "fmt"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake" "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core" "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types" "k8s.io/kubernetes/pkg/types"
@@ -29,12 +29,12 @@ import (
) )
// GetTestVolumeSpec returns a test volume spec // GetTestVolumeSpec returns a test volume spec
func GetTestVolumeSpec(volumeName string, diskName api.UniqueVolumeName) *volume.Spec { func GetTestVolumeSpec(volumeName string, diskName v1.UniqueVolumeName) *volume.Spec {
return &volume.Spec{ return &volume.Spec{
Volume: &api.Volume{ Volume: &v1.Volume{
Name: volumeName, Name: volumeName,
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: string(diskName), PDName: string(diskName),
FSType: "fake", FSType: "fake",
ReadOnly: false, ReadOnly: false,
@@ -48,28 +48,28 @@ func CreateTestClient() *fake.Clientset {
fakeClient := &fake.Clientset{} fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) { fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{} obj := &v1.PodList{}
podNamePrefix := "mypod" podNamePrefix := "mypod"
namespace := "mynamespace" namespace := "mynamespace"
for i := 0; i < 5; i++ { for i := 0; i < 5; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i) podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := api.Pod{ pod := v1.Pod{
Status: api.PodStatus{ Status: v1.PodStatus{
Phase: api.PodRunning, Phase: v1.PodRunning,
}, },
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
Name: podName, Name: podName,
Namespace: namespace, Namespace: namespace,
Labels: map[string]string{ Labels: map[string]string{
"name": podName, "name": podName,
}, },
}, },
Spec: api.PodSpec{ Spec: v1.PodSpec{
Containers: []api.Container{ Containers: []v1.Container{
{ {
Name: "containerName", Name: "containerName",
Image: "containerImage", Image: "containerImage",
VolumeMounts: []api.VolumeMount{ VolumeMounts: []v1.VolumeMount{
{ {
Name: "volumeMountName", Name: "volumeMountName",
ReadOnly: false, ReadOnly: false,
@@ -78,11 +78,11 @@ func CreateTestClient() *fake.Clientset {
}, },
}, },
}, },
Volumes: []api.Volume{ Volumes: []v1.Volume{
{ {
Name: "volumeName", Name: "volumeName",
VolumeSource: api.VolumeSource{ VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pdName", PDName: "pdName",
FSType: "ext4", FSType: "ext4",
ReadOnly: false, ReadOnly: false,
@@ -104,9 +104,9 @@ func CreateTestClient() *fake.Clientset {
} }
// NewPod returns a test pod object // NewPod returns a test pod object
func NewPod(uid, name string) *api.Pod { func NewPod(uid, name string) *v1.Pod {
return &api.Pod{ return &v1.Pod{
ObjectMeta: api.ObjectMeta{ ObjectMeta: v1.ObjectMeta{
UID: types.UID(uid), UID: types.UID(uid),
Name: name, Name: name,
Namespace: name, Namespace: name,

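The helpers above are what the attach/detach controller tests in this commit lean on. A short consumer-side sketch with the versioned types; the import paths and the controllervolumetesting alias follow the test files above, and the function itself is illustrative only:

func newTestFixture() (*volume.Spec, *v1.Pod, *fake.Clientset) {
    // A GCE PD backed volume spec, a matching pod, and a fake clientset whose
    // list reactors are pre-registered by CreateTestClient.
    volumeName := v1.UniqueVolumeName("volume-name")
    spec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    pod := controllervolumetesting.NewPod("pod-uid", "pod-name")
    return spec, pod, controllervolumetesting.CreateTestClient()
}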

@@ -19,9 +19,9 @@ package persistentvolume
import ( import (
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/storage" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
storageutil "k8s.io/kubernetes/pkg/apis/storage/util" storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
) )
// Test single call to syncClaim and syncVolume methods. // Test single call to syncClaim and syncVolume methods.
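A note for reading the long test table that follows: the fixture helpers are defined elsewhere in this package, and their shapes can only be inferred from how they are called here. Roughly, as an inferred sketch (the real signatures are not part of this hunk):

// Inferred, illustrative shapes only — not part of this commit's diff.
// newVolume(name, capacity, boundToClaimUID, boundToClaimName string,
//     phase v1.PersistentVolumePhase, reclaimPolicy v1.PersistentVolumeReclaimPolicy,
//     annotations ...string) *v1.PersistentVolume
// newVolumeArray(...) wraps a single newVolume result in a []*v1.PersistentVolume.
// newClaim(name, claimUID, capacity, boundToVolume string,
//     phase v1.PersistentVolumeClaimPhase, annotations ...string) *v1.PersistentVolumeClaim
// newClaimArray(...) wraps a single newClaim result in a []*v1.PersistentVolumeClaim.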
@@ -42,134 +42,134 @@ func TestSync(t *testing.T) {
{ {
// syncClaim binds to a matching unbound volume. // syncClaim binds to a matching unbound volume.
"1-1 - successful bind", "1-1 - successful bind",
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending), newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending),
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim does not do anything when there is no matching volume. // syncClaim does not do anything when there is no matching volume.
"1-2 - noop", "1-2 - noop",
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending),
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending), newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim resets claim.Status to Pending when there is no // syncClaim resets claim.Status to Pending when there is no
// matching volume. // matching volume.
"1-3 - reset to Pending", "1-3 - reset to Pending",
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound), newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimBound),
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending), newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim binds claims to the smallest matching volume // syncClaim binds claims to the smallest matching volume
"1-4 - smallest volume", "1-4 - smallest volume",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-4_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
}, },
newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending), newClaimArray("claim1-4", "uid1-4", "1Gi", "", v1.ClaimPending),
newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim binds a claim only to volume that points to it (by // syncClaim binds a claim only to volume that points to it (by
// name), even though a smaller one is available. // name), even though a smaller one is available.
"1-5 - prebound volume by name - success", "1-5 - prebound volume by name - success",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
}, },
newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending), newClaimArray("claim1-5", "uid1-5", "1Gi", "", v1.ClaimPending),
withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted)), withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim binds a claim only to volume that points to it (by // syncClaim binds a claim only to volume that points to it (by
// UID), even though a smaller one is available. // UID), even though a smaller one is available.
"1-6 - prebound volume by UID - success", "1-6 - prebound volume by UID - success",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
}, },
newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending), newClaimArray("claim1-6", "uid1-6", "1Gi", "", v1.ClaimPending),
withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted)), withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim does not bind claim to a volume prebound to a claim with // syncClaim does not bind claim to a volume prebound to a claim with
// same name and different UID // same name and different UID
"1-7 - prebound volume to different claim", "1-7 - prebound volume to different claim",
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending),
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending), newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim completes binding - simulates controller crash after // syncClaim completes binding - simulates controller crash after
// PV.ClaimRef is saved // PV.ClaimRef is saved
"1-8 - complete bind after crash - PV bound", "1-8 - complete bind after crash - PV bound",
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumePending, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending), newClaimArray("claim1-8", "uid1-8", "1Gi", "", v1.ClaimPending),
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim completes binding - simulates controller crash after // syncClaim completes binding - simulates controller crash after
// PV.Status is saved // PV.Status is saved
"1-9 - complete bind after crash - PV status saved", "1-9 - complete bind after crash - PV status saved",
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending), newClaimArray("claim1-9", "uid1-9", "1Gi", "", v1.ClaimPending),
newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim completes binding - simulates controller crash after // syncClaim completes binding - simulates controller crash after
// PVC.VolumeName is saved // PVC.VolumeName is saved
"1-10 - complete bind after crash - PVC bound", "1-10 - complete bind after crash - PVC bound",
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", v1.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim binds a claim only when the label selector matches the volume // syncClaim binds a claim only when the label selector matches the volume
"1-11 - bind when selector matches", "1-11 - bind when selector matches",
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)), withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain)),
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)), withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim does not bind a claim when the label selector doesn't match // syncClaim does not bind a claim when the label selector doesn't match
"1-12 - do not bind when selector does not match", "1-12 - do not bind when selector does not match",
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending)),
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)), withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
@@ -181,8 +181,8 @@ func TestSync(t *testing.T) {
"2-1 - claim prebound to non-existing volume - noop", "2-1 - claim prebound to non-existing volume - noop",
novolumes, novolumes,
novolumes, novolumes,
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", v1.ClaimPending),
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending), newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", v1.ClaimPending),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
@@ -191,28 +191,28 @@ func TestSync(t *testing.T) {
"2-2 - claim prebound to non-existing volume - reset status", "2-2 - claim prebound to non-existing volume - reset status",
novolumes, novolumes,
novolumes, novolumes,
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound), newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", v1.ClaimBound),
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending), newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", v1.ClaimPending),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim with claim pre-bound to a PV that exists and is // syncClaim with claim pre-bound to a PV that exists and is
// unbound. Check it gets bound and no annBoundByController is set. // unbound. Check it gets bound and no annBoundByController is set.
"2-3 - claim prebound to unbound volume", "2-3 - claim prebound to unbound volume",
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimPending), newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending),
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimBound, annBindCompleted), newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// claim with claim pre-bound to a PV that is pre-bound to the claim // claim with claim pre-bound to a PV that is pre-bound to the claim
// by name. Check it gets bound and no annBoundByController is set. // by name. Check it gets bound and no annBoundByController is set.
"2-4 - claim prebound to prebound volume by name", "2-4 - claim prebound to prebound volume by name",
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", api.ClaimPending), newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimPending),
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", api.ClaimBound, annBindCompleted), newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
@@ -220,30 +220,30 @@ func TestSync(t *testing.T) {
// claim by UID. Check it gets bound and no annBoundByController is // claim by UID. Check it gets bound and no annBoundByController is
// set. // set.
"2-5 - claim prebound to prebound volume by UID", "2-5 - claim prebound to prebound volume by UID",
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", api.ClaimPending), newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimPending),
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", api.ClaimBound, annBindCompleted), newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim with claim pre-bound to a PV that is bound to different // syncClaim with claim pre-bound to a PV that is bound to different
// claim. Check it's reset to Pending. // claim. Check it's reset to Pending.
"2-6 - claim prebound to already bound volume", "2-6 - claim prebound to already bound volume",
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", api.ClaimBound), newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", v1.ClaimBound),
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", api.ClaimPending), newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", v1.ClaimPending),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim with claim bound by controller to a PV that is bound to // syncClaim with claim bound by controller to a PV that is bound to
// different claim. Check it throws an error. // different claim. Check it throws an error.
"2-7 - claim bound by controller to already bound volume", "2-7 - claim bound by controller to already bound volume",
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", api.ClaimBound, annBoundByController), newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", v1.ClaimBound, annBoundByController),
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", api.ClaimBound, annBoundByController), newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", v1.ClaimBound, annBoundByController),
noevents, noerrors, testSyncClaimError, noevents, noerrors, testSyncClaimError,
}, },
{ {
@@ -251,10 +251,10 @@ func TestSync(t *testing.T) {
// unbound, but does not match the selector. Check it gets bound // unbound, but does not match the selector. Check it gets bound
// and no annBoundByController is set. // and no annBoundByController is set.
"2-8 - claim prebound to unbound volume that does not match the selector", "2-8 - claim prebound to unbound volume that does not match the selector",
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimPending)), withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending)),
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimBound, annBindCompleted)), withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
@@ -265,8 +265,8 @@ func TestSync(t *testing.T) {
"3-1 - bound claim with missing VolumeName", "3-1 - bound claim with missing VolumeName",
novolumes, novolumes,
novolumes, novolumes,
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-1", "uid3-1", "10Gi", "", v1.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted), newClaimArray("claim3-1", "uid3-1", "10Gi", "", v1.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim, []string{"Warning ClaimLost"}, noerrors, testSyncClaim,
}, },
{ {
@@ -275,28 +275,28 @@ func TestSync(t *testing.T) {
"3-2 - bound claim with missing volume", "3-2 - bound claim with missing volume",
novolumes, novolumes,
novolumes, novolumes,
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", v1.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted), newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", v1.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim, []string{"Warning ClaimLost"}, noerrors, testSyncClaim,
}, },
{ {
// syncClaim with claim bound to unbound volume. Check it's bound. // syncClaim with claim bound to unbound volume. Check it's bound.
// Also check that Pending phase is set to Bound // Also check that Pending phase is set to Bound
"3-3 - bound claim with unbound volume", "3-3 - bound claim with unbound volume",
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncClaim with claim bound to volume with missing (or different) // syncClaim with claim bound to volume with missing (or different)
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost. // volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
"3-4 - bound claim with prebound volume", "3-4 - bound claim with prebound volume",
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimPending, annBoundByController, annBindCompleted),
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted), newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimLost, annBoundByController, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
}, },
{ {
@@ -304,10 +304,10 @@ func TestSync(t *testing.T) {
// controller does not do anything. Also check that Pending phase is // controller does not do anything. Also check that Pending phase is
// set to Bound // set to Bound
"3-5 - bound claim with bound volume", "3-5 - bound claim with bound volume",
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimPending, annBindCompleted),
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted), newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
@@ -315,10 +315,10 @@ func TestSync(t *testing.T) {
// claim. Check that the claim is marked as lost. // claim. Check that the claim is marked as lost.
// TODO: test that an event is emitted // TODO: test that an event is emitted
"3-6 - bound claim with bound volume", "3-6 - bound claim with bound volume",
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimPending, annBindCompleted),
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted), newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimLost, annBindCompleted),
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim, []string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
}, },
{ {
@@ -326,18 +326,18 @@ func TestSync(t *testing.T) {
// even if the claim's selector doesn't match the volume. Also // even if the claim's selector doesn't match the volume. Also
// check that Pending phase is set to Bound // check that Pending phase is set to Bound
"3-7 - bound claim with unbound volume where selector doesn't match", "3-7 - bound claim with unbound volume where selector doesn't match",
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted)), withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, annBoundByController, annBindCompleted)),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted)), withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
// [Unit test set 4] All syncVolume tests. // [Unit test set 4] All syncVolume tests.
{ {
// syncVolume with pending volume. Check it's marked as Available. // syncVolume with pending volume. Check it's marked as Available.
"4-1 - pending volume", "4-1 - pending volume",
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
noclaims, noclaims,
noclaims, noclaims,
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
@@ -346,8 +346,8 @@ func TestSync(t *testing.T) {
// syncVolume with prebound pending volume. Check it's marked as // syncVolume with prebound pending volume. Check it's marked as
// Available. // Available.
"4-2 - pending prebound volume", "4-2 - pending prebound volume",
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
noclaims, noclaims,
noclaims, noclaims,
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
@@ -356,8 +356,8 @@ func TestSync(t *testing.T) {
// syncVolume with volume bound to missing claim. // syncVolume with volume bound to missing claim.
// Check the volume gets Released // Check the volume gets Released
"4-3 - bound volume with missing claim", "4-3 - bound volume with missing claim",
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain),
noclaims, noclaims,
noclaims, noclaims,
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
@@ -366,50 +366,50 @@ func TestSync(t *testing.T) {
// syncVolume with volume bound to claim with different UID. // syncVolume with volume bound to claim with different UID.
// Check the volume gets Released. // Check the volume gets Released.
"4-4 - volume bound to claim with different UID", "4-4 - volume bound to claim with different UID",
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", v1.ClaimBound, annBindCompleted),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted), newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", v1.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
}, },
{ {
// syncVolume with volume bound by controller to unbound claim. // syncVolume with volume bound by controller to unbound claim.
// Check syncVolume does not do anything. // Check syncVolume does not do anything.
"4-5 - volume bound by controller to unbound claim", "4-5 - volume bound by controller to unbound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
}, },
{ {
// syncVolume with volume bound by user to unbound claim. // syncVolume with volume bound by user to unbound claim.
// Check syncVolume does not do anything. // Check syncVolume does not do anything.
"4-5 - volume bound by user to bound claim", "4-5 - volume bound by user to bound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending), newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
}, },
{ {
// syncVolume with volume bound to bound claim. // syncVolume with volume bound to bound claim.
// Check that the volume is marked as Bound. // Check that the volume is marked as Bound.
"4-6 - volume bound by to bound claim", "4-6 - volume bound by to bound claim",
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", v1.ClaimBound),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound), newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", v1.ClaimBound),
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
}, },
{ {
// syncVolume with volume bound by controller to claim bound to // syncVolume with volume bound by controller to claim bound to
// another volume. Check that the volume is rolled back. // another volume. Check that the volume is rolled back.
"4-7 - volume bound by controller to claim bound somewhere else", "4-7 - volume bound by controller to claim bound somewhere else",
newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-7", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", v1.ClaimBound),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound), newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", v1.ClaimBound),
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
}, },
{ {
@@ -417,10 +417,10 @@ func TestSync(t *testing.T) {
// another volume. Check that the volume is marked as Available // another volume. Check that the volume is marked as Available
// and its UID is reset. // and its UID is reset.
"4-8 - volume bound by user to claim bound somewhere else", "4-8 - volume bound by user to claim bound somewhere else",
newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newVolumeArray("volume4-8", "10Gi", "", "claim4-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", v1.ClaimBound),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound), newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", v1.ClaimBound),
noevents, noerrors, testSyncVolume, noevents, noerrors, testSyncVolume,
}, },
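The three 4-x cases above pin down how syncVolume resolves a volume whose ClaimRef disagrees with the claim it points at: a consistent binding is simply marked Bound (4-6), a binding the controller created is undone entirely (4-7), and a user pre-binding only has its stale UID cleared so the volume can re-bind to a recreated claim (4-8). A minimal sketch of that decision, mirroring the expected outcomes rather than the controller's actual code; the hasAnnotation helper and the annotation value are assumptions made for the sketch:

package pvsketch

import "k8s.io/kubernetes/pkg/api/v1"

// Assumed annotation key; the controller package defines its own constant.
const annBoundByController = "pv.kubernetes.io/bound-by-controller"

// hasAnnotation is a hypothetical helper used only in this sketch.
func hasAnnotation(volume *v1.PersistentVolume, key string) bool {
	_, found := volume.Annotations[key]
	return found
}

// resolveStaleBinding mirrors the outcomes asserted by cases 4-6 .. 4-8.
func resolveStaleBinding(volume *v1.PersistentVolume, claim *v1.PersistentVolumeClaim) {
	if claim.Spec.VolumeName == volume.Name {
		// 4-6: claim and volume agree; the volume just becomes Bound.
		volume.Status.Phase = v1.VolumeBound
		return
	}
	if hasAnnotation(volume, annBoundByController) {
		// 4-7: the controller made this binding, so it is rolled back
		// completely and the volume returns to the pool.
		volume.Spec.ClaimRef = nil
	} else {
		// 4-8: the user pre-bound the volume (ClaimRef is non-nil); keep the
		// reference but drop the stale UID so a recreated claim can bind.
		volume.Spec.ClaimRef.UID = ""
	}
	volume.Status.Phase = v1.VolumeAvailable
}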
@@ -429,78 +429,78 @@ func TestSync(t *testing.T) {
// syncVolume binds a claim to requested class even if there is a // syncVolume binds a claim to requested class even if there is a
// smaller PV available // smaller PV available
"13-1 - binding to class", "13-1 - binding to class",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-1-2", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation), newVolume("volume13-1-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation), newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
}, },
newClaimArray("claim13-1", "uid13-1", "1Gi", "", api.ClaimPending, storageutil.StorageClassAnnotation), newClaimArray("claim13-1", "uid13-1", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
withExpectedCapacity("10Gi", newClaimArray("claim13-1", "uid13-1", "1Gi", "volume13-1-2", api.ClaimBound, annBoundByController, storageutil.StorageClassAnnotation, annBindCompleted)), withExpectedCapacity("10Gi", newClaimArray("claim13-1", "uid13-1", "1Gi", "volume13-1-2", v1.ClaimBound, annBoundByController, storageutil.StorageClassAnnotation, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncVolume binds a claim without a class even if there is a // syncVolume binds a claim without a class even if there is a
// smaller PV with a class available // smaller PV with a class available
"13-2 - binding without a class", "13-2 - binding without a class",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation), newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolume("volume13-2-2", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume13-2-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation), newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
}, },
newClaimArray("claim13-2", "uid13-2", "1Gi", "", api.ClaimPending), newClaimArray("claim13-2", "uid13-2", "1Gi", "", v1.ClaimPending),
withExpectedCapacity("10Gi", newClaimArray("claim13-2", "uid13-2", "1Gi", "volume13-2-2", api.ClaimBound, annBoundByController, annBindCompleted)), withExpectedCapacity("10Gi", newClaimArray("claim13-2", "uid13-2", "1Gi", "volume13-2-2", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncVolume binds a claim with given class even if there is a // syncVolume binds a claim with given class even if there is a
// smaller PV with different class available // smaller PV with different class available
"13-3 - binding to specific a class", "13-3 - binding to specific a class",
volumeWithClass("silver", []*api.PersistentVolume{ volumeWithClass("silver", []*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-3-2", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation), newVolume("volume13-3-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
}), }),
volumeWithClass("silver", []*api.PersistentVolume{ volumeWithClass("silver", []*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation), newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
}), }),
newClaimArray("claim13-3", "uid13-3", "1Gi", "", api.ClaimPending, storageutil.StorageClassAnnotation), newClaimArray("claim13-3", "uid13-3", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
withExpectedCapacity("10Gi", newClaimArray("claim13-3", "uid13-3", "1Gi", "volume13-3-2", api.ClaimBound, annBoundByController, annBindCompleted, storageutil.StorageClassAnnotation)), withExpectedCapacity("10Gi", newClaimArray("claim13-3", "uid13-3", "1Gi", "volume13-3-2", v1.ClaimBound, annBoundByController, annBindCompleted, storageutil.StorageClassAnnotation)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncVolume binds claim requesting class "" to claim to PV with // syncVolume binds claim requesting class "" to claim to PV with
// class="" // class=""
"13-4 - empty class", "13-4 - empty class",
volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)), volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain)),
volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)), volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController)),
claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "", api.ClaimPending)), claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "", v1.ClaimPending)),
claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", api.ClaimBound, annBoundByController, annBindCompleted)), claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncVolume binds claim requesting class nil to claim to PV with // syncVolume binds claim requesting class nil to claim to PV with
// class = "" // class = ""
"13-5 - nil class", "13-5 - nil class",
volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)), volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain)),
volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)), volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController)),
newClaimArray("claim13-5", "uid13-5", "1Gi", "", api.ClaimPending), newClaimArray("claim13-5", "uid13-5", "1Gi", "", v1.ClaimPending),
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// syncVolume binds claim requesting class "" to claim to PV with // syncVolume binds claim requesting class "" to claim to PV with
// class=nil // class=nil
"13-6 - nil class in PV, '' class in claim", "13-6 - nil class in PV, '' class in claim",
newVolumeArray("volume13-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume13-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume13-6", "1Gi", "uid13-6", "claim13-6", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume13-6", "1Gi", "uid13-6", "claim13-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "", api.ClaimPending)), claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "", v1.ClaimPending)),
claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "volume13-6", api.ClaimBound, annBoundByController, annBindCompleted)), claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "volume13-6", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
} }
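The 13-x cases above all reduce to one matching rule: the claim's requested class and the volume's class are read from the same beta annotation, and a missing annotation is treated exactly like an explicit "" class. That is why 13-4 through 13-6 bind across the nil/"" combinations, while 13-1 through 13-3 prefer a larger volume of the requested class over a smaller volume of a different class. A small sketch of that rule; the annotation value and helper names are illustrative assumptions, not the controller's own identifiers:

package pvsketch

import "k8s.io/kubernetes/pkg/api/v1"

// Assumed value of the beta annotation that storageutil.StorageClassAnnotation
// refers to in the tests above.
const storageClassAnnotation = "volume.beta.kubernetes.io/storage-class"

// classOf returns "" both for an explicit empty class and for a missing
// annotation, collapsing the nil/"" distinction covered by cases 13-4 .. 13-6.
func classOf(annotations map[string]string) string {
	return annotations[storageClassAnnotation]
}

// classesMatch is the gate applied before any size-based matching: only
// volumes of the requested class are binding candidates, even when a smaller
// volume of another class (or of no class) exists.
func classesMatch(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool {
	return classOf(claim.Annotations) == classOf(volume.Annotations)
}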
@@ -527,26 +527,26 @@ func TestMultiSync(t *testing.T) {
{ {
// syncClaim binds to a matching unbound volume. // syncClaim binds to a matching unbound volume.
"10-1 - successful bind", "10-1 - successful bind",
newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain), newVolumeArray("volume10-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending), newClaimArray("claim10-1", "uid10-1", "1Gi", "", v1.ClaimPending),
newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
{ {
// Two controllers bound two PVs to single claim. Test one of them // Two controllers bound two PVs to single claim. Test one of them
// wins and the second rolls back. // wins and the second rolls back.
"10-2 - bind PV race", "10-2 - bind PV race",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController), newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain), newVolume("volume10-2-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
}, },
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", v1.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted), newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim, noevents, noerrors, testSyncClaim,
}, },
} }
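Every entry in the TestSync and TestMultiSync tables above follows the same positional layout: name, initial volumes, expected volumes, initial claims, expected claims, expected events, injected errors, and the sync function under test. A rough sketch of that shape for orientation; the field and type names below are placeholders, and the real controllerTest struct lives in the package's test framework file:

package pvsketch

import "k8s.io/kubernetes/pkg/api/v1"

// Placeholder types so this sketch stands alone; the real framework defines
// its own error-injection and test-call types.
type reactorError struct{}
type testCall func() error

// controllerTestSketch mirrors the positional layout of the table entries.
type controllerTestSketch struct {
	name            string                      // e.g. "10-2 - bind PV race"
	initialVolumes  []*v1.PersistentVolume      // volumes seeded into the fake API
	expectedVolumes []*v1.PersistentVolume      // volumes expected after the sync
	initialClaims   []*v1.PersistentVolumeClaim // claims seeded into the fake API
	expectedClaims  []*v1.PersistentVolumeClaim // claims expected after the sync
	expectedEvents  []string                    // e.g. "Warning VolumeFailedDelete"; noevents = none
	errors          []reactorError              // injected API errors; noerrors = none
	test            testCall                    // testSyncClaim or testSyncVolume
}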

View File

@@ -20,8 +20,8 @@ import (
"errors" "errors"
"testing" "testing"
"k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/storage" storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
) )
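Reconstructed from the hunk above (the left column is the old text, the right column the new), the updated import block of this test file reads roughly as follows; the internal api and storage packages are replaced by their versioned counterparts:

import (
	"errors"
	"testing"

	"k8s.io/kubernetes/pkg/api/v1"
	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
)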
// Test single call to syncVolume, expecting recycling to happen. // Test single call to syncVolume, expecting recycling to happen.
@@ -33,7 +33,7 @@ func TestDeleteSync(t *testing.T) {
{ {
// delete volume bound by controller // delete volume bound by controller
"8-1 - successful delete", "8-1 - successful delete",
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
novolumes, novolumes,
noclaims, noclaims,
noclaims, noclaims,
@@ -45,7 +45,7 @@ func TestDeleteSync(t *testing.T) {
{ {
// delete volume bound by user // delete volume bound by user
"8-2 - successful delete with prebound volume", "8-2 - successful delete with prebound volume",
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes, novolumes,
noclaims, noclaims,
noclaims, noclaims,
@@ -57,8 +57,8 @@ func TestDeleteSync(t *testing.T) {
{ {
// delete failure - plugin not found // delete failure - plugin not found
"8-3 - plugin not found", "8-3 - plugin not found",
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete)), withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims, noclaims,
noclaims, noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume, []string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume,
@@ -66,8 +66,8 @@ func TestDeleteSync(t *testing.T) {
{ {
// delete failure - newDeleter returns error // delete failure - newDeleter returns error
"8-4 - newDeleter returns error", "8-4 - newDeleter returns error",
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete)), withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims, noclaims,
noclaims, noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, []string{"Warning VolumeFailedDelete"}, noerrors,
@@ -76,8 +76,8 @@ func TestDeleteSync(t *testing.T) {
{ {
// delete failure - delete() returns error // delete failure - delete() returns error
"8-5 - delete returns error", "8-5 - delete returns error",
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete)), withMessage("Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims, noclaims,
noclaims, noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, []string{"Warning VolumeFailedDelete"}, noerrors,
@@ -86,7 +86,7 @@ func TestDeleteSync(t *testing.T) {
{ {
// delete success(?) - volume is deleted before doDelete() starts // delete success(?) - volume is deleted before doDelete() starts
"8-6 - volume is deleted before deleting", "8-6 - volume is deleted before deleting",
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes, novolumes,
noclaims, noclaims,
noclaims, noclaims,
@@ -103,31 +103,31 @@ func TestDeleteSync(t *testing.T) {
// starts. This simulates "volume no longer needs recycling, // starts. This simulates "volume no longer needs recycling,
// skipping". // skipping".
"8-7 - volume is bound before deleting", "8-7 - volume is bound before deleting",
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
noclaims, noclaims,
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound), newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound),
noevents, noerrors, noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) { wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock() reactor.lock.Lock()
defer reactor.lock.Unlock() defer reactor.lock.Unlock()
// Bind the volume to resurrected claim (this should never // Bind the volume to resurrected claim (this should never
// happen) // happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound) claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound)
reactor.claims[claim.Name] = claim reactor.claims[claim.Name] = claim
ctrl.claims.Add(claim) ctrl.claims.Add(claim)
volume := reactor.volumes["volume8-7"] volume := reactor.volumes["volume8-7"]
volume.Status.Phase = api.VolumeBound volume.Status.Phase = v1.VolumeBound
}), }),
}, },
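Case 8-7 relies on the framework's operation-injection hook: the injected function runs against the fake reactor (taking the reactor's lock itself, as shown) just before the controller's delete operation starts, so the test can re-create the claim and mark the volume Bound, after which the deleter sees the volume as needed again and skips the delete. A loose sketch of the compose-then-run idea, meant to sit alongside the framework types in the same test package; the real wrapTestWithInjectedOperation hooks into the controller's operation scheduling and has a different signature:

// Simplified sketch of the injection pattern; not the real helper.
func wrapWithInjection(
	wrapped func(ctrl *PersistentVolumeController, reactor *volumeReactor) error,
	inject func(ctrl *PersistentVolumeController, reactor *volumeReactor),
) func(ctrl *PersistentVolumeController, reactor *volumeReactor) error {
	return func(ctrl *PersistentVolumeController, reactor *volumeReactor) error {
		inject(ctrl, reactor) // mutate fake state first, e.g. resurrect the claim
		return wrapped(ctrl, reactor)
	}
}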
{ {
// delete success - volume bound by user is deleted, while a new // delete success - volume bound by user is deleted, while a new
// claim is created with another UID. // claim is created with another UID.
"8-9 - prebound volume is deleted while the claim exists", "8-9 - prebound volume is deleted while the claim exists",
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes, novolumes,
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending), newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending),
noevents, noerrors, noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The // Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds. // deleter simulates one delete() call that succeeds.
@@ -136,8 +136,8 @@ func TestDeleteSync(t *testing.T) {
{ {
// PV requires external deleter // PV requires external deleter
"8-10 - external deleter", "8-10 - external deleter",
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController), newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", api.VolumeReleased, api.PersistentVolumeReclaimDelete, annBoundByController), newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, annBoundByController),
noclaims, noclaims,
noclaims, noclaims,
noevents, noerrors, noevents, noerrors,
@@ -152,16 +152,16 @@ func TestDeleteSync(t *testing.T) {
// delete success - two PVs are provisioned for a single claim. // delete success - two PVs are provisioned for a single claim.
// One of the PVs is deleted. // One of the PVs is deleted.
"8-11 - two PVs provisioned for a single claim", "8-11 - two PVs provisioned for a single claim",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
}, },
// the claim is bound to volume8-11-2 -> volume8-11-1 has lost the race and will be deleted // the claim is bound to volume8-11-2 -> volume8-11-1 has lost the race and will be deleted
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", api.ClaimBound), newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound),
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", api.ClaimBound), newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound),
noevents, noerrors, noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The // Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds. // deleter simulates one delete() call that succeeds.
@@ -172,17 +172,17 @@ func TestDeleteSync(t *testing.T) {
// claim. One of the PVs is marked as Released to be deleted by the // claim. One of the PVs is marked as Released to be deleted by the
// external provisioner. // external provisioner.
"8-12 - two PVs externally provisioned for a single claim", "8-12 - two PVs externally provisioned for a single claim",
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
}, },
[]*api.PersistentVolume{ []*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", api.VolumeReleased, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned), newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
}, },
// the claim is bound to volume8-12-2 -> volume8-12-1 has lost the race and will be "Released" // the claim is bound to volume8-12-2 -> volume8-12-1 has lost the race and will be "Released"
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", api.ClaimBound), newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound),
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", api.ClaimBound), newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound),
noevents, noerrors, noevents, noerrors,
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error { func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
// Inject external deleter annotation // Inject external deleter annotation
@@ -215,7 +215,7 @@ func TestDeleteMultiSync(t *testing.T) {
// delete failure - delete returns error. The controller should // delete failure - delete returns error. The controller should
// try again. // try again.
"9-1 - delete returns error", "9-1 - delete returns error",
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete), newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes, novolumes,
noclaims, noclaims,
noclaims, noclaims,

Some files were not shown because too many files have changed in this diff.