cmd/kube-controller-manager — migrate controllers from internal API types to the versioned v1 API and the generated release_1_5 clientset
@@ -34,10 +34,11 @@ import (
 "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
 "k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
 "k8s.io/kubernetes/pkg/apis/batch"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 "k8s.io/kubernetes/pkg/client/leaderelection"
 "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 "k8s.io/kubernetes/pkg/client/record"

@@ -159,8 +160,8 @@ func Run(s *options.CMServer) error {

 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
-eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
-recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controller-manager"})
+eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "controller-manager"})

 run := func(stop <-chan struct{}) {
 rootClientBuilder := controller.SimpleControllerClientBuilder{
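This hunk rewires event recording from the internal-version client to the generated release_1_5 clientset: the sink becomes v1core.EventSinkImpl and the recorder is stamped with a v1.EventSource. A minimal sketch of the resulting wiring, assembled only from identifiers that appear in the hunk (the vendored 1.5-era import paths and exact signatures are assumptions here, not verified):

// Sketch only: wiring implied by the + lines above.
package app

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// newControllerManagerRecorder builds an event recorder backed by the
// versioned core client; the return type is an assumption.
func newControllerManagerRecorder(kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return eventBroadcaster.NewRecorder(v1.EventSource{Component: "controller-manager"})
}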
@@ -194,7 +195,7 @@ func Run(s *options.CMServer) error {

 // TODO: enable other lock types
 rl := resourcelock.EndpointsLock{
-EndpointsMeta: api.ObjectMeta{
+EndpointsMeta: v1.ObjectMeta{
 Namespace: "kube-system",
 Name: "kube-controller-manager",
 },

@@ -225,7 +226,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 return rootClientBuilder.ClientOrDie(serviceAccountName)
 }
 discoveryClient := client("controller-discovery").Discovery()
-sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), ResyncPeriod(s)())
+sharedInformers := informers.NewSharedInformerFactory(client("shared-informers"), nil, ResyncPeriod(s)())

 // always start the SA token controller first using a full-power client, since it needs to mint tokens for the rest
 if len(s.ServiceAccountKeyFile) > 0 {
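NewSharedInformerFactory now takes an extra argument, passed as nil here. What that middle parameter is cannot be read off this hunk; the toy below merely models a constructor that grew an optional second client during such a migration (names and meaning are hypothetical):

package main

import (
	"fmt"
	"time"
)

// factory is a toy model; internalClient stands in for whatever the new
// middle parameter is — the hunk passes nil for it.
type factory struct {
	client         string
	internalClient *string
	resync         time.Duration
}

func newSharedInformerFactory(client string, internalClient *string, resync time.Duration) *factory {
	return &factory{client: client, internalClient: internalClient, resync: resync}
}

func main() {
	f := newSharedInformerFactory("shared-informers", nil, 30*time.Second)
	fmt.Println(f.client, f.internalClient == nil, f.resync) // shared-informers true 30s
}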
@@ -392,7 +393,7 @@ func StartControllers(s *options.CMServer, kubeconfig *restclient.Config, rootCl
 return gvr, nil
 }
 }
-namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, gvrFn, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
+namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, gvrFn, s.NamespaceSyncPeriod.Duration, v1.FinalizerKubernetes)
 go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
 time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
@@ -22,11 +22,11 @@ import (
 "strings"
 "time"

-"k8s.io/kubernetes/pkg/api"
-"k8s.io/kubernetes/pkg/apis/certificates"
+"k8s.io/kubernetes/pkg/api/v1"
+certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"
 "k8s.io/kubernetes/pkg/client/cache"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/controller"
 "k8s.io/kubernetes/pkg/runtime"

@@ -62,7 +62,7 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
 // Send events to the apiserver
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
-eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

 // Configure cfssl signer
 // TODO: support non-default policy and remote/pkcs11 signing

@@ -84,10 +84,10 @@ func NewCertificateController(kubeClient clientset.Interface, syncPeriod time.Du
 // Manage the addition/update of certificate requests
 cc.csrStore.Store, cc.csrController = cache.NewInformer(
 &cache.ListWatch{
-ListFunc: func(options api.ListOptions) (runtime.Object, error) {
+ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
 return cc.kubeClient.Certificates().CertificateSigningRequests().List(options)
 },
-WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
+WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
 return cc.kubeClient.Certificates().CertificateSigningRequests().Watch(options)
 },
 },

@@ -240,7 +240,7 @@ func (cc *CertificateController) maybeAutoApproveCSR(csr *certificates.Certifica
 return csr, nil
 }

-x509cr, err := certutil.ParseCSR(csr)
+x509cr, err := certutil.ParseCSRV1alpha1(csr)
 if err != nil {
 utilruntime.HandleError(fmt.Errorf("unable to parse csr %q: %v", csr.Name, err))
 return csr, nil
@@ -16,7 +16,7 @@ limitations under the License.

 package certificates

-import "k8s.io/kubernetes/pkg/apis/certificates"
+import certificates "k8s.io/kubernetes/pkg/apis/certificates/v1alpha1"

 // IsCertificateRequestApproved returns true if a certificate request has the
 // "Approved" condition and no "Denied" conditions; false otherwise.
@@ -22,9 +22,10 @@ import (

 "k8s.io/kubernetes/pkg/api"
 apierrors "k8s.io/kubernetes/pkg/api/errors"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/client/cache"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/fields"
 "k8s.io/kubernetes/pkg/runtime"

@@ -76,7 +77,7 @@ type SAControllerClientBuilder struct {

 // CoreClient is used to provision service accounts if needed and watch for their associated tokens
 // to construct a controller client
-CoreClient unversionedcore.CoreInterface
+CoreClient v1core.CoreV1Interface

 // Namespace is the namespace used to host the service accounts that will back the
 // controllers. It must be highly privileged namespace which normal users cannot inspect.

@@ -96,26 +97,26 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 // check to see if the namespace exists. If it isn't a NotFound, just try to create the SA.
 // It'll probably fail, but perhaps that will have a better message.
 if _, err := b.CoreClient.Namespaces().Get(b.Namespace); apierrors.IsNotFound(err) {
-_, err = b.CoreClient.Namespaces().Create(&api.Namespace{ObjectMeta: api.ObjectMeta{Name: b.Namespace}})
+_, err = b.CoreClient.Namespaces().Create(&v1.Namespace{ObjectMeta: v1.ObjectMeta{Name: b.Namespace}})
 if err != nil && !apierrors.IsAlreadyExists(err) {
 return nil, err
 }
 }

 sa, err = b.CoreClient.ServiceAccounts(b.Namespace).Create(
-&api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: b.Namespace, Name: name}})
+&v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Namespace: b.Namespace, Name: name}})
 if err != nil {
 return nil, err
 }
 }

 lw := &cache.ListWatch{
-ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
+ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
 return b.CoreClient.Secrets(b.Namespace).List(options)
 },
-WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
+WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+options.FieldSelector = fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)}).String()
 return b.CoreClient.Secrets(b.Namespace).Watch(options)
 },
 }
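Both selector assignments gain a trailing .String(): versioned ListOptions carry field and label selectors as serialized strings (the wire form) rather than as structured selector objects, so the selector built by fields.SelectorFromSet must be rendered before assignment. A dependency-free sketch of what that serialization looks like:

package main

import (
	"fmt"
	"sort"
	"strings"
)

// selectorString renders a set-based selector in the "key=value,key2=value2"
// wire form that the versioned ListOptions fields expect.
func selectorString(set map[string]string) string {
	parts := make([]string, 0, len(set))
	for k, v := range set {
		parts = append(parts, k+"="+v)
	}
	sort.Strings(parts) // deterministic output
	return strings.Join(parts, ",")
}

func main() {
	fmt.Println(selectorString(map[string]string{
		"type": "kubernetes.io/service-account-token",
	}))
	// type=kubernetes.io/service-account-token
}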
@@ -128,13 +129,13 @@ func (b SAControllerClientBuilder) Config(name string) (*restclient.Config, erro
 return false, fmt.Errorf("error watching")

 case watch.Added, watch.Modified:
-secret := event.Object.(*api.Secret)
+secret := event.Object.(*v1.Secret)
 if !serviceaccount.IsServiceAccountToken(secret, sa) ||
-len(secret.Data[api.ServiceAccountTokenKey]) == 0 {
+len(secret.Data[v1.ServiceAccountTokenKey]) == 0 {
 return false, nil
 }
 // TODO maybe verify the token is valid
-clientConfig.BearerToken = string(secret.Data[api.ServiceAccountTokenKey])
+clientConfig.BearerToken = string(secret.Data[v1.ServiceAccountTokenKey])
 restclient.AddUserAgent(clientConfig, serviceaccount.MakeUsername(b.Namespace, name))
 return true, nil
@@ -21,15 +21,15 @@ import (
 "strings"

 "github.com/golang/glog"
-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/labels"
 )

 type PodControllerRefManager struct {
 podControl PodControlInterface
-controllerObject api.ObjectMeta
+controllerObject v1.ObjectMeta
 controllerSelector labels.Selector
 controllerKind unversioned.GroupVersionKind
 }

@@ -38,7 +38,7 @@ type PodControllerRefManager struct {
 // methods to manage the controllerRef of pods.
 func NewPodControllerRefManager(
 podControl PodControlInterface,
-controllerObject api.ObjectMeta,
+controllerObject v1.ObjectMeta,
 controllerSelector labels.Selector,
 controllerKind unversioned.GroupVersionKind,
 ) *PodControllerRefManager {

@@ -53,10 +53,10 @@ func NewPodControllerRefManager(
 // controllerRef pointing to other object are ignored) 3. controlledDoesNotMatch
 // are the pods that have a controllerRef pointing to the controller, but their
 // labels no longer match the selector.
-func (m *PodControllerRefManager) Classify(pods []*api.Pod) (
-matchesAndControlled []*api.Pod,
-matchesNeedsController []*api.Pod,
-controlledDoesNotMatch []*api.Pod) {
+func (m *PodControllerRefManager) Classify(pods []*v1.Pod) (
+matchesAndControlled []*v1.Pod,
+matchesNeedsController []*v1.Pod,
+controlledDoesNotMatch []*v1.Pod) {
 for i := range pods {
 pod := pods[i]
 if !IsPodActive(pod) {

@@ -91,7 +91,7 @@ func (m *PodControllerRefManager) Classify(pods []*api.Pod) (

 // getControllerOf returns the controllerRef if controllee has a controller,
 // otherwise returns nil.
-func getControllerOf(controllee api.ObjectMeta) *api.OwnerReference {
+func getControllerOf(controllee v1.ObjectMeta) *v1.OwnerReference {
 for _, owner := range controllee.OwnerReferences {
 // controlled by other controller
 if owner.Controller != nil && *owner.Controller == true {
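getControllerOf scans the ownerReferences for the entry whose Controller flag is set; the *bool lets an unset flag be distinguished from an explicit false. The same scan over trimmed stand-in types, runnable on its own:

package main

import "fmt"

// Trimmed-down stand-ins for the v1 types the function reads.
type ownerReference struct {
	UID        string
	Controller *bool // nil means the field was never set
}

type objectMeta struct {
	OwnerReferences []ownerReference
}

// getControllerOf mirrors the helper above: return the owner marked as
// controller, or nil when no owner claims the object.
func getControllerOf(controllee objectMeta) *ownerReference {
	for i := range controllee.OwnerReferences {
		owner := &controllee.OwnerReferences[i]
		if owner.Controller != nil && *owner.Controller {
			return owner
		}
	}
	return nil
}

func main() {
	isController := true
	m := objectMeta{OwnerReferences: []ownerReference{
		{UID: "rs-1"},                             // plain owner, not a controller
		{UID: "rs-2", Controller: &isController}, // the managing controller
	}}
	if ref := getControllerOf(m); ref != nil {
		fmt.Println("controlled by", ref.UID) // controlled by rs-2
	}
}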
@@ -103,7 +103,7 @@ func getControllerOf(controllee api.ObjectMeta) *api.OwnerReference {

 // AdoptPod sends a patch to take control of the pod. It returns the error if
 // the patching fails.
-func (m *PodControllerRefManager) AdoptPod(pod *api.Pod) error {
+func (m *PodControllerRefManager) AdoptPod(pod *v1.Pod) error {
 // we should not adopt any pods if the controller is about to be deleted
 if m.controllerObject.DeletionTimestamp != nil {
 return fmt.Errorf("cancel the adopt attempt for pod %s because the controlller is being deleted",

@@ -118,7 +118,7 @@ func (m *PodControllerRefManager) AdoptPod(pod *api.Pod) error {

 // ReleasePod sends a patch to free the pod from the control of the controller.
 // It returns the error if the patching fails. 404 and 422 errors are ignored.
-func (m *PodControllerRefManager) ReleasePod(pod *api.Pod) error {
+func (m *PodControllerRefManager) ReleasePod(pod *v1.Pod) error {
 glog.V(2).Infof("patching pod %s_%s to remove its controllerRef to %s/%s:%s",
 pod.Namespace, pod.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.controllerObject.Name)
 deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.controllerObject.UID, pod.UID)
@@ -26,11 +26,13 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/meta"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/api/validation"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/conversion"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
"k8s.io/kubernetes/pkg/util/clock"
|
||||
@@ -363,11 +365,11 @@ const (
 // created as an interface to allow testing.
 type PodControlInterface interface {
 // CreatePods creates new pods according to the spec.
-CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error
+CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error
 // CreatePodsOnNode creates a new pod according to the spec on the specified node.
-CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error
+CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error
 // CreatePodsWithControllerRef creates new pods according to the spec, and sets object as the pod's controller.
-CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error
+CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *v1.OwnerReference) error
 // DeletePod deletes the pod identified by podID.
 DeletePod(namespace string, podID string, object runtime.Object) error
 // PatchPod patches the pod.

@@ -382,7 +384,7 @@ type RealPodControl struct {

 var _ PodControlInterface = &RealPodControl{}

-func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set {
+func getPodsLabelSet(template *v1.PodTemplateSpec) labels.Set {
 desiredLabels := make(labels.Set)
 for k, v := range template.Labels {
 desiredLabels[k] = v

@@ -390,18 +392,18 @@ func getPodsLabelSet(template *api.PodTemplateSpec) labels.Set {
 return desiredLabels
 }

-func getPodsFinalizers(template *api.PodTemplateSpec) []string {
+func getPodsFinalizers(template *v1.PodTemplateSpec) []string {
 desiredFinalizers := make([]string, len(template.Finalizers))
 copy(desiredFinalizers, template.Finalizers)
 return desiredFinalizers
 }

-func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
+func getPodsAnnotationSet(template *v1.PodTemplateSpec, object runtime.Object) (labels.Set, error) {
 desiredAnnotations := make(labels.Set)
 for k, v := range template.Annotations {
 desiredAnnotations[k] = v
 }
-createdByRef, err := api.GetReference(object)
+createdByRef, err := v1.GetReference(object)
 if err != nil {
 return desiredAnnotations, fmt.Errorf("unable to get controller reference: %v", err)
 }
@@ -409,15 +411,15 @@ func getPodsAnnotationSet(template *api.PodTemplateSpec, object runtime.Object)
 // TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
 // would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
 // We need to consistently handle this case of annotation versioning.
-codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"})
+codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"})

-createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{
+createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
 Reference: *createdByRef,
 })
 if err != nil {
 return desiredAnnotations, fmt.Errorf("unable to serialize controller reference: %v", err)
 }
-desiredAnnotations[api.CreatedByAnnotation] = string(createdByRefJson)
+desiredAnnotations[v1.CreatedByAnnotation] = string(createdByRefJson)
 return desiredAnnotations, nil
 }
@@ -430,11 +432,11 @@ func getPodsPrefix(controllerName string) string {
 return prefix
 }

-func (r RealPodControl) CreatePods(namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
+func (r RealPodControl) CreatePods(namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
 return r.createPods("", namespace, template, object, nil)
 }

-func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *api.PodTemplateSpec, controllerObject runtime.Object, controllerRef *api.OwnerReference) error {
+func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *v1.PodTemplateSpec, controllerObject runtime.Object, controllerRef *v1.OwnerReference) error {
 if controllerRef == nil {
 return fmt.Errorf("controllerRef is nil")
 }

@@ -450,7 +452,7 @@ func (r RealPodControl) CreatePodsWithControllerRef(namespace string, template *
 return r.createPods("", namespace, template, controllerObject, controllerRef)
 }

-func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
+func (r RealPodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
 return r.createPods(nodeName, namespace, template, object, nil)
 }
@@ -459,7 +461,7 @@ func (r RealPodControl) PatchPod(namespace, name string, data []byte) error {
 return err
 }

-func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Object, controllerRef *api.OwnerReference) (*api.Pod, error) {
+func GetPodFromTemplate(template *v1.PodTemplateSpec, parentObject runtime.Object, controllerRef *v1.OwnerReference) (*v1.Pod, error) {
 desiredLabels := getPodsLabelSet(template)
 desiredFinalizers := getPodsFinalizers(template)
 desiredAnnotations, err := getPodsAnnotationSet(template, parentObject)
@@ -472,8 +474,8 @@ func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Obje
 }
 prefix := getPodsPrefix(accessor.GetName())

-pod := &api.Pod{
-ObjectMeta: api.ObjectMeta{
+pod := &v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Labels: desiredLabels,
 Annotations: desiredAnnotations,
 GenerateName: prefix,
@@ -483,13 +485,15 @@ func GetPodFromTemplate(template *api.PodTemplateSpec, parentObject runtime.Obje
 if controllerRef != nil {
 pod.OwnerReferences = append(pod.OwnerReferences, *controllerRef)
 }
-if err := api.Scheme.Convert(&template.Spec, &pod.Spec, nil); err != nil {
-return nil, fmt.Errorf("unable to convert pod template: %v", err)
+clone, err := conversion.NewCloner().DeepCopy(&template.Spec)
+if err != nil {
+return nil, err
 }
+pod.Spec = *clone.(*v1.PodSpec)
 return pod, nil
 }

-func (r RealPodControl) createPods(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error {
+func (r RealPodControl) createPods(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object, controllerRef *v1.OwnerReference) error {
 pod, err := GetPodFromTemplate(template, object, controllerRef)
 if err != nil {
 return err
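Since the template spec and the pod spec are now the same v1 type, the cross-version api.Scheme.Convert call gives way to a plain deep copy via conversion.NewCloner().DeepCopy. The standalone sketch below illustrates the deep-copy semantics with a JSON round-trip (an illustration of the idea only, not the cluster's cloner):

package main

import (
	"encoding/json"
	"fmt"
)

// podSpec is a tiny stand-in carrying a map, the part where a shallow
// copy would go wrong.
type podSpec struct {
	NodeName     string            `json:"nodeName"`
	NodeSelector map[string]string `json:"nodeSelector"`
}

// deepCopySpec makes an independent copy; the JSON round-trip keeps the
// sketch dependency-free.
func deepCopySpec(in *podSpec) (*podSpec, error) {
	raw, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	out := &podSpec{}
	if err := json.Unmarshal(raw, out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	orig := &podSpec{NodeName: "foo", NodeSelector: map[string]string{"baz": "blah"}}
	dup, _ := deepCopySpec(orig)
	dup.NodeSelector["baz"] = "changed"
	fmt.Println(orig.NodeSelector["baz"]) // still "blah": the map was copied, not shared
}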
@@ -501,7 +505,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
 return fmt.Errorf("unable to create pods, no labels")
 }
 if newPod, err := r.KubeClient.Core().Pods(namespace).Create(pod); err != nil {
-r.Recorder.Eventf(object, api.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
+r.Recorder.Eventf(object, v1.EventTypeWarning, FailedCreatePodReason, "Error creating: %v", err)
 return fmt.Errorf("unable to create pods: %v", err)
 } else {
 accessor, err := meta.Accessor(object)

@@ -510,7 +514,7 @@ func (r RealPodControl) createPods(nodeName, namespace string, template *api.Pod
 return nil
 }
 glog.V(4).Infof("Controller %v created pod %v", accessor.GetName(), newPod.Name)
-r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)
+r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulCreatePodReason, "Created pod: %v", newPod.Name)
 }
 return nil
 }
@@ -522,18 +526,18 @@ func (r RealPodControl) DeletePod(namespace string, podID string, object runtime
 }
 glog.V(2).Infof("Controller %v deleting pod %v/%v", accessor.GetName(), namespace, podID)
 if err := r.KubeClient.Core().Pods(namespace).Delete(podID, nil); err != nil {
-r.Recorder.Eventf(object, api.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
+r.Recorder.Eventf(object, v1.EventTypeWarning, FailedDeletePodReason, "Error deleting: %v", err)
 return fmt.Errorf("unable to delete pods: %v", err)
 } else {
-r.Recorder.Eventf(object, api.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
+r.Recorder.Eventf(object, v1.EventTypeNormal, SuccessfulDeletePodReason, "Deleted pod: %v", podID)
 }
 return nil
 }

 type FakePodControl struct {
 sync.Mutex
-Templates []api.PodTemplateSpec
-ControllerRefs []api.OwnerReference
+Templates []v1.PodTemplateSpec
+ControllerRefs []v1.OwnerReference
 DeletePodName []string
 Patches [][]byte
 Err error
@@ -551,7 +555,7 @@ func (f *FakePodControl) PatchPod(namespace, name string, data []byte) error {
 return nil
 }

-func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec, object runtime.Object) error {
+func (f *FakePodControl) CreatePods(namespace string, spec *v1.PodTemplateSpec, object runtime.Object) error {
 f.Lock()
 defer f.Unlock()
 f.Templates = append(f.Templates, *spec)

@@ -561,7 +565,7 @@ func (f *FakePodControl) CreatePods(namespace string, spec *api.PodTemplateSpec,
 return nil
 }

-func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *api.PodTemplateSpec, object runtime.Object, controllerRef *api.OwnerReference) error {
+func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *v1.PodTemplateSpec, object runtime.Object, controllerRef *v1.OwnerReference) error {
 f.Lock()
 defer f.Unlock()
 f.Templates = append(f.Templates, *spec)

@@ -572,7 +576,7 @@ func (f *FakePodControl) CreatePodsWithControllerRef(namespace string, spec *api
 return nil
 }

-func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *api.PodTemplateSpec, object runtime.Object) error {
+func (f *FakePodControl) CreatePodsOnNode(nodeName, namespace string, template *v1.PodTemplateSpec, object runtime.Object) error {
 f.Lock()
 defer f.Unlock()
 f.Templates = append(f.Templates, *template)
@@ -596,13 +600,13 @@ func (f *FakePodControl) Clear() {
 f.Lock()
 defer f.Unlock()
 f.DeletePodName = []string{}
-f.Templates = []api.PodTemplateSpec{}
-f.ControllerRefs = []api.OwnerReference{}
+f.Templates = []v1.PodTemplateSpec{}
+f.ControllerRefs = []v1.OwnerReference{}
 f.Patches = [][]byte{}
 }

 // ByLogging allows custom sorting of pods so the best one can be picked for getting its logs.
-type ByLogging []*api.Pod
+type ByLogging []*v1.Pod

 func (s ByLogging) Len() int { return len(s) }
 func (s ByLogging) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
@@ -613,18 +617,18 @@ func (s ByLogging) Less(i, j int) bool {
 return len(s[i].Spec.NodeName) > 0
 }
 // 2. PodRunning < PodUnknown < PodPending
-m := map[api.PodPhase]int{api.PodRunning: 0, api.PodUnknown: 1, api.PodPending: 2}
+m := map[v1.PodPhase]int{v1.PodRunning: 0, v1.PodUnknown: 1, v1.PodPending: 2}
 if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
 return m[s[i].Status.Phase] < m[s[j].Status.Phase]
 }
 // 3. ready < not ready
-if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) {
-return api.IsPodReady(s[i])
+if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
+return v1.IsPodReady(s[i])
 }
 // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
 // see https://github.com/kubernetes/kubernetes/issues/22065
 // 4. Been ready for more time < less time < empty time
-if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
+if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
 return afterOrZero(podReadyTime(s[j]), podReadyTime(s[i]))
 }
 // 5. Pods with containers with higher restart counts < lower restart counts
@@ -639,7 +643,7 @@ func (s ByLogging) Less(i, j int) bool {
 }

 // ActivePods type allows custom sorting of pods so a controller can pick the best ones to delete.
-type ActivePods []*api.Pod
+type ActivePods []*v1.Pod

 func (s ActivePods) Len() int { return len(s) }
 func (s ActivePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
@@ -651,20 +655,20 @@ func (s ActivePods) Less(i, j int) bool {
 return len(s[i].Spec.NodeName) == 0
 }
 // 2. PodPending < PodUnknown < PodRunning
-m := map[api.PodPhase]int{api.PodPending: 0, api.PodUnknown: 1, api.PodRunning: 2}
+m := map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2}
 if m[s[i].Status.Phase] != m[s[j].Status.Phase] {
 return m[s[i].Status.Phase] < m[s[j].Status.Phase]
 }
 // 3. Not ready < ready
 // If only one of the pods is not ready, the not ready one is smaller
-if api.IsPodReady(s[i]) != api.IsPodReady(s[j]) {
-return !api.IsPodReady(s[i])
+if v1.IsPodReady(s[i]) != v1.IsPodReady(s[j]) {
+return !v1.IsPodReady(s[i])
 }
 // TODO: take availability into account when we push minReadySeconds information from deployment into pods,
 // see https://github.com/kubernetes/kubernetes/issues/22065
 // 4. Been ready for empty time < less time < more time
 // If both pods are ready, the latest ready one is smaller
-if api.IsPodReady(s[i]) && api.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
+if v1.IsPodReady(s[i]) && v1.IsPodReady(s[j]) && !podReadyTime(s[i]).Equal(podReadyTime(s[j])) {
 return afterOrZero(podReadyTime(s[i]), podReadyTime(s[j]))
 }
 // 5. Pods with containers with higher restart counts < lower restart counts
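ActivePods implements sort.Interface as a cascade of tie-breakers (unscheduled before scheduled, pending before running, not-ready before ready, more restarts first), so a controller scaling down can delete the least-settled pods from the front of the sorted slice. A self-contained miniature of the same pattern:

package main

import (
	"fmt"
	"sort"
)

type pod struct {
	name     string
	phase    string
	ready    bool
	restarts int
}

type activePods []pod

func (s activePods) Len() int      { return len(s) }
func (s activePods) Swap(i, j int) { s[i], s[j] = s[j], s[i] }

// Less mirrors the cascade above: Pending < Unknown < Running, then
// not-ready before ready, then higher restart counts first.
func (s activePods) Less(i, j int) bool {
	rank := map[string]int{"Pending": 0, "Unknown": 1, "Running": 2}
	if rank[s[i].phase] != rank[s[j].phase] {
		return rank[s[i].phase] < rank[s[j].phase]
	}
	if s[i].ready != s[j].ready {
		return !s[i].ready // the not-ready pod sorts first
	}
	return s[i].restarts > s[j].restarts
}

func main() {
	pods := activePods{
		{"a", "Running", true, 0},
		{"b", "Pending", false, 0},
		{"c", "Running", false, 3},
	}
	sort.Sort(pods)
	for _, p := range pods {
		fmt.Println(p.name)
	}
	// Prints b, c, a — the best deletion candidates come first.
}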
@@ -687,11 +691,11 @@ func afterOrZero(t1, t2 unversioned.Time) bool {
 return t1.After(t2.Time)
 }

-func podReadyTime(pod *api.Pod) unversioned.Time {
-if api.IsPodReady(pod) {
+func podReadyTime(pod *v1.Pod) unversioned.Time {
+if v1.IsPodReady(pod) {
 for _, c := range pod.Status.Conditions {
 // we only care about pod ready conditions
-if c.Type == api.PodReady && c.Status == api.ConditionTrue {
+if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
 return c.LastTransitionTime
 }
 }
@@ -699,7 +703,7 @@ func podReadyTime(pod *api.Pod) unversioned.Time {
 return unversioned.Time{}
 }

-func maxContainerRestarts(pod *api.Pod) int {
+func maxContainerRestarts(pod *v1.Pod) int {
 maxRestarts := 0
 for _, c := range pod.Status.ContainerStatuses {
 maxRestarts = integer.IntMax(maxRestarts, int(c.RestartCount))
@@ -708,8 +712,8 @@ func maxContainerRestarts(pod *api.Pod) int {
 }

 // FilterActivePods returns pods that have not terminated.
-func FilterActivePods(pods []*api.Pod) []*api.Pod {
-var result []*api.Pod
+func FilterActivePods(pods []*v1.Pod) []*v1.Pod {
+var result []*v1.Pod
 for _, p := range pods {
 if IsPodActive(p) {
 result = append(result, p)
@@ -721,9 +725,9 @@ func FilterActivePods(pods []*api.Pod) []*api.Pod {
 return result
 }

-func IsPodActive(p *api.Pod) bool {
-return api.PodSucceeded != p.Status.Phase &&
-api.PodFailed != p.Status.Phase &&
+func IsPodActive(p *v1.Pod) bool {
+return v1.PodSucceeded != p.Status.Phase &&
+v1.PodFailed != p.Status.Phase &&
 p.DeletionTimestamp == nil
 }
@@ -733,7 +737,7 @@ func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions
 for i := range replicaSets {
 rs := replicaSets[i]

-if rs != nil && rs.Spec.Replicas > 0 {
+if rs != nil && *(rs.Spec.Replicas) > 0 {
 active = append(active, replicaSets[i])
 }
 }
@@ -744,12 +748,12 @@ func FilterActiveReplicaSets(replicaSets []*extensions.ReplicaSet) []*extensions
 // It's used so we consistently use the same key scheme in this module.
 // It does exactly what cache.MetaNamespaceKeyFunc would have done
 // except there's not possibility for error since we know the exact type.
-func PodKey(pod *api.Pod) string {
+func PodKey(pod *v1.Pod) string {
 return fmt.Sprintf("%v/%v", pod.Namespace, pod.Name)
 }

 // ControllersByCreationTimestamp sorts a list of ReplicationControllers by creation timestamp, using their names as a tie breaker.
-type ControllersByCreationTimestamp []*api.ReplicationController
+type ControllersByCreationTimestamp []*v1.ReplicationController

 func (o ControllersByCreationTimestamp) Len() int { return len(o) }
 func (o ControllersByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
@@ -779,10 +783,10 @@ type ReplicaSetsBySizeOlder []*extensions.ReplicaSet
 func (o ReplicaSetsBySizeOlder) Len() int { return len(o) }
 func (o ReplicaSetsBySizeOlder) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
 func (o ReplicaSetsBySizeOlder) Less(i, j int) bool {
-if o[i].Spec.Replicas == o[j].Spec.Replicas {
+if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
 return ReplicaSetsByCreationTimestamp(o).Less(i, j)
 }
-return o[i].Spec.Replicas > o[j].Spec.Replicas
+return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
 }

 // ReplicaSetsBySizeNewer sorts a list of ReplicaSet by size in descending order, using their creation timestamp or name as a tie breaker.
@@ -792,8 +796,8 @@ type ReplicaSetsBySizeNewer []*extensions.ReplicaSet
 func (o ReplicaSetsBySizeNewer) Len() int { return len(o) }
 func (o ReplicaSetsBySizeNewer) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
 func (o ReplicaSetsBySizeNewer) Less(i, j int) bool {
-if o[i].Spec.Replicas == o[j].Spec.Replicas {
+if *(o[i].Spec.Replicas) == *(o[j].Spec.Replicas) {
 return ReplicaSetsByCreationTimestamp(o).Less(j, i)
 }
-return o[i].Spec.Replicas > o[j].Spec.Replicas
+return *(o[i].Spec.Replicas) > *(o[j].Spec.Replicas)
 }
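In the versioned extensions/v1beta1 types, ReplicaSet's Spec.Replicas is a *int32 rather than an int32, which is why the comparisons above now dereference with *(...). When a nil pointer is possible, a small guard helper keeps callers safe (a style sketch, not code from this commit):

package main

import "fmt"

// replicasOrZero guards the pointer dereference the hunks above rely on;
// callers that might see an unset spec treat nil as zero replicas.
func replicasOrZero(r *int32) int32 {
	if r == nil {
		return 0
	}
	return *r
}

func main() {
	three := int32(3)
	fmt.Println(replicasOrZero(&three), replicasOrZero(nil)) // 3 0
}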
@@ -17,6 +17,7 @@ limitations under the License.
 package controller

 import (
+"encoding/json"
 "fmt"
 "math/rand"
 "net/http/httptest"
@@ -26,12 +27,12 @@ import (
 "testing"
 "time"

-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/testapi"
 "k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/apimachinery/registered"
 "k8s.io/kubernetes/pkg/client/cache"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/client/restclient"
 "k8s.io/kubernetes/pkg/runtime"
@@ -52,36 +53,36 @@ func NewFakeControllerExpectationsLookup(ttl time.Duration) (*ControllerExpectat
 return &ControllerExpectations{ttlStore}, fakeClock
 }

-func newReplicationController(replicas int) *api.ReplicationController {
-rc := &api.ReplicationController{
-TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
-ObjectMeta: api.ObjectMeta{
+func newReplicationController(replicas int) *v1.ReplicationController {
+rc := &v1.ReplicationController{
+TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
+ObjectMeta: v1.ObjectMeta{
 UID: uuid.NewUUID(),
 Name: "foobar",
-Namespace: api.NamespaceDefault,
+Namespace: v1.NamespaceDefault,
 ResourceVersion: "18",
 },
-Spec: api.ReplicationControllerSpec{
-Replicas: int32(replicas),
+Spec: v1.ReplicationControllerSpec{
+Replicas: func() *int32 { i := int32(replicas); return &i }(),
 Selector: map[string]string{"foo": "bar"},
-Template: &api.PodTemplateSpec{
-ObjectMeta: api.ObjectMeta{
+Template: &v1.PodTemplateSpec{
+ObjectMeta: v1.ObjectMeta{
 Labels: map[string]string{
 "name": "foo",
 "type": "production",
 },
 },
-Spec: api.PodSpec{
-Containers: []api.Container{
+Spec: v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "foo/bar",
-TerminationMessagePath: api.TerminationMessagePathDefault,
-ImagePullPolicy: api.PullIfNotPresent,
+TerminationMessagePath: v1.TerminationMessagePathDefault,
+ImagePullPolicy: v1.PullIfNotPresent,
 SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
 },
 },
-RestartPolicy: api.RestartPolicyAlways,
-DNSPolicy: api.DNSDefault,
+RestartPolicy: v1.RestartPolicyAlways,
+DNSPolicy: v1.DNSDefault,
 NodeSelector: map[string]string{
 "baz": "blah",
 },
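Because Go has no address-of-literal syntax, the test builds the *int32 replica count with an immediately invoked closure. When the pattern repeats, a named helper is the usual alternative (again a style suggestion, not part of the commit):

package main

import "fmt"

// int32Ptr is the conventional replacement for the inline
// func() *int32 { i := int32(n); return &i }() closure.
func int32Ptr(i int32) *int32 { return &i }

func main() {
	replicas := int32Ptr(2)
	fmt.Println(*replicas) // 2
}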
@@ -93,23 +94,23 @@ func newReplicationController(replicas int) *api.ReplicationController {
 }

 // create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
-func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController) *api.PodList {
-pods := []api.Pod{}
+func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController) *v1.PodList {
+pods := []v1.Pod{}
 for i := 0; i < count; i++ {
-newPod := api.Pod{
-ObjectMeta: api.ObjectMeta{
+newPod := v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Name: fmt.Sprintf("pod%d", i),
 Labels: rc.Spec.Selector,
 Namespace: rc.Namespace,
 },
-Status: api.PodStatus{Phase: status},
+Status: v1.PodStatus{Phase: status},
 }
 if store != nil {
 store.Add(&newPod)
 }
 pods = append(pods, newPod)
 }
-return &api.PodList{
+return &v1.PodList{
 Items: pods,
 }
 }
@@ -187,7 +188,7 @@ func TestControllerExpectations(t *testing.T) {

 func TestUIDExpectations(t *testing.T) {
 uidExp := NewUIDTrackingControllerExpectations(NewControllerExpectations())
-rcList := []*api.ReplicationController{
+rcList := []*v1.ReplicationController{
 newReplicationController(2),
 newReplicationController(1),
 newReplicationController(0),
@@ -200,7 +201,7 @@ func TestUIDExpectations(t *testing.T) {
 rcName := fmt.Sprintf("rc-%v", i)
 rc.Name = rcName
 rc.Spec.Selector[rcName] = rcName
-podList := newPodList(nil, 5, api.PodRunning, rc)
+podList := newPodList(nil, 5, v1.PodRunning, rc)
 rcKey, err := KeyFunc(rc)
 if err != nil {
 t.Fatalf("Couldn't get key for object %#v: %v", rc, err)
@@ -237,15 +238,15 @@ func TestUIDExpectations(t *testing.T) {
 }

 func TestCreatePods(t *testing.T) {
-ns := api.NamespaceDefault
-body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}})
+ns := v1.NamespaceDefault
+body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
 fakeHandler := utiltesting.FakeHandler{
 StatusCode: 200,
 ResponseBody: string(body),
 }
 testServer := httptest.NewServer(&fakeHandler)
 defer testServer.Close()
-clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

 podControl := RealPodControl{
 KubeClient: clientset,
@@ -259,19 +260,20 @@ func TestCreatePods(t *testing.T) {
 t.Fatalf("unexpected error: %v", err)
 }

-expectedPod := api.Pod{
-ObjectMeta: api.ObjectMeta{
+expectedPod := v1.Pod{
+ObjectMeta: v1.ObjectMeta{
 Labels: controllerSpec.Spec.Template.Labels,
 GenerateName: fmt.Sprintf("%s-", controllerSpec.Name),
 },
 Spec: controllerSpec.Spec.Template.Spec,
 }
-fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", api.NamespaceDefault, ""), "POST", nil)
-actualPod, err := runtime.Decode(testapi.Default.Codec(), []byte(fakeHandler.RequestBody))
+fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath("pods", v1.NamespaceDefault, ""), "POST", nil)
+var actualPod = &v1.Pod{}
+err := json.Unmarshal([]byte(fakeHandler.RequestBody), actualPod)
 if err != nil {
 t.Fatalf("Unexpected error: %v", err)
 }
-if !api.Semantic.DeepDerivative(&expectedPod, actualPod) {
+if !v1.Semantic.DeepDerivative(&expectedPod, actualPod) {
 t.Logf("Body: %s", fakeHandler.RequestBody)
 t.Errorf("Unexpected mismatch. Expected\n %#v,\n Got:\n %#v", &expectedPod, actualPod)
 }
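The test now decodes the captured request body straight into a v1.Pod with encoding/json instead of going through the API codec, since the wire payload is already versioned JSON. A dependency-free miniature of the same decode-and-inspect step:

package main

import (
	"encoding/json"
	"fmt"
)

// pod is a stand-in for the fields of v1.Pod this check cares about.
type pod struct {
	Metadata struct {
		GenerateName string `json:"generateName"`
	} `json:"metadata"`
}

func main() {
	// requestBody plays the role of fakeHandler.RequestBody in the test.
	requestBody := `{"metadata":{"generateName":"foobar-"}}`
	actual := &pod{}
	if err := json.Unmarshal([]byte(requestBody), actual); err != nil {
		fmt.Println("unexpected error:", err)
		return
	}
	fmt.Println(actual.Metadata.GenerateName) // foobar-
}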
@@ -280,15 +282,15 @@ func TestCreatePods(t *testing.T) {
 func TestActivePodFiltering(t *testing.T) {
 // This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
 rc := newReplicationController(0)
-podList := newPodList(nil, 5, api.PodRunning, rc)
-podList.Items[0].Status.Phase = api.PodSucceeded
-podList.Items[1].Status.Phase = api.PodFailed
+podList := newPodList(nil, 5, v1.PodRunning, rc)
+podList.Items[0].Status.Phase = v1.PodSucceeded
+podList.Items[1].Status.Phase = v1.PodFailed
 expectedNames := sets.NewString()
 for _, pod := range podList.Items[2:] {
 expectedNames.Insert(pod.Name)
 }

-var podPointers []*api.Pod
+var podPointers []*v1.Pod
 for i := range podList.Items {
 podPointers = append(podPointers, &podList.Items[i])
 }
@@ -306,55 +308,55 @@ func TestSortingActivePods(t *testing.T) {
 numPods := 9
 // This rc is not needed by the test, only the newPodList to give the pods labels/a namespace.
 rc := newReplicationController(0)
-podList := newPodList(nil, numPods, api.PodRunning, rc)
+podList := newPodList(nil, numPods, v1.PodRunning, rc)

-pods := make([]*api.Pod, len(podList.Items))
+pods := make([]*v1.Pod, len(podList.Items))
 for i := range podList.Items {
 pods[i] = &podList.Items[i]
 }
 // pods[0] is not scheduled yet.
 pods[0].Spec.NodeName = ""
-pods[0].Status.Phase = api.PodPending
+pods[0].Status.Phase = v1.PodPending
 // pods[1] is scheduled but pending.
 pods[1].Spec.NodeName = "bar"
-pods[1].Status.Phase = api.PodPending
+pods[1].Status.Phase = v1.PodPending
 // pods[2] is unknown.
 pods[2].Spec.NodeName = "foo"
-pods[2].Status.Phase = api.PodUnknown
+pods[2].Status.Phase = v1.PodUnknown
 // pods[3] is running but not ready.
 pods[3].Spec.NodeName = "foo"
-pods[3].Status.Phase = api.PodRunning
+pods[3].Status.Phase = v1.PodRunning
 // pods[4] is running and ready but without LastTransitionTime.
 now := unversioned.Now()
 pods[4].Spec.NodeName = "foo"
-pods[4].Status.Phase = api.PodRunning
-pods[4].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
-pods[4].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
+pods[4].Status.Phase = v1.PodRunning
+pods[4].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
+pods[4].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
 // pods[5] is running and ready and with LastTransitionTime.
 pods[5].Spec.NodeName = "foo"
-pods[5].Status.Phase = api.PodRunning
-pods[5].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: now}}
-pods[5].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
+pods[5].Status.Phase = v1.PodRunning
+pods[5].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: now}}
+pods[5].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
 // pods[6] is running ready for a longer time than pods[5].
 then := unversioned.Time{Time: now.AddDate(0, -1, 0)}
 pods[6].Spec.NodeName = "foo"
-pods[6].Status.Phase = api.PodRunning
-pods[6].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}}
-pods[6].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
+pods[6].Status.Phase = v1.PodRunning
+pods[6].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
+pods[6].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 3}, {RestartCount: 0}}
 // pods[7] has lower container restart count than pods[6].
 pods[7].Spec.NodeName = "foo"
-pods[7].Status.Phase = api.PodRunning
-pods[7].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}}
-pods[7].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
+pods[7].Status.Phase = v1.PodRunning
+pods[7].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
+pods[7].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
 pods[7].CreationTimestamp = now
 // pods[8] is older than pods[7].
 pods[8].Spec.NodeName = "foo"
-pods[8].Status.Phase = api.PodRunning
-pods[8].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue, LastTransitionTime: then}}
-pods[8].Status.ContainerStatuses = []api.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
+pods[8].Status.Phase = v1.PodRunning
+pods[8].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: then}}
+pods[8].Status.ContainerStatuses = []v1.ContainerStatus{{RestartCount: 2}, {RestartCount: 1}}
 pods[8].CreationTimestamp = then

-getOrder := func(pods []*api.Pod) []string {
+getOrder := func(pods []*v1.Pod) []string {
 names := make([]string, len(pods))
 for i := range pods {
 names[i] = pods[i].Name
@@ -366,7 +368,7 @@ func TestSortingActivePods(t *testing.T) {

 for i := 0; i < 20; i++ {
 idx := rand.Perm(numPods)
-randomizedPods := make([]*api.Pod, numPods)
+randomizedPods := make([]*v1.Pod, numPods)
 for j := 0; j < numPods; j++ {
 randomizedPods[j] = pods[idx[j]]
 }
@@ -34,14 +34,13 @@ import (

 "github.com/golang/glog"

-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/errors"
 "k8s.io/kubernetes/pkg/api/unversioned"
-"k8s.io/kubernetes/pkg/apis/batch"
-clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+"k8s.io/kubernetes/pkg/api/v1"
+batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
+clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 "k8s.io/kubernetes/pkg/client/record"
-"k8s.io/kubernetes/pkg/controller/job"
 "k8s.io/kubernetes/pkg/runtime"
 utilerrors "k8s.io/kubernetes/pkg/util/errors"
 "k8s.io/kubernetes/pkg/util/metrics"
@@ -63,7 +62,7 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
 eventBroadcaster := record.NewBroadcaster()
 eventBroadcaster.StartLogging(glog.Infof)
 // TODO: remove the wrapper when every clients have moved to use the clientset.
-eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
+eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

 if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
 metrics.RegisterMetricAndTrackRateLimiterUsage("cronjob_controller", kubeClient.Core().RESTClient().GetRateLimiter())
@@ -74,7 +73,7 @@ func NewCronJobController(kubeClient clientset.Interface) *CronJobController {
 jobControl: realJobControl{KubeClient: kubeClient},
 sjControl: &realSJControl{KubeClient: kubeClient},
 podControl: &realPodControl{KubeClient: kubeClient},
-recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "cronjob-controller"}),
+recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "cronjob-controller"}),
 }

 return jm
@@ -97,7 +96,7 @@ func (jm *CronJobController) Run(stopCh <-chan struct{}) {

 // SyncAll lists all the CronJobs and Jobs and reconciles them.
 func (jm *CronJobController) SyncAll() {
-sjl, err := jm.kubeClient.Batch().CronJobs(api.NamespaceAll).List(api.ListOptions{})
+sjl, err := jm.kubeClient.BatchV2alpha1().CronJobs(v1.NamespaceAll).List(v1.ListOptions{})
 if err != nil {
 glog.Errorf("Error listing cronjobs: %v", err)
 return
@@ -105,7 +104,7 @@ func (jm *CronJobController) SyncAll() {
 sjs := sjl.Items
 glog.V(4).Infof("Found %d cronjobs", len(sjs))

-jl, err := jm.kubeClient.Batch().Jobs(api.NamespaceAll).List(api.ListOptions{})
+jl, err := jm.kubeClient.BatchV2alpha1().Jobs(v1.NamespaceAll).List(v1.ListOptions{})
 if err != nil {
 glog.Errorf("Error listing jobs")
 return
@@ -131,8 +130,8 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 for i := range js {
 j := js[i]
 found := inActiveList(sj, j.ObjectMeta.UID)
-if !found && !job.IsJobFinished(&j) {
-recorder.Eventf(&sj, api.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
+if !found && !IsJobFinished(&j) {
+recorder.Eventf(&sj, v1.EventTypeWarning, "UnexpectedJob", "Saw a job that the controller did not create or forgot: %v", j.Name)
 // We found an unfinished job that has us as the parent, but it is not in our Active list.
 // This could happen if we crashed right after creating the Job and before updating the status,
 // or if our jobs list is newer than our sj status after a relist, or if someone intentionally created

@@ -143,10 +142,10 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 // user has permission to create a job within a namespace, then they have permission to make any scheduledJob
 // in the same namespace "adopt" that job. ReplicaSets and their Pods work the same way.
 // TBS: how to update sj.Status.LastScheduleTime if the adopted job is newer than any we knew about?
-} else if found && job.IsJobFinished(&j) {
+} else if found && IsJobFinished(&j) {
 deleteFromActiveList(&sj, j.ObjectMeta.UID)
 // TODO: event to call out failure vs success.
-recorder.Eventf(&sj, api.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
+recorder.Eventf(&sj, v1.EventTypeNormal, "SawCompletedJob", "Saw completed job: %v", j.Name)
 }
 }
 updatedSJ, err := sjc.UpdateStatus(&sj)
@@ -209,7 +208,7 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 glog.V(4).Infof("Deleting job %s of %s that was still running at next scheduled start time", j.Name, nameForLog)
 job, err := jc.GetJob(j.Namespace, j.Name)
 if err != nil {
-recorder.Eventf(&sj, api.EventTypeWarning, "FailedGet", "Get job: %v", err)
+recorder.Eventf(&sj, v1.EventTypeWarning, "FailedGet", "Get job: %v", err)
 return
 }
 // scale job down to 0
@@ -218,16 +217,16 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 job.Spec.Parallelism = &zero
 job, err = jc.UpdateJob(job.Namespace, job)
 if err != nil {
-recorder.Eventf(&sj, api.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
+recorder.Eventf(&sj, v1.EventTypeWarning, "FailedUpdate", "Update job: %v", err)
 return
 }
 }
 // remove all pods...
 selector, _ := unversioned.LabelSelectorAsSelector(job.Spec.Selector)
-options := api.ListOptions{LabelSelector: selector}
+options := v1.ListOptions{LabelSelector: selector.String()}
 podList, err := pc.ListPods(job.Namespace, options)
 if err != nil {
-recorder.Eventf(&sj, api.EventTypeWarning, "FailedList", "List job-pods: %v", err)
+recorder.Eventf(&sj, v1.EventTypeWarning, "FailedList", "List job-pods: %v", err)
 }
 errList := []error{}
 for _, pod := range podList.Items {
@@ -240,18 +239,18 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 }
 }
 if len(errList) != 0 {
-recorder.Eventf(&sj, api.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
+recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job-pods: %v", utilerrors.NewAggregate(errList))
 return
 }
 // ... the job itself...
 if err := jc.DeleteJob(job.Namespace, job.Name); err != nil {
-recorder.Eventf(&sj, api.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
+recorder.Eventf(&sj, v1.EventTypeWarning, "FailedDelete", "Deleted job: %v", err)
 glog.Errorf("Error deleting job %s from %s: %v", job.Name, nameForLog, err)
 return
 }
 // ... and its reference from active list
 deleteFromActiveList(&sj, job.ObjectMeta.UID)
-recorder.Eventf(&sj, api.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", j.Name)
+recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulDelete", "Deleted job %v", j.Name)
 }
 }
@@ -262,11 +261,11 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 }
 jobResp, err := jc.CreateJob(sj.Namespace, jobReq)
 if err != nil {
-recorder.Eventf(&sj, api.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
+recorder.Eventf(&sj, v1.EventTypeWarning, "FailedCreate", "Error creating job: %v", err)
 return
 }
 glog.V(4).Infof("Created Job %s for %s", jobResp.Name, nameForLog)
-recorder.Eventf(&sj, api.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)
+recorder.Eventf(&sj, v1.EventTypeNormal, "SuccessfulCreate", "Created job %v", jobResp.Name)

 // ------------------------------------------------------------------ //
@@ -293,6 +292,6 @@ func SyncOne(sj batch.CronJob, js []batch.Job, now time.Time, jc jobControlInter
 return
 }

-func getRef(object runtime.Object) (*api.ObjectReference, error) {
-return api.GetReference(object)
+func getRef(object runtime.Object) (*v1.ObjectReference, error) {
+return v1.GetReference(object)
 }
@@ -20,9 +20,9 @@ import (
 "testing"
 "time"

-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/unversioned"
-"k8s.io/kubernetes/pkg/apis/batch"
+"k8s.io/kubernetes/pkg/api/v1"
+batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
 "k8s.io/kubernetes/pkg/client/record"
 "k8s.io/kubernetes/pkg/types"
 )
@@ -75,7 +75,7 @@ func justAfterThePriorHour() time.Time {
 // returns a cronJob with some fields filled in.
 func cronJob() batch.CronJob {
 return batch.CronJob{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "mycronjob",
 Namespace: "snazzycats",
 UID: types.UID("1a2b3c"),

@@ -86,7 +86,7 @@ func cronJob() batch.CronJob {
 Schedule: "* * * * ?",
 ConcurrencyPolicy: batch.AllowConcurrent,
 JobTemplate: batch.JobTemplateSpec{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Labels: map[string]string{"a": "b"},
 Annotations: map[string]string{"x": "y"},
 },
@@ -101,14 +101,14 @@ func jobSpec() batch.JobSpec {
|
||||
return batch.JobSpec{
|
||||
Parallelism: &one,
|
||||
Completions: &one,
|
||||
Template: api.PodTemplateSpec{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Template: v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Labels: map[string]string{
|
||||
"foo": "bar",
|
||||
},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{Image: "foo/bar"},
|
||||
},
|
||||
},
|
||||
@@ -118,10 +118,10 @@ func jobSpec() batch.JobSpec {
|
||||
|
||||
func newJob(UID string) batch.Job {
|
||||
return batch.Job{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
UID: types.UID(UID),
|
||||
Name: "foobar",
|
||||
Namespace: api.NamespaceDefault,
|
||||
Namespace: v1.NamespaceDefault,
|
||||
SelfLink: "/apis/batch/v1/namespaces/snazzycats/jobs/myjob",
|
||||
},
|
||||
Spec: jobSpec(),
|
||||
@@ -213,7 +213,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
|
||||
job.UID = "1234"
|
||||
job.Namespace = ""
|
||||
if tc.stillActive {
|
||||
sj.Status.Active = []api.ObjectReference{{UID: job.UID}}
|
||||
sj.Status.Active = []v1.ObjectReference{{UID: job.UID}}
|
||||
js = append(js, *job)
|
||||
}
|
||||
} else {
|
||||
@@ -271,7 +271,7 @@ func TestSyncOne_RunOrNot(t *testing.T) {
|
||||
// TestSyncOne_Status tests sj.UpdateStatus in SyncOne
|
||||
func TestSyncOne_Status(t *testing.T) {
|
||||
finishedJob := newJob("1")
|
||||
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: api.ConditionTrue})
|
||||
finishedJob.Status.Conditions = append(finishedJob.Status.Conditions, batch.JobCondition{Type: batch.JobComplete, Status: v1.ConditionTrue})
|
||||
unexpectedJob := newJob("2")
|
||||
|
||||
testCases := map[string]struct {
|
||||
@@ -360,7 +360,7 @@ func TestSyncOne_Status(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Errorf("%s: test setup error: failed to get job's ref: %v.", name, err)
|
||||
}
|
||||
sj.Status.Active = []api.ObjectReference{*ref}
|
||||
sj.Status.Active = []v1.ObjectReference{*ref}
|
||||
jobs = append(jobs, finishedJob)
|
||||
}
|
||||
if tc.hasUnexpectedJob {
|
||||
|
||||
@@ -20,9 +20,9 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/batch"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/record"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
)
|
||||
@@ -41,7 +41,7 @@ type realSJControl struct {
|
||||
var _ sjControlInterface = &realSJControl{}
|
||||
|
||||
func (c *realSJControl) UpdateStatus(sj *batch.CronJob) (*batch.CronJob, error) {
|
||||
return c.KubeClient.Batch().CronJobs(sj.Namespace).UpdateStatus(sj)
|
||||
return c.KubeClient.BatchV2alpha1().CronJobs(sj.Namespace).UpdateStatus(sj)
|
||||
}
|
||||
|
||||
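The generated release_1_5 clientset replaces the internal clientset's version-agnostic group accessors with group-version-qualified ones, which is the mechanical change running through this file's hunks. A minimal sketch of the new call shape (helper name is hypothetical):

    // updateCronJobStatus shows the versioned accessor pattern used throughout this file.
    func updateCronJobStatus(c clientset.Interface, sj *batch.CronJob) (*batch.CronJob, error) {
        // Before: c.Batch().CronJobs(ns)...; after: group-version-qualified accessor.
        return c.BatchV2alpha1().CronJobs(sj.Namespace).UpdateStatus(sj)
    }
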
// fakeSJControl is a fake implementation of sjControlInterface.
@@ -97,19 +97,19 @@ func copyAnnotations(template *batch.JobTemplateSpec) labels.Set {
}

func (r realJobControl) GetJob(namespace, name string) (*batch.Job, error) {
return r.KubeClient.Batch().Jobs(namespace).Get(name)
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Get(name)
}

func (r realJobControl) UpdateJob(namespace string, job *batch.Job) (*batch.Job, error) {
return r.KubeClient.Batch().Jobs(namespace).Update(job)
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Update(job)
}

func (r realJobControl) CreateJob(namespace string, job *batch.Job) (*batch.Job, error) {
return r.KubeClient.Batch().Jobs(namespace).Create(job)
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Create(job)
}

func (r realJobControl) DeleteJob(namespace string, name string) error {
return r.KubeClient.Batch().Jobs(namespace).Delete(name, nil)
return r.KubeClient.BatchV2alpha1().Jobs(namespace).Delete(name, nil)
}

type fakeJobControl struct {
@@ -176,7 +176,7 @@ func (f *fakeJobControl) Clear() {
// created as an interface to allow testing.
type podControlInterface interface {
// ListPods lists pods
ListPods(namespace string, opts api.ListOptions) (*api.PodList, error)
ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error)
// DeletePod deletes the pod identified by name.
// TODO: delete by UID?
DeletePod(namespace string, name string) error
@@ -190,7 +190,7 @@ type realPodControl struct {

var _ podControlInterface = &realPodControl{}

func (r realPodControl) ListPods(namespace string, opts api.ListOptions) (*api.PodList, error) {
func (r realPodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
return r.KubeClient.Core().Pods(namespace).List(opts)
}

@@ -200,17 +200,17 @@ func (r realPodControl) DeletePod(namespace string, name string) error {

type fakePodControl struct {
sync.Mutex
Pods []api.Pod
Pods []v1.Pod
DeletePodName []string
Err error
}

var _ podControlInterface = &fakePodControl{}

func (f *fakePodControl) ListPods(namespace string, opts api.ListOptions) (*api.PodList, error) {
func (f *fakePodControl) ListPods(namespace string, opts v1.ListOptions) (*v1.PodList, error) {
f.Lock()
defer f.Unlock()
return &api.PodList{Items: f.Pods}, nil
return &v1.PodList{Items: f.Pods}, nil
}

func (f *fakePodControl) DeletePod(namespace string, name string) error {

@@ -26,7 +26,8 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
)
@@ -46,7 +47,7 @@ func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {
if sj == nil {
return
}
newActive := []api.ObjectReference{}
newActive := []v1.ObjectReference{}
for _, j := range sj.Status.Active {
if j.UID != uid {
newActive = append(newActive, j)
@@ -57,12 +58,12 @@ func deleteFromActiveList(sj *batch.CronJob, uid types.UID) {

// getParentUIDFromJob extracts UID of job's parent and whether it was found
func getParentUIDFromJob(j batch.Job) (types.UID, bool) {
creatorRefJson, found := j.ObjectMeta.Annotations[api.CreatedByAnnotation]
creatorRefJson, found := j.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !found {
glog.V(4).Infof("Job with no created-by annotation, name %s namespace %s", j.Name, j.Namespace)
return types.UID(""), false
}
var sr api.SerializedReference
var sr v1.SerializedReference
err := json.Unmarshal([]byte(creatorRefJson), &sr)
if err != nil {
glog.V(4).Infof("Job with unparsable created-by annotation, name %s namespace %s: %v", j.Name, j.Namespace, err)
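
The hunk cuts off here, but the decode side of the created-by annotation is just a JSON unmarshal into v1.SerializedReference, after which the parent's UID is available on the nested reference. Presumably the function continues by returning the decoded UID; a sketch of that tail:

    var sr v1.SerializedReference
    if err := json.Unmarshal([]byte(creatorRefJson), &sr); err != nil {
        return types.UID(""), false
    }
    return sr.Reference.UID, true
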
@@ -181,12 +182,12 @@ func getJobFromTemplate(sj *batch.CronJob, scheduledTime time.Time) (*batch.Job,
if err != nil {
return nil, err
}
annotations[api.CreatedByAnnotation] = string(createdByRefJson)
annotations[v1.CreatedByAnnotation] = string(createdByRefJson)
// We want jobs for a given nominal start time to have a deterministic name, to avoid the same job being created twice
name := fmt.Sprintf("%s-%d", sj.Name, getTimeHash(scheduledTime))

job := &batch.Job{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
Annotations: annotations,
Name: name,
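
The deterministic suffix comes from getTimeHash, so retries of SyncOne for the same nominal start time regenerate the same job name instead of minting a duplicate. The hash implementation is outside this hunk; one plausible shape, purely as an assumption:

    // getTimeHash maps a scheduled time to a stable int64 for use in job names.
    func getTimeHash(scheduledTime time.Time) int64 {
        return scheduledTime.Unix()
    }
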
@@ -205,7 +206,7 @@ func getTimeHash(scheduledTime time.Time) int64 {

// makeCreatedByRefJson makes a json string with an object reference for use in "created-by" annotation value
func makeCreatedByRefJson(object runtime.Object) (string, error) {
createdByRef, err := api.GetReference(object)
createdByRef, err := v1.GetReference(object)
if err != nil {
return "", fmt.Errorf("unable to get controller reference: %v", err)
}
@@ -213,9 +214,9 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) {
// TODO: this code was not safe previously - as soon as new code came along that switched to v2, old clients
// would be broken upon reading it. This is explicitly hardcoded to v1 to guarantee predictable deployment.
// We need to consistently handle this case of annotation versioning.
codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: api.GroupName, Version: "v1"})
codec := api.Codecs.LegacyCodec(unversioned.GroupVersion{Group: v1.GroupName, Version: "v1"})

createdByRefJson, err := runtime.Encode(codec, &api.SerializedReference{
createdByRefJson, err := runtime.Encode(codec, &v1.SerializedReference{
Reference: *createdByRef,
})
if err != nil {
@@ -223,3 +224,12 @@ func makeCreatedByRefJson(object runtime.Object) (string, error) {
}
return string(createdByRefJson), nil
}

func IsJobFinished(j *batch.Job) bool {
for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true
}
}
return false
}

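Taken together, the helpers above give the annotation round trip: makeCreatedByRefJson encodes a v1.SerializedReference when a job is created, and getParentUIDFromJob decodes it to attribute the job back to its CronJob. A minimal sketch (sj and job are assumed values, not from this commit):

    refJson, err := makeCreatedByRefJson(&sj)
    if err == nil {
        job.ObjectMeta.Annotations = map[string]string{v1.CreatedByAnnotation: refJson}
        if uid, found := getParentUIDFromJob(job); found && uid == sj.UID {
            // the job is attributed back to its parent CronJob
        }
    }
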
@@ -22,9 +22,9 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v2alpha1"
"k8s.io/kubernetes/pkg/types"
//"k8s.io/kubernetes/pkg/controller"
// "k8s.io/kubernetes/pkg/util/rand"
@@ -38,7 +38,7 @@ func TestGetJobFromTemplate(t *testing.T) {
var no bool = false

sj := batch.CronJob{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "mycronjob",
Namespace: "snazzycats",
UID: types.UID("1a2b3c"),
@@ -48,21 +48,21 @@ func TestGetJobFromTemplate(t *testing.T) {
Schedule: "* * * * ?",
ConcurrencyPolicy: batch.AllowConcurrent,
JobTemplate: batch.JobTemplateSpec{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"a": "b"},
Annotations: map[string]string{"x": "y"},
},
Spec: batch.JobSpec{
ActiveDeadlineSeconds: &one,
ManualSelector: &no,
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
@@ -86,7 +86,7 @@ func TestGetJobFromTemplate(t *testing.T) {
if len(job.ObjectMeta.Annotations) != 2 {
t.Errorf("Wrong number of annotations")
}
v, ok := job.ObjectMeta.Annotations[api.CreatedByAnnotation]
v, ok := job.ObjectMeta.Annotations[v1.CreatedByAnnotation]
if !ok {
t.Errorf("Missing created-by annotation")
}
@@ -102,22 +102,22 @@ func TestGetJobFromTemplate(t *testing.T) {

func TestGetParentUIDFromJob(t *testing.T) {
j := &batch.Job{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
@@ -126,7 +126,7 @@ func TestGetParentUIDFromJob(t *testing.T) {
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobComplete,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
}},
},
}
@@ -140,7 +140,7 @@ func TestGetParentUIDFromJob(t *testing.T) {
}
{
// Case 2: Has UID annotation
j.ObjectMeta.Annotations = map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"default","name":"pi","uid":"5ef034e0-1890-11e6-8935-42010af0003e","apiVersion":"extensions","resourceVersion":"427339"}}`}
j.ObjectMeta.Annotations = map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"default","name":"pi","uid":"5ef034e0-1890-11e6-8935-42010af0003e","apiVersion":"extensions","resourceVersion":"427339"}}`}

expectedUID := types.UID("5ef034e0-1890-11e6-8935-42010af0003e")

@@ -158,9 +158,9 @@ func TestGroupJobsByParent(t *testing.T) {
uid1 := types.UID("11111111-1111-1111-1111-111111111111")
uid2 := types.UID("22222222-2222-2222-2222-222222222222")
uid3 := types.UID("33333333-3333-3333-3333-333333333333")
createdBy1 := map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"11111111-1111-1111-1111-111111111111","apiVersion":"extensions","resourceVersion":"111111"}}`}
createdBy2 := map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"22222222-2222-2222-2222-222222222222","apiVersion":"extensions","resourceVersion":"222222"}}`}
createdBy3 := map[string]string{api.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"y","name":"pi","uid":"33333333-3333-3333-3333-333333333333","apiVersion":"extensions","resourceVersion":"333333"}}`}
createdBy1 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"11111111-1111-1111-1111-111111111111","apiVersion":"extensions","resourceVersion":"111111"}}`}
createdBy2 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"x","name":"pi","uid":"22222222-2222-2222-2222-222222222222","apiVersion":"extensions","resourceVersion":"222222"}}`}
createdBy3 := map[string]string{v1.CreatedByAnnotation: `{"kind":"SerializedReference","apiVersion":"v1","reference":{"kind":"CronJob","namespace":"y","name":"pi","uid":"33333333-3333-3333-3333-333333333333","apiVersion":"extensions","resourceVersion":"333333"}}`}
noCreatedBy := map[string]string{}

{
@@ -176,7 +176,7 @@ func TestGroupJobsByParent(t *testing.T) {
{
// Case 2: there is one controller with no job.
sjs := []batch.CronJob{
{ObjectMeta: api.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
}
js := []batch.Job{}
jobsBySj := groupJobsByParent(sjs, js)
@@ -188,10 +188,10 @@ func TestGroupJobsByParent(t *testing.T) {
{
// Case 3: there is one controller with one job it created.
sjs := []batch.CronJob{
{ObjectMeta: api.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
}
js := []batch.Job{
{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
}
jobsBySj := groupJobsByParent(sjs, js)

@@ -211,18 +211,18 @@ func TestGroupJobsByParent(t *testing.T) {
// Case 4: Two namespaces, one has two jobs from one controller, other has 3 jobs from two controllers.
// There are also two jobs with no created-by annotation.
js := []batch.Job{
{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
{ObjectMeta: api.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: api.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}},
{ObjectMeta: api.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: api.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: api.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "x", Annotations: createdBy2}},
{ObjectMeta: v1.ObjectMeta{Name: "c", Namespace: "x", Annotations: createdBy1}},
{ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "x", Annotations: noCreatedBy}},
{ObjectMeta: v1.ObjectMeta{Name: "a", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: v1.ObjectMeta{Name: "b", Namespace: "y", Annotations: createdBy3}},
{ObjectMeta: v1.ObjectMeta{Name: "d", Namespace: "y", Annotations: noCreatedBy}},
}
sjs := []batch.CronJob{
{ObjectMeta: api.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: api.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
{ObjectMeta: api.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
{ObjectMeta: v1.ObjectMeta{Name: "e", Namespace: "x", UID: uid1}},
{ObjectMeta: v1.ObjectMeta{Name: "f", Namespace: "x", UID: uid2}},
{ObjectMeta: v1.ObjectMeta{Name: "g", Namespace: "y", UID: uid3}},
}

jobsBySj := groupJobsByParent(sjs, js)
@@ -270,9 +270,9 @@ func TestGetRecentUnmetScheduleTimes(t *testing.T) {
}

sj := batch.CronJob{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "mycronjob",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
UID: types.UID("1a2b3c"),
},
Spec: batch.CronJobSpec{

@@ -23,13 +23,13 @@ import (
"sync"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -95,17 +95,17 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.Core().RESTClient().GetRateLimiter())
}
dsc := &DaemonSetsController{
kubeClient: kubeClient,
eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemonset-controller"}),
eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemonset-controller"}),
podControl: controller.RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "daemon-set"}),
Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "daemon-set"}),
},
burstReplicas: BurstReplicas,
expectations: controller.NewControllerExpectations(),
@@ -239,7 +239,7 @@ func (dsc *DaemonSetsController) enqueueDaemonSet(ds *extensions.DaemonSet) {
dsc.queue.Add(key)
}

func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.DaemonSet {
func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.DaemonSet {
// look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := dsc.lookupCache.GetMatchingObject(pod); cached {
ds, ok := obj.(*extensions.DaemonSet)
@@ -272,7 +272,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *api.Pod) *extensions.Daemo
}

// isCacheValid checks if the cache is valid
func (dsc *DaemonSetsController) isCacheValid(pod *api.Pod, cachedDS *extensions.DaemonSet) bool {
func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
_, exists, err := dsc.dsStore.Get(cachedDS)
// ds has been deleted or updated, cache is invalid
if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
@@ -283,7 +283,7 @@ func (dsc *DaemonSetsController) isCacheValid(pod *api.Pod, cachedDS *extensions

// isDaemonSetMatch takes a Pod and DaemonSet and returns whether the Pod and DaemonSet are matching
// TODO(mqliang): This logic is a copy from GetPodDaemonSets(), remove the duplication
func isDaemonSetMatch(pod *api.Pod, ds *extensions.DaemonSet) bool {
func isDaemonSetMatch(pod *v1.Pod, ds *extensions.DaemonSet) bool {
if ds.Namespace != pod.Namespace {
return false
}
@@ -301,7 +301,7 @@ func isDaemonSetMatch(pod *api.Pod, ds *extensions.DaemonSet) bool {
}

func (dsc *DaemonSetsController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s added.", pod.Name)
if ds := dsc.getPodDaemonSet(pod); ds != nil {
dsKey, err := controller.KeyFunc(ds)
@@ -316,10 +316,10 @@ func (dsc *DaemonSetsController) addPod(obj interface{}) {

// When a pod is updated, figure out what sets manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new set. old and cur must be *api.Pod types.
// and new set. old and cur must be *v1.Pod types.
func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
@@ -342,7 +342,7 @@ func (dsc *DaemonSetsController) updatePod(old, cur interface{}) {
}

func (dsc *DaemonSetsController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
@@ -354,7 +354,7 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
return
@@ -379,7 +379,7 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
return
}
node := obj.(*api.Node)
node := obj.(*v1.Node)
for i := range dsList.Items {
ds := &dsList.Items[i]
shouldEnqueue := dsc.nodeShouldRunDaemonPod(node, ds)
@@ -390,8 +390,8 @@ func (dsc *DaemonSetsController) addNode(obj interface{}) {
}

func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
oldNode := old.(*api.Node)
curNode := cur.(*api.Node)
oldNode := old.(*v1.Node)
curNode := cur.(*v1.Node)
if reflect.DeepEqual(oldNode.Labels, curNode.Labels) {
// If node labels didn't change, we can ignore this update.
return
@@ -412,8 +412,8 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
}

// getNodesToDaemonPods returns a map from nodes to daemon pods (corresponding to ds) running on the nodes.
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*api.Pod, error) {
nodeToDaemonPods := make(map[string][]*api.Pod)
func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet) (map[string][]*v1.Pod, error) {
nodeToDaemonPods := make(map[string][]*v1.Pod)
selector, err := unversioned.LabelSelectorAsSelector(ds.Spec.Selector)
if err != nil {
return nil, err
@@ -585,7 +585,7 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
// Sort the daemon pods by creation time, so the oldest is first.
daemonPods, _ := nodeToDaemonPods[node.Name]
sort.Sort(podByCreationTimestamp(daemonPods))
if api.IsPodReady(daemonPods[0]) {
if v1.IsPodReady(daemonPods[0]) {
numberReady++
}
}
@@ -623,7 +623,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {

everything := unversioned.LabelSelector{}
if reflect.DeepEqual(ds.Spec.Selector, &everything) {
dsc.eventRecorder.Eventf(ds, api.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.")
dsc.eventRecorder.Eventf(ds, v1.EventTypeWarning, "SelectingAll", "This daemon set is selecting all pods. A non-empty selector is required.")
return nil
}

@@ -644,7 +644,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
return dsc.updateDaemonSetStatus(ds)
}

func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *extensions.DaemonSet) bool {
func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) bool {
// If the daemon set specifies a node name, check that it matches with node.Name.
if !(ds.Spec.Template.Spec.NodeName == "" || ds.Spec.Template.Spec.NodeName == node.Name) {
return false
@@ -652,23 +652,23 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte

// TODO: Move it to the predicates
for _, c := range node.Status.Conditions {
if c.Type == api.NodeOutOfDisk && c.Status == api.ConditionTrue {
if c.Type == v1.NodeOutOfDisk && c.Status == v1.ConditionTrue {
return false
}
}

newPod := &api.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
newPod.Namespace = ds.Namespace
newPod.Spec.NodeName = node.Name

pods := []*api.Pod{}
pods := []*v1.Pod{}

for _, m := range dsc.podStore.Indexer.List() {
pod := m.(*api.Pod)
pod := m.(*v1.Pod)
if pod.Spec.NodeName != node.Name {
continue
}
if pod.Status.Phase == api.PodSucceeded || pod.Status.Phase == api.PodFailed {
if pod.Status.Phase == v1.PodSucceeded || pod.Status.Phase == v1.PodFailed {
continue
}
// ignore pods that belong to the daemonset when taking into account whether
@@ -689,10 +689,10 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *api.Node, ds *exte
glog.V(4).Infof("GeneralPredicates failed on ds '%s/%s' for reason: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
switch reason := r.(type) {
case *predicates.InsufficientResourceError:
dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
case *predicates.PredicateFailureError:
if reason == predicates.ErrPodNotFitsHostPorts {
dsc.eventRecorder.Eventf(ds, api.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, "FailedPlacement", "failed to place pod on %q: host port conflict", node.ObjectMeta.Name)
}
}
}
@@ -712,7 +712,7 @@ func (o byCreationTimestamp) Less(i, j int) bool {
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

type podByCreationTimestamp []*api.Pod
type podByCreationTimestamp []*v1.Pod

func (o podByCreationTimestamp) Len() int { return len(o) }
func (o podByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

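podByCreationTimestamp exists so sort.Sort can order daemon pods oldest-first, which is what updateDaemonSetStatus relies on when it inspects daemonPods[0]. A minimal usage sketch:

    daemonPods := nodeToDaemonPods[node.Name]
    sort.Sort(podByCreationTimestamp(daemonPods))
    // daemonPods[0] is now the oldest pod on the node
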
@@ -20,14 +20,14 @@ import (
"fmt"
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -55,46 +55,46 @@ func getKey(ds *extensions.DaemonSet, t *testing.T) string {
func newDaemonSet(name string) *extensions.DaemonSet {
return &extensions.DaemonSet{
TypeMeta: unversioned.TypeMeta{APIVersion: testapi.Extensions.GroupVersion().String()},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.DaemonSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: simpleDaemonSetLabel},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent,
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
DNSPolicy: api.DNSDefault,
DNSPolicy: v1.DNSDefault,
},
},
},
}
}

func newNode(name string, label map[string]string) *api.Node {
return &api.Node{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
func newNode(name string, label map[string]string) *v1.Node {
return &v1.Node{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: label,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
Allocatable: api.ResourceList{
api.ResourcePods: resource.MustParse("100"),
Allocatable: v1.ResourceList{
v1.ResourcePods: resource.MustParse("100"),
},
},
}
@@ -106,28 +106,28 @@ func addNodes(nodeStore cache.Store, startIndex, numNodes int, label map[string]
}
}

func newPod(podName string, nodeName string, label map[string]string) *api.Pod {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
func newPod(podName string, nodeName string, label map[string]string) *v1.Pod {
pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
GenerateName: podName,
Labels: label,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: nodeName,
Containers: []api.Container{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent,
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
DNSPolicy: api.DNSDefault,
DNSPolicy: v1.DNSDefault,
},
}
api.GenerateName(api.SimpleNameGenerator, &pod.ObjectMeta)
v1.GenerateName(v1.SimpleNameGenerator, &pod.ObjectMeta)
return pod
}

@@ -138,8 +138,8 @@ func addPods(podStore cache.Store, nodeName string, label map[string]string, num
}

func newTestController() (*DaemonSetsController, *controller.FakePodControl) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
informerFactory := informers.NewSharedInformerFactory(clientset, controller.NoResyncPeriodFunc())
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())

manager := NewDaemonSetsController(informerFactory.DaemonSets(), informerFactory.Pods(), informerFactory.Nodes(), clientset, 0)
informerFactory.Start(wait.NeverStop)
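
Note that informers.NewSharedInformerFactory gains a second parameter in this commit; every call site shown passes nil for it. The parameter's definition is outside this diff, so its exact role here is an assumption:

    // Old: informers.NewSharedInformerFactory(client, resyncPeriod)
    // New: informers.NewSharedInformerFactory(client, nil, resyncPeriod)
    informerFactory := informers.NewSharedInformerFactory(clientset, nil, controller.NoResyncPeriodFunc())
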
@@ -212,8 +212,8 @@ func TestOneNodeDaemonLaunchesPod(t *testing.T) {
func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl := newTestController()
node := newNode("not-ready", nil)
node.Status.Conditions = []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionFalse},
node.Status.Conditions = []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionFalse},
}
manager.nodeStore.Add(node)
ds := newDaemonSet("foo")
@@ -225,29 +225,29 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl := newTestController()
node := newNode("not-enough-disk", nil)
node.Status.Conditions = []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}
node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
manager.nodeStore.Add(node)
ds := newDaemonSet("foo")
manager.dsStore.Add(ds)
syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0)
}

func resourcePodSpec(nodeName, memory, cpu string) api.PodSpec {
return api.PodSpec{
func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
return v1.PodSpec{
NodeName: nodeName,
Containers: []api.Container{{
Resources: api.ResourceRequirements{
Containers: []v1.Container{{
Resources: v1.ResourceRequirements{
Requests: allocatableResources(memory, cpu),
},
}},
}
}

func allocatableResources(memory, cpu string) api.ResourceList {
return api.ResourceList{
api.ResourceMemory: resource.MustParse(memory),
api.ResourceCPU: resource.MustParse(cpu),
api.ResourcePods: resource.MustParse("100"),
func allocatableResources(memory, cpu string) v1.ResourceList {
return v1.ResourceList{
v1.ResourceMemory: resource.MustParse(memory),
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourcePods: resource.MustParse("100"),
}
}

@@ -258,7 +258,7 @@ func TestInsufficentCapacityNodeDaemonDoesNotLaunchPod(t *testing.T) {
node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
@@ -273,9 +273,9 @@ func TestSufficentCapacityWithTerminatedPodsDaemonLaunchesPod(t *testing.T) {
node := newNode("too-much-mem", nil)
node.Status.Allocatable = allocatableResources("100M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
Status: api.PodStatus{Phase: api.PodSucceeded},
Status: v1.PodStatus{Phase: v1.PodSucceeded},
})
ds := newDaemonSet("foo")
ds.Spec.Template.Spec = podSpec
@@ -290,7 +290,7 @@ func TestSufficentCapacityNodeDaemonLaunchesPod(t *testing.T) {
node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
@@ -306,7 +306,7 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {
node := newNode("not-too-much-mem", nil)
node.Status.Allocatable = allocatableResources("200M", "200m")
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})
ds := newDaemonSet("foo")
@@ -319,10 +319,10 @@ func TestDontDoAnythingIfBeingDeleted(t *testing.T) {

// DaemonSets should not place onto nodes that would cause port conflicts
func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
podSpec := api.PodSpec{
podSpec := v1.PodSpec{
NodeName: "port-conflict",
Containers: []api.Container{{
Ports: []api.ContainerPort{{
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
@@ -330,7 +330,7 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
manager, podControl := newTestController()
node := newNode("port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec,
})

@@ -345,10 +345,10 @@ func TestPortConflictNodeDaemonDoesNotLaunchPod(t *testing.T) {
//
// Issue: https://github.com/kubernetes/kubernetes/issues/22309
func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
podSpec := api.PodSpec{
podSpec := v1.PodSpec{
NodeName: "port-conflict",
Containers: []api.Container{{
Ports: []api.ContainerPort{{
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 666,
}},
}},
@@ -356,10 +356,10 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {
manager, podControl := newTestController()
node := newNode("port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
ObjectMeta: api.ObjectMeta{
manager.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: simpleDaemonSetLabel,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: podSpec,
})
@@ -371,18 +371,18 @@ func TestPortConflictWithSameDaemonPodDoesNotDeletePod(t *testing.T) {

// DaemonSets should place onto nodes that would not cause port conflicts
func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
podSpec1 := api.PodSpec{
podSpec1 := v1.PodSpec{
NodeName: "no-port-conflict",
Containers: []api.Container{{
Ports: []api.ContainerPort{{
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 6661,
}},
}},
}
podSpec2 := api.PodSpec{
podSpec2 := v1.PodSpec{
NodeName: "no-port-conflict",
Containers: []api.Container{{
Ports: []api.ContainerPort{{
Containers: []v1.Container{{
Ports: []v1.ContainerPort{{
HostPort: 6662,
}},
}},
@@ -390,7 +390,7 @@ func TestNoPortConflictNodeDaemonLaunchesPod(t *testing.T) {
manager, podControl := newTestController()
node := newNode("no-port-conflict", nil)
manager.nodeStore.Add(node)
manager.podStore.Indexer.Add(&api.Pod{
manager.podStore.Indexer.Add(&v1.Pod{
Spec: podSpec1,
})
ds := newDaemonSet("foo")
@@ -406,12 +406,12 @@ func TestPodIsNotDeletedByDaemonsetWithEmptyLabelSelector(t *testing.T) {
manager, podControl := newTestController()
manager.nodeStore.Store.Add(newNode("node1", nil))
// Create pod not controlled by a daemonset.
manager.podStore.Indexer.Add(&api.Pod{
ObjectMeta: api.ObjectMeta{
manager.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{"bang": "boom"},
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: "node1",
},
})
@@ -554,7 +554,7 @@ func TestNodeAffinityDaemonLaunchesPods(t *testing.T) {
addNodes(manager.nodeStore.Store, 4, 3, simpleNodeLabel)
daemon := newDaemonSet("foo")
affinity := map[string]string{
api.AffinityAnnotationKey: fmt.Sprintf(`
v1.AffinityAnnotationKey: fmt.Sprintf(`
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": {
"nodeSelectorTerms": [{
"matchExpressions": [{
@@ -586,7 +586,7 @@ func TestNumberReadyStatus(t *testing.T) {
selector, _ := unversioned.LabelSelectorAsSelector(daemon.Spec.Selector)
daemonPods, _ := manager.podStore.Pods(daemon.Namespace).List(selector)
for _, pod := range daemonPods {
condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
pod.Status.Conditions = append(pod.Status.Conditions, condition)
}

@@ -27,12 +27,12 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/deployment/util"
@@ -91,14 +91,14 @@ func NewDeploymentController(dInformer informers.DeploymentInformer, rsInformer
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: client.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})

if client != nil && client.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("deployment_controller", client.Core().RESTClient().GetRateLimiter())
}
dc := &DeploymentController{
client: client,
eventRecorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "deployment-controller"}),
eventRecorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "deployment-controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "deployment"),
}

@@ -220,7 +220,7 @@ func (dc *DeploymentController) updateReplicaSet(old, cur interface{}) {
}
// A number of things could affect the old deployment: labels changing,
// pod template changing, etc.
if !api.Semantic.DeepEqual(oldRS, curRS) {
if !v1.Semantic.DeepEqual(oldRS, curRS) {
if oldD := dc.getDeploymentForReplicaSet(oldRS); oldD != nil {
dc.enqueueDeployment(oldD)
}
@@ -333,7 +333,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {

everything := unversioned.LabelSelector{}
if reflect.DeepEqual(d.Spec.Selector, &everything) {
dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectingAll", "This deployment is selecting all pods. A non-empty selector is required.")
if d.Status.ObservedGeneration < d.Generation {
d.Status.ObservedGeneration = d.Generation
dc.client.Extensions().Deployments(d.Namespace).UpdateStatus(d)
@@ -347,7 +347,7 @@ func (dc *DeploymentController) syncDeployment(key string) error {

// Handle overlapping deployments by deterministically avoiding syncing deployments that fight over ReplicaSets.
if err = dc.handleOverlap(d); err != nil {
dc.eventRecorder.Eventf(d, api.EventTypeWarning, "SelectorOverlap", err.Error())
dc.eventRecorder.Eventf(d, v1.EventTypeWarning, "SelectorOverlap", err.Error())
return nil
}

@@ -20,11 +20,11 @@ import (
"fmt"
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
@@ -41,15 +41,15 @@ var (

func rs(name string, replicas int, selector map[string]string, timestamp unversioned.Time) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: name,
CreationTimestamp: timestamp,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(replicas),
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: api.PodTemplateSpec{},
Template: v1.PodTemplateSpec{},
},
}
}
@@ -65,24 +65,27 @@ func newRSWithStatus(name string, specReplicas, statusReplicas int, selector map
func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSurge, maxUnavailable *intstr.IntOrString, selector map[string]string) *extensions.Deployment {
d := extensions.Deployment{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(extensions.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: name,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.DeploymentSpec{
Strategy: extensions.DeploymentStrategy{
Type: extensions.RollingUpdateDeploymentStrategyType,
RollingUpdate: &extensions.RollingUpdateDeployment{},
RollingUpdate: &extensions.RollingUpdateDeployment{
MaxUnavailable: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
MaxSurge: func() *intstr.IntOrString { i := intstr.FromInt(0); return &i }(),
},
Replicas: int32(replicas),
},
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &unversioned.LabelSelector{MatchLabels: selector},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: selector,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
},
@@ -93,22 +96,22 @@ func newDeployment(name string, replicas int, revisionHistoryLimit *int32, maxSu
},
}
if maxSurge != nil {
d.Spec.Strategy.RollingUpdate.MaxSurge = *maxSurge
d.Spec.Strategy.RollingUpdate.MaxSurge = maxSurge
}
if maxUnavailable != nil {
d.Spec.Strategy.RollingUpdate.MaxUnavailable = *maxUnavailable
d.Spec.Strategy.RollingUpdate.MaxUnavailable = maxUnavailable
}
return &d
}

func newReplicaSet(d *extensions.Deployment, name string, replicas int) *extensions.ReplicaSet {
return &extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(replicas),
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: d.Spec.Template,
},
}
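
In the versioned extensions/v1beta1 types, Replicas, MaxSurge, and MaxUnavailable are pointers, hence the immediately-invoked closures in the fixtures above. Small named helpers are an equivalent, more readable way to build the same values (hypothetical helpers, not part of this commit):

    func int32Ptr(i int32) *int32 { return &i }

    func intstrPtr(i int) *intstr.IntOrString {
        v := intstr.FromInt(i)
        return &v
    }

    // e.g. Replicas: int32Ptr(int32(replicas)), MaxSurge: intstrPtr(0)
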
@@ -130,7 +133,7 @@ type fixture struct {
|
||||
// Objects to put in the store.
|
||||
dLister []*extensions.Deployment
|
||||
rsLister []*extensions.ReplicaSet
|
||||
podLister []*api.Pod
|
||||
podLister []*v1.Pod
|
||||
|
||||
// Actions expected to happen on the client. Objects from here are also
|
||||
// preloaded into NewSimpleFake.
|
||||
@@ -161,7 +164,7 @@ func newFixture(t *testing.T) *fixture {
|
||||
|
||||
func (f *fixture) run(deploymentName string) {
|
||||
f.client = fake.NewSimpleClientset(f.objects...)
|
||||
informers := informers.NewSharedInformerFactory(f.client, controller.NoResyncPeriodFunc())
|
||||
informers := informers.NewSharedInformerFactory(f.client, nil, controller.NoResyncPeriodFunc())
|
||||
c := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), f.client)
|
||||
c.eventRecorder = &record.FakeRecorder{}
|
||||
c.dListerSynced = alwaysReady
|
||||
@@ -234,7 +237,7 @@ func TestSyncDeploymentDontDoAnythingDuringDeletion(t *testing.T) {
|
||||
// issue: https://github.com/kubernetes/kubernetes/issues/23218
|
||||
func TestDeploymentController_dontSyncDeploymentsWithEmptyPodSelector(t *testing.T) {
|
||||
fake := &fake.Clientset{}
|
||||
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
|
||||
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
|
||||
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)
|
||||
controller.eventRecorder = &record.FakeRecorder{}
|
||||
controller.dListerSynced = alwaysReady
|
||||
|
||||
@@ -20,8 +20,8 @@ import (
"fmt"
"reflect"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller/deployment/util"
)

@@ -95,14 +95,14 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// Update the deployment conditions with a message for the new replica set that
// was successfully deployed. If the condition already exists, we ignore this update.
msg := fmt.Sprintf("Replica set %q has successfully progressed.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, util.NewRSAvailableReason, msg)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.NewRSAvailableReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)

case util.DeploymentProgressing(d, &newStatus):
// If there is any progress made, continue by not checking if the deployment failed. This
// behavior emulates the rolling updater progressDeadline check.
msg := fmt.Sprintf("Replica set %q is progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, util.ReplicaSetUpdatedReason, msg)
// Update the current Progressing condition or add a new one if it doesn't exist.
// If a Progressing condition with status=true already exists, we should update
// everything but lastTransitionTime. SetDeploymentCondition already does that but
@@ -111,7 +111,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// update with the same reason and change just lastUpdateTime iff we notice any
// progress. That's why we handle it here.
if currentCond != nil {
if currentCond.Status == api.ConditionTrue {
if currentCond.Status == v1.ConditionTrue {
condition.LastTransitionTime = currentCond.LastTransitionTime
}
util.RemoveDeploymentCondition(&newStatus, extensions.DeploymentProgressing)
@@ -122,7 +122,7 @@ func (dc *DeploymentController) syncRolloutStatus(allRSs []*extensions.ReplicaSe
// Update the deployment with a timeout condition. If the condition already exists,
// we ignore this update.
msg := fmt.Sprintf("Replica set %q has timed out progressing.", newRS.Name)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionFalse, util.TimedOutReason, msg)
condition := util.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, util.TimedOutReason, msg)
util.SetDeploymentCondition(&newStatus, *condition)
}
}

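One subtlety above: when the Progressing condition stays true but its reason changes, the old lastTransitionTime is carried over, so the condition keeps measuring how long the rollout has been progressing instead of restarting the clock. A reduced sketch of that bookkeeping (local types, not the real util package):

    package main

    import (
        "fmt"
        "time"
    )

    type condition struct {
        Status             string
        Reason             string
        LastTransitionTime time.Time
    }

    // replace swaps in a new condition, keeping the transition time when
    // the status itself has not flipped.
    func replace(current *condition, next condition) condition {
        if current != nil && current.Status == next.Status {
            next.LastTransitionTime = current.LastTransitionTime
        }
        return next
    }

    func main() {
        then := time.Now().Add(-time.Minute)
        cur := condition{Status: "True", Reason: "ReplicaSetUpdated", LastTransitionTime: then}
        got := replace(&cur, condition{Status: "True", Reason: "NewReplicaSetAvailable", LastTransitionTime: time.Now()})
        fmt.Println(got.LastTransitionTime.Equal(then)) // true: still progressing since "then"
    }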
@@ -19,7 +19,7 @@ package deployment
import (
"fmt"

"k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/wait"
@@ -82,7 +82,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRecreate(oldRSs []*ext
for i := range oldRSs {
rs := oldRSs[i]
// Scaling not required.
if rs.Spec.Replicas == 0 {
if *(rs.Spec.Replicas) == 0 {
continue
}
scaledRS, updatedRS, err := dc.scaleReplicaSetAndRecordEvent(rs, 0, deployment)
@@ -104,7 +104,7 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
rs := oldRSs[i]
desiredGeneration := rs.Generation
observedGeneration := rs.Status.ObservedGeneration
specReplicas := rs.Spec.Replicas
specReplicas := *(rs.Spec.Replicas)
statusReplicas := rs.Status.Replicas

if err := wait.ExponentialBackoff(retry.DefaultRetry, func() (bool, error) {
@@ -113,13 +113,13 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.
return false, err
}

specReplicas = replicaSet.Spec.Replicas
specReplicas = *(replicaSet.Spec.Replicas)
statusReplicas = replicaSet.Status.Replicas
observedGeneration = replicaSet.Status.ObservedGeneration

// TODO: We also need to wait for terminating replicas to actually terminate.
// See https://github.com/kubernetes/kubernetes/issues/32567
return observedGeneration >= desiredGeneration && replicaSet.Spec.Replicas == 0 && replicaSet.Status.Replicas == 0, nil
return observedGeneration >= desiredGeneration && *(replicaSet.Spec.Replicas) == 0 && replicaSet.Status.Replicas == 0, nil
}); err != nil {
if err == wait.ErrWaitTimeout {
err = fmt.Errorf("replica set %q never became inactive: synced=%t, spec.replicas=%d, status.replicas=%d",
@@ -133,6 +133,6 @@ func (dc *DeploymentController) waitForInactiveReplicaSets(oldRSs []*extensions.

// scaleUpNewReplicaSetForRecreate scales up new replica set when deployment strategy is "Recreate"
func (dc *DeploymentController) scaleUpNewReplicaSetForRecreate(newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, deployment.Spec.Replicas, deployment)
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}

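waitForInactiveReplicaSets drives its polling through wait.ExponentialBackoff: the closure returns (done, err), (false, nil) means retry, and an exhausted backoff surfaces wait.ErrWaitTimeout, which the code above turns into the descriptive "never became inactive" error. A minimal sketch of the same pattern with a canned condition:

    package main

    import (
        "fmt"
        "time"

        "k8s.io/kubernetes/pkg/util/wait"
    )

    func main() {
        attempts := 0
        backoff := wait.Backoff{Duration: 10 * time.Millisecond, Factor: 2.0, Steps: 5}
        err := wait.ExponentialBackoff(backoff, func() (bool, error) {
            attempts++
            // The real closure re-reads the ReplicaSet and checks its
            // observed generation and replica counts; we fake success here.
            return attempts >= 3, nil
        })
        fmt.Println(attempts, err) // 3 <nil>
    }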
@@ -21,8 +21,8 @@ import (
"reflect"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)

@@ -91,11 +91,11 @@ func (dc *DeploymentController) rollbackToTemplate(deployment *extensions.Deploy
}

func (dc *DeploymentController) emitRollbackWarningEvent(deployment *extensions.Deployment, reason, message string) {
dc.eventRecorder.Eventf(deployment, api.EventTypeWarning, reason, message)
dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, reason, message)
}

func (dc *DeploymentController) emitRollbackNormalEvent(deployment *extensions.Deployment, message string) {
dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, deploymentutil.RollbackDone, message)
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, deploymentutil.RollbackDone, message)
}

// updateDeploymentAndClearRollbackTo sets .spec.rollbackTo to nil and update the input deployment

@@ -21,7 +21,7 @@ import (
"sort"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
"k8s.io/kubernetes/pkg/util/integer"
@@ -62,13 +62,13 @@ func (dc *DeploymentController) rolloutRolling(deployment *extensions.Deployment
}

func (dc *DeploymentController) reconcileNewReplicaSet(allRSs []*extensions.ReplicaSet, newRS *extensions.ReplicaSet, deployment *extensions.Deployment) (bool, error) {
if newRS.Spec.Replicas == deployment.Spec.Replicas {
if *(newRS.Spec.Replicas) == *(deployment.Spec.Replicas) {
// Scaling not required.
return false, nil
}
if newRS.Spec.Replicas > deployment.Spec.Replicas {
if *(newRS.Spec.Replicas) > *(deployment.Spec.Replicas) {
// Scale down.
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, deployment.Spec.Replicas, deployment)
scaled, _, err := dc.scaleReplicaSetAndRecordEvent(newRS, *(deployment.Spec.Replicas), deployment)
return scaled, err
}
newReplicasCount, err := deploymentutil.NewRSNewReplicas(deployment, allRSs, newRS)
@@ -120,8 +120,8 @@ func (dc *DeploymentController) reconcileOldReplicaSets(allRSs []*extensions.Rep
// * The new replica set created must start with 0 replicas because allPodsCount is already at 13.
// * However, newRSPodsUnavailable would also be 0, so the 2 old replica sets could be scaled down by 5 (13 - 8 - 0), which would then
// allow the new replica set to be scaled up by 5.
minAvailable := deployment.Spec.Replicas - maxUnavailable
newRSUnavailablePodCount := newRS.Spec.Replicas - newRS.Status.AvailableReplicas
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
newRSUnavailablePodCount := *(newRS.Spec.Replicas) - newRS.Status.AvailableReplicas
maxScaledDown := allPodsCount - minAvailable - newRSUnavailablePodCount
if maxScaledDown <= 0 {
return false, nil
@@ -158,20 +158,20 @@ func (dc *DeploymentController) cleanupUnhealthyReplicas(oldRSs []*extensions.Re
if totalScaledDown >= maxCleanupCount {
break
}
if targetRS.Spec.Replicas == 0 {
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this replica set.
continue
}
glog.V(4).Infof("Found %d available pods in old RS %s/%s", targetRS.Status.AvailableReplicas, targetRS.Namespace, targetRS.Name)
if targetRS.Spec.Replicas == targetRS.Status.AvailableReplicas {
if *(targetRS.Spec.Replicas) == targetRS.Status.AvailableReplicas {
// no unhealthy replicas found, no scaling required.
continue
}

scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(targetRS.Spec.Replicas-targetRS.Status.AvailableReplicas)))
newReplicasCount := targetRS.Spec.Replicas - scaledDownCount
if newReplicasCount > targetRS.Spec.Replicas {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
scaledDownCount := int32(integer.IntMin(int(maxCleanupCount-totalScaledDown), int(*(targetRS.Spec.Replicas)-targetRS.Status.AvailableReplicas)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaledDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return nil, 0, fmt.Errorf("when cleaning up unhealthy replicas, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, updatedOldRS, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {
@@ -189,7 +189,7 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
maxUnavailable := deploymentutil.MaxUnavailable(*deployment)

// Check if we can scale down.
minAvailable := deployment.Spec.Replicas - maxUnavailable
minAvailable := *(deployment.Spec.Replicas) - maxUnavailable
// Find the number of available pods.
availablePodCount := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
if availablePodCount <= minAvailable {
@@ -207,15 +207,15 @@ func (dc *DeploymentController) scaleDownOldReplicaSetsForRollingUpdate(allRSs [
// No further scaling required.
break
}
if targetRS.Spec.Replicas == 0 {
if *(targetRS.Spec.Replicas) == 0 {
// cannot scale down this ReplicaSet.
continue
}
// Scale down.
scaleDownCount := int32(integer.IntMin(int(targetRS.Spec.Replicas), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := targetRS.Spec.Replicas - scaleDownCount
if newReplicasCount > targetRS.Spec.Replicas {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, targetRS.Spec.Replicas, newReplicasCount)
scaleDownCount := int32(integer.IntMin(int(*(targetRS.Spec.Replicas)), int(totalScaleDownCount-totalScaledDown)))
newReplicasCount := *(targetRS.Spec.Replicas) - scaleDownCount
if newReplicasCount > *(targetRS.Spec.Replicas) {
return 0, fmt.Errorf("when scaling down old RS, got invalid request to scale down %s/%s %d -> %d", targetRS.Namespace, targetRS.Name, *(targetRS.Spec.Replicas), newReplicasCount)
}
_, _, err := dc.scaleReplicaSetAndRecordEvent(targetRS, newReplicasCount, deployment)
if err != nil {

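The maxScaledDown bound is easiest to check against the numbers in the code comment above: 13 pods total, a minAvailable of 8 (say, 10 desired minus a maxUnavailable of 2, which is what the comment's example appears to assume), and a freshly created new replica set with 0 unavailable pods leaves room to scale the old replica sets down by 5:

    package main

    import "fmt"

    func main() {
        desired := int32(10)         // *(deployment.Spec.Replicas)
        maxUnavailable := int32(2)
        allPodsCount := int32(13)
        newRSUnavailable := int32(0) // the new RS starts at 0 replicas

        minAvailable := desired - maxUnavailable                    // 8
        fmt.Println(allPodsCount - minAvailable - newRSUnavailable) // 5, matching "(13 - 8 - 0)"
    }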
@@ -19,8 +19,8 @@ package deployment
import (
"testing"

"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/util/intstr"
@@ -110,7 +110,7 @@ func TestDeploymentController_reconcileNewReplicaSet(t *testing.T) {
continue
}
updated := fake.Actions()[0].(core.UpdateAction).GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedNewReplicas, int(updated.Spec.Replicas); e != a {
if e, a := test.expectedNewReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}
@@ -372,7 +372,7 @@ func TestDeploymentController_scaleDownOldReplicaSetsForRollingUpdate(t *testing
continue
}
updated := updateAction.GetObject().(*extensions.ReplicaSet)
if e, a := test.expectedOldReplicas, int(updated.Spec.Replicas); e != a {
if e, a := test.expectedOldReplicas, int(*(updated.Spec.Replicas)); e != a {
t.Errorf("expected update to %d replicas, got %d", e, a)
}
}

@@ -24,9 +24,11 @@ import (

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
deploymentutil "k8s.io/kubernetes/pkg/controller/deployment/util"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
@@ -80,11 +82,11 @@ func (dc *DeploymentController) checkPausedConditions(d *extensions.Deployment)

needsUpdate := false
if d.Spec.Paused && !pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.PausedDeployReason, "Deployment is paused")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
} else if !d.Spec.Paused && pausedCondExists {
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionUnknown, deploymentutil.ResumedDeployReason, "Deployment is resumed")
deploymentutil.SetDeploymentCondition(&d.Status, *condition)
needsUpdate = true
}
@@ -126,10 +128,14 @@ func (dc *DeploymentController) getAllReplicaSetsAndSyncRevision(deployment *ext
}

// rsAndPodsWithHashKeySynced returns the RSes and pods the given deployment targets, with pod-template-hash information synced.
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *api.PodList, error) {
func (dc *DeploymentController) rsAndPodsWithHashKeySynced(deployment *extensions.Deployment) ([]*extensions.ReplicaSet, *v1.PodList, error) {
rsList, err := deploymentutil.ListReplicaSets(deployment,
func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) {
return dc.rsLister.ReplicaSets(namespace).List(options.LabelSelector)
func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
parsed, err := labels.Parse(options.LabelSelector)
if err != nil {
return nil, err
}
return dc.rsLister.ReplicaSets(namespace).List(parsed)
})
if err != nil {
return nil, nil, fmt.Errorf("error listing ReplicaSets: %v", err)
@@ -201,12 +207,16 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
if err != nil {
return nil, fmt.Errorf("error in converting selector to label selector for replica set %s: %s", updatedRS.Name, err)
}
options := api.ListOptions{LabelSelector: selector}
pods, err := dc.podLister.Pods(namespace).List(options.LabelSelector)
options := v1.ListOptions{LabelSelector: selector.String()}
parsed, err := labels.Parse(options.LabelSelector)
if err != nil {
return nil, err
}
pods, err := dc.podLister.Pods(namespace).List(parsed)
if err != nil {
return nil, fmt.Errorf("error in getting pod list for namespace %s and list options %+v: %s", namespace, options, err)
}
podList := api.PodList{Items: make([]api.Pod, 0, len(pods))}
podList := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
for i := range pods {
podList.Items = append(podList.Items, *pods[i])
}
@@ -253,11 +263,15 @@ func (dc *DeploymentController) addHashKeyToRSAndPods(rs *extensions.ReplicaSet)
return updatedRS, nil
}

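The recurring rework in this file: versioned v1.ListOptions carries the label selector as a plain string, while the cache listers still take a labels.Selector, hence every call site now round-trips through labels.Parse. A standalone sketch of that conversion:

    package main

    import (
        "fmt"

        "k8s.io/kubernetes/pkg/labels"
    )

    func main() {
        // v1.ListOptions.LabelSelector is a plain string...
        selectorString := "app=nginx,pod-template-hash=12345"

        // ...so it must be parsed back into a labels.Selector before a
        // lister can use it.
        parsed, err := labels.Parse(selectorString)
        if err != nil {
            panic(err)
        }
        pod := labels.Set{"app": "nginx", "pod-template-hash": "12345"}
        fmt.Println(parsed.Matches(pod)) // true
    }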
func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*api.PodList, error) {
func (dc *DeploymentController) listPods(deployment *extensions.Deployment) (*v1.PodList, error) {
return deploymentutil.ListPods(deployment,
func(namespace string, options api.ListOptions) (*api.PodList, error) {
pods, err := dc.podLister.Pods(namespace).List(options.LabelSelector)
result := api.PodList{Items: make([]api.Pod, 0, len(pods))}
func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
parsed, err := labels.Parse(options.LabelSelector)
if err != nil {
return nil, err
}
pods, err := dc.podLister.Pods(namespace).List(parsed)
result := v1.PodList{Items: make([]v1.Pod, 0, len(pods))}
for i := range pods {
result.Items = append(result.Items, *pods[i])
}
@@ -307,7 +321,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
cond := deploymentutil.GetDeploymentCondition(deployment.Status, extensions.DeploymentProgressing)
if deployment.Spec.ProgressDeadlineSeconds != nil && cond == nil {
msg := fmt.Sprintf("Found new replica set %q", rsCopy.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.FoundNewRSReason, msg)
deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
updateConditions = true
}
@@ -333,13 +347,13 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme

// Create new ReplicaSet
newRS := extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
// Make the name deterministic, to ensure idempotence
Name: deployment.Name + "-" + fmt.Sprintf("%d", podTemplateSpecHash),
Namespace: namespace,
},
Spec: extensions.ReplicaSetSpec{
Replicas: 0,
Replicas: func(i int32) *int32 { return &i }(0),
MinReadySeconds: deployment.Spec.MinReadySeconds,
Selector: newRSSelector,
Template: newRSTemplate,
@@ -351,7 +365,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
return nil, err
}

newRS.Spec.Replicas = newReplicasCount
*(newRS.Spec.Replicas) = newReplicasCount
// Set new replica set's annotation
deploymentutil.SetNewReplicaSetAnnotations(deployment, &newRS, newRevision, false)
createdRS, err := dc.client.Extensions().ReplicaSets(namespace).Create(&newRS)
@@ -365,7 +379,7 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
case err != nil:
msg := fmt.Sprintf("Failed to create new replica set %q: %v", newRS.Name, err)
if deployment.Spec.ProgressDeadlineSeconds != nil {
cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
cond := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionFalse, deploymentutil.FailedRSCreateReason, msg)
deploymentutil.SetDeploymentCondition(&deployment.Status, *cond)
// We don't really care about this error at this point, since we have a bigger issue to report.
// TODO: Update the rest of the Deployment status, too. We may need to do this every time we
@@ -375,17 +389,17 @@ func (dc *DeploymentController) getNewReplicaSet(deployment *extensions.Deployme
// these reasons as well. Related issue: https://github.com/kubernetes/kubernetes/issues/18568
_, _ = dc.client.Extensions().Deployments(deployment.ObjectMeta.Namespace).UpdateStatus(deployment)
}
dc.eventRecorder.Eventf(deployment, api.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
dc.eventRecorder.Eventf(deployment, v1.EventTypeWarning, deploymentutil.FailedRSCreateReason, msg)
return nil, err
}
if newReplicasCount > 0 {
dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled up replica set %s to %d", createdRS.Name, newReplicasCount)
}

deploymentutil.SetDeploymentRevision(deployment, newRevision)
if deployment.Spec.ProgressDeadlineSeconds != nil {
msg := fmt.Sprintf("Created new replica set %q", createdRS.Name)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, api.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
condition := deploymentutil.NewDeploymentCondition(extensions.DeploymentProgressing, v1.ConditionTrue, deploymentutil.NewReplicaSetReason, msg)
deploymentutil.SetDeploymentCondition(&deployment.Status, *condition)
}
_, err = dc.client.Extensions().Deployments(deployment.Namespace).UpdateStatus(deployment)
@@ -401,10 +415,10 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
// If there is only one active replica set then we should scale that up to the full count of the
// deployment. If there is no active replica set, then we should scale up the newest replica set.
if activeOrLatest := deploymentutil.FindActiveOrLatest(newRS, oldRSs); activeOrLatest != nil {
if activeOrLatest.Spec.Replicas == deployment.Spec.Replicas {
if *(activeOrLatest.Spec.Replicas) == *(deployment.Spec.Replicas) {
return nil
}
_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, deployment.Spec.Replicas, deployment)
_, _, err := dc.scaleReplicaSetAndRecordEvent(activeOrLatest, *(deployment.Spec.Replicas), deployment)
return err
}

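Note that *(newRS.Spec.Replicas) = newReplicasCount above writes through the pointer, which is only safe because the field was initialized to a non-nil pointer when the ReplicaSet literal was built a few lines earlier. A standalone illustration of the trap (local type for brevity):

    package main

    import "fmt"

    type spec struct{ Replicas *int32 }

    func main() {
        s := spec{}
        // *(s.Replicas) = 3 // would panic: nil pointer dereference

        zero := int32(0)
        s.Replicas = &zero // initialize first, as getNewReplicaSet does
        *(s.Replicas) = 3  // now safe
        fmt.Println(*s.Replicas) // 3
    }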
@@ -427,8 +441,8 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
allRSsReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)

allowedSize := int32(0)
if deployment.Spec.Replicas > 0 {
allowedSize = deployment.Spec.Replicas + deploymentutil.MaxSurge(*deployment)
if *(deployment.Spec.Replicas) > 0 {
allowedSize = *(deployment.Spec.Replicas) + deploymentutil.MaxSurge(*deployment)
}

// Number of additional replicas that can be either added or removed from the total
@@ -465,10 +479,10 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *
if deploymentReplicasToAdd != 0 {
proportion := deploymentutil.GetProportion(rs, *deployment, deploymentReplicasToAdd, deploymentReplicasAdded)

nameToSize[rs.Name] = rs.Spec.Replicas + proportion
nameToSize[rs.Name] = *(rs.Spec.Replicas) + proportion
deploymentReplicasAdded += proportion
} else {
nameToSize[rs.Name] = rs.Spec.Replicas
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
}

@@ -497,11 +511,11 @@ func (dc *DeploymentController) scale(deployment *extensions.Deployment, newRS *

func (dc *DeploymentController) scaleReplicaSetAndRecordEvent(rs *extensions.ReplicaSet, newScale int32, deployment *extensions.Deployment) (bool, *extensions.ReplicaSet, error) {
// No need to scale
if rs.Spec.Replicas == newScale {
if *(rs.Spec.Replicas) == newScale {
return false, rs, nil
}
var scalingOperation string
if rs.Spec.Replicas < newScale {
if *(rs.Spec.Replicas) < newScale {
scalingOperation = "up"
} else {
scalingOperation = "down"
@@ -517,14 +531,14 @@ func (dc *DeploymentController) scaleReplicaSet(rs *extensions.ReplicaSet, newSc
}
rsCopy := objCopy.(*extensions.ReplicaSet)

sizeNeedsUpdate := rsCopy.Spec.Replicas != newScale
annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, deployment.Spec.Replicas, deployment.Spec.Replicas+deploymentutil.MaxSurge(*deployment))
sizeNeedsUpdate := *(rsCopy.Spec.Replicas) != newScale
annotationsNeedUpdate := deploymentutil.SetReplicasAnnotations(rsCopy, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+deploymentutil.MaxSurge(*deployment))

if sizeNeedsUpdate || annotationsNeedUpdate {
rsCopy.Spec.Replicas = newScale
*(rsCopy.Spec.Replicas) = newScale
rs, err = dc.client.Extensions().ReplicaSets(rsCopy.Namespace).Update(rsCopy)
if err == nil && sizeNeedsUpdate {
dc.eventRecorder.Eventf(deployment, api.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
dc.eventRecorder.Eventf(deployment, v1.EventTypeNormal, "ScalingReplicaSet", "Scaled %s replica set %s to %d", scalingOperation, rs.Name, newScale)
}
}
return rs, err
@@ -549,7 +563,7 @@ func (dc *DeploymentController) cleanupDeployment(oldRSs []*extensions.ReplicaSe
for i := int32(0); i < diff; i++ {
rs := oldRSs[i]
// Avoid delete replica set with non-zero replica counts
if rs.Status.Replicas != 0 || rs.Spec.Replicas != 0 || rs.Generation > rs.Status.ObservedGeneration {
if rs.Status.Replicas != 0 || *(rs.Spec.Replicas) != 0 || rs.Generation > rs.Status.ObservedGeneration {
continue
}
if err := dc.client.Extensions().ReplicaSets(rs.Namespace).Delete(rs.Name, nil); err != nil && !errors.IsNotFound(err) {
@@ -579,11 +593,11 @@ func (dc *DeploymentController) calculateStatus(allRSs []*extensions.ReplicaSet,
availableReplicas := deploymentutil.GetAvailableReplicaCountForReplicaSets(allRSs)
totalReplicas := deploymentutil.GetReplicaCountForReplicaSets(allRSs)

if availableReplicas >= deployment.Spec.Replicas-deploymentutil.MaxUnavailable(*deployment) {
minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, api.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
if availableReplicas >= *(deployment.Spec.Replicas)-deploymentutil.MaxUnavailable(*deployment) {
minAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionTrue, deploymentutil.MinimumReplicasAvailable, "Deployment has minimum availability.")
deploymentutil.SetDeploymentCondition(&deployment.Status, *minAvailability)
} else {
noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, api.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
noMinAvailability := deploymentutil.NewDeploymentCondition(extensions.DeploymentAvailable, v1.ConditionFalse, deploymentutil.MinimumReplicasUnavailable, "Deployment does not have minimum availability.")
deploymentutil.SetDeploymentCondition(&deployment.Status, *noMinAvailability)
}

@@ -611,7 +625,7 @@ func (dc *DeploymentController) isScalingEvent(d *extensions.Deployment) (bool,
if !ok {
continue
}
if desired != d.Spec.Replicas {
if desired != *(d.Spec.Replicas) {
return true, nil
}
}

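The surge budget in scale() reduces to a little arithmetic: the deployment may run up to desired+maxSurge pods in total, and whatever headroom is left gets distributed proportionally across the replica sets. With illustrative numbers:

    package main

    import "fmt"

    func main() {
        desired := int32(10)        // *(deployment.Spec.Replicas)
        maxSurge := int32(3)
        allRSsReplicas := int32(11) // current total across all replica sets

        allowedSize := int32(0)
        if desired > 0 {
            allowedSize = desired + maxSurge
        }
        // Replicas that can still be added to (or, if negative, must be
        // removed from) the replica sets as a whole.
        deploymentReplicasToAdd := allowedSize - allRSsReplicas
        fmt.Println(deploymentReplicasToAdd) // 2
    }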
@@ -21,8 +21,8 @@ import (
"time"

"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/record"
testclient "k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
@@ -261,7 +261,7 @@ func TestScale(t *testing.T) {
}

if test.newRS != nil {
desiredReplicas := test.oldDeployment.Spec.Replicas
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[test.newRS.Name]; ok {
desiredReplicas = desired
}
@@ -272,7 +272,7 @@ func TestScale(t *testing.T) {
if rs == nil {
continue
}
desiredReplicas := test.oldDeployment.Spec.Replicas
desiredReplicas := *(test.oldDeployment.Spec.Replicas)
if desired, ok := test.desiredReplicasAnnotations[rs.Name]; ok {
desiredReplicas = desired
}
@@ -289,22 +289,22 @@ func TestScale(t *testing.T) {
// no update action for it.
nameToSize := make(map[string]int32)
if test.newRS != nil {
nameToSize[test.newRS.Name] = test.newRS.Spec.Replicas
nameToSize[test.newRS.Name] = *(test.newRS.Spec.Replicas)
}
for i := range test.oldRSs {
rs := test.oldRSs[i]
nameToSize[rs.Name] = rs.Spec.Replicas
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
// Get all the UPDATE actions and update nameToSize with all the updated sizes.
for _, action := range fake.Actions() {
rs := action.(testclient.UpdateAction).GetObject().(*extensions.ReplicaSet)
if !test.wasntUpdated[rs.Name] {
nameToSize[rs.Name] = rs.Spec.Replicas
nameToSize[rs.Name] = *(rs.Spec.Replicas)
}
}

if test.expectedNew != nil && test.newRS != nil && test.expectedNew.Spec.Replicas != nameToSize[test.newRS.Name] {
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, test.expectedNew.Spec.Replicas, nameToSize[test.newRS.Name])
if test.expectedNew != nil && test.newRS != nil && *(test.expectedNew.Spec.Replicas) != nameToSize[test.newRS.Name] {
t.Errorf("%s: expected new replicas: %d, got: %d", test.name, *(test.expectedNew.Spec.Replicas), nameToSize[test.newRS.Name])
continue
}
if len(test.expectedOld) != len(test.oldRSs) {
@@ -314,8 +314,8 @@ func TestScale(t *testing.T) {
for n := range test.oldRSs {
rs := test.oldRSs[n]
expected := test.expectedOld[n]
if expected.Spec.Replicas != nameToSize[rs.Name] {
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, expected.Spec.Replicas, nameToSize[rs.Name])
if *(expected.Spec.Replicas) != nameToSize[rs.Name] {
t.Errorf("%s: expected old (%s) replicas: %d, got: %d", test.name, rs.Name, *(expected.Spec.Replicas), nameToSize[rs.Name])
}
}
}
@@ -371,7 +371,7 @@ func TestDeploymentController_cleanupDeployment(t *testing.T) {
for i := range tests {
test := tests[i]
fake := &fake.Clientset{}
informers := informers.NewSharedInformerFactory(fake, controller.NoResyncPeriodFunc())
informers := informers.NewSharedInformerFactory(fake, nil, controller.NoResyncPeriodFunc())
controller := NewDeploymentController(informers.Deployments(), informers.ReplicaSets(), informers.Pods(), fake)

controller.eventRecorder = &record.FakeRecorder{}

@@ -29,8 +29,10 @@ import (
"k8s.io/kubernetes/pkg/api/annotations"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
internalextensions "k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/runtime"
@@ -108,7 +110,7 @@ const (
)

// NewDeploymentCondition creates a new deployment condition.
func NewDeploymentCondition(condType extensions.DeploymentConditionType, status api.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
func NewDeploymentCondition(condType extensions.DeploymentConditionType, status v1.ConditionStatus, reason, message string) *extensions.DeploymentCondition {
return &extensions.DeploymentCondition{
Type: condType,
Status: status,
@@ -266,7 +268,7 @@ func SetNewReplicaSetAnnotations(deployment *extensions.Deployment, newRS *exten
}
}
// If the new replica set is about to be created, we need to add replica annotations to it.
if !exists && SetReplicasAnnotations(newRS, deployment.Spec.Replicas, deployment.Spec.Replicas+MaxSurge(*deployment)) {
if !exists && SetReplicasAnnotations(newRS, *(deployment.Spec.Replicas), *(deployment.Spec.Replicas)+MaxSurge(*deployment)) {
annotationChanged = true
}
return annotationChanged
@@ -404,7 +406,7 @@ func MaxUnavailable(deployment extensions.Deployment) int32 {
return int32(0)
}
// Error caught by validation
_, maxUnavailable, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
_, maxUnavailable, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxUnavailable
}

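ResolveFenceposts resolves the two intstr fields against the desired replica count; its callers rely on maxSurge rounding up and maxUnavailable rounding down so percentage values stay conservative. A sketch of the underlying resolution using GetValueFromIntOrPercent, which this file already depends on (values illustrative):

    package main

    import (
        "fmt"

        intstrutil "k8s.io/kubernetes/pkg/util/intstr"
    )

    func main() {
        replicas := 10
        maxSurge := intstrutil.FromString("25%")
        maxUnavailable := intstrutil.FromString("25%")

        surge, _ := intstrutil.GetValueFromIntOrPercent(&maxSurge, replicas, true)              // round up
        unavailable, _ := intstrutil.GetValueFromIntOrPercent(&maxUnavailable, replicas, false) // round down
        fmt.Println(surge, unavailable) // 3 2
    }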
@@ -413,7 +415,7 @@ func MinAvailable(deployment *extensions.Deployment) int32 {
if !IsRollingUpdate(deployment) {
return int32(0)
}
return deployment.Spec.Replicas - MaxUnavailable(*deployment)
return *(deployment.Spec.Replicas) - MaxUnavailable(*deployment)
}

// MaxSurge returns the maximum surge pods a rolling deployment can take.
@@ -422,7 +424,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
return int32(0)
}
// Error caught by validation
maxSurge, _, _ := ResolveFenceposts(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, &deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, deployment.Spec.Replicas)
maxSurge, _, _ := ResolveFenceposts(deployment.Spec.Strategy.RollingUpdate.MaxSurge, deployment.Spec.Strategy.RollingUpdate.MaxUnavailable, *(deployment.Spec.Replicas))
return maxSurge
}

@@ -430,7 +432,7 @@ func MaxSurge(deployment extensions.Deployment) int32 {
// of the parent deployment, 2. the replica count that needs be added on the replica sets of the
// deployment, and 3. the total replicas added in the replica sets of the deployment so far.
func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymentReplicasToAdd, deploymentReplicasAdded int32) int32 {
if rs == nil || rs.Spec.Replicas == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
if rs == nil || *(rs.Spec.Replicas) == 0 || deploymentReplicasToAdd == 0 || deploymentReplicasToAdd == deploymentReplicasAdded {
return int32(0)
}

@@ -453,11 +455,11 @@ func GetProportion(rs *extensions.ReplicaSet, d extensions.Deployment, deploymen
// 1. a scaling event during a rollout or 2. when scaling a paused deployment.
func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) int32 {
// If we are scaling down to zero then the fraction of this replica set is its whole size (negative)
if d.Spec.Replicas == int32(0) {
return -rs.Spec.Replicas
if *(d.Spec.Replicas) == int32(0) {
return -*(rs.Spec.Replicas)
}

deploymentReplicas := d.Spec.Replicas + MaxSurge(d)
deploymentReplicas := *(d.Spec.Replicas) + MaxSurge(d)
annotatedReplicas, ok := getMaxReplicasAnnotation(&rs)
if !ok {
// If we cannot find the annotation then fallback to the current deployment size. Note that this
@@ -469,8 +471,8 @@ func getReplicaSetFraction(rs extensions.ReplicaSet, d extensions.Deployment) in

// We should never proportionally scale up from zero which means rs.spec.replicas and annotatedReplicas
// will never be zero here.
newRSsize := (float64(rs.Spec.Replicas * deploymentReplicas)) / float64(annotatedReplicas)
return integer.RoundToInt32(newRSsize) - rs.Spec.Replicas
newRSsize := (float64(*(rs.Spec.Replicas) * deploymentReplicas)) / float64(annotatedReplicas)
return integer.RoundToInt32(newRSsize) - *(rs.Spec.Replicas)
}

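getReplicaSetFraction is plain proportional math: rescale the replica set by deploymentReplicas/annotatedReplicas and return the delta. With illustrative numbers — a replica set at 5 replicas, a max-replicas annotation of 10, and the deployment scaled so that replicas plus maxSurge is 22:

    package main

    import "fmt"

    // roundToInt32 mirrors what we understand integer.RoundToInt32 to do:
    // round half away from zero.
    func roundToInt32(f float64) int32 {
        if f < 0 {
            return int32(f - 0.5)
        }
        return int32(f + 0.5)
    }

    func main() {
        rsReplicas := int32(5)          // *(rs.Spec.Replicas)
        annotatedReplicas := int32(10)  // from the max-replicas annotation
        deploymentReplicas := int32(22) // *(d.Spec.Replicas) + MaxSurge(d)

        newRSsize := float64(rsReplicas*deploymentReplicas) / float64(annotatedReplicas)
        fmt.Println(roundToInt32(newRSsize) - rsReplicas) // 6: this RS's share of the scale-up
    }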
// GetAllReplicaSets returns the old and new replica sets targeted by the given Deployment. It gets PodList and ReplicaSetList from client interface.
@@ -523,7 +525,7 @@ func GetNewReplicaSet(deployment *extensions.Deployment, c clientset.Interface)
// listReplicaSets lists all RSes the given deployment targets with the given client interface.
func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) ([]*extensions.ReplicaSet, error) {
return ListReplicaSets(deployment,
func(namespace string, options api.ListOptions) ([]*extensions.ReplicaSet, error) {
func(namespace string, options v1.ListOptions) ([]*extensions.ReplicaSet, error) {
rsList, err := c.Extensions().ReplicaSets(namespace).List(options)
if err != nil {
return nil, err
@@ -537,16 +539,16 @@ func listReplicaSets(deployment *extensions.Deployment, c clientset.Interface) (
}

// listPods lists all Pods the given deployment targets with the given client interface.
func listPods(deployment *extensions.Deployment, c clientset.Interface) (*api.PodList, error) {
func listPods(deployment *extensions.Deployment, c clientset.Interface) (*v1.PodList, error) {
return ListPods(deployment,
func(namespace string, options api.ListOptions) (*api.PodList, error) {
func(namespace string, options v1.ListOptions) (*v1.PodList, error) {
return c.Core().Pods(namespace).List(options)
})
}

// TODO: switch this to full namespacers
type rsListFunc func(string, api.ListOptions) ([]*extensions.ReplicaSet, error)
type podListFunc func(string, api.ListOptions) (*api.PodList, error)
type rsListFunc func(string, v1.ListOptions) ([]*extensions.ReplicaSet, error)
type podListFunc func(string, v1.ListOptions) (*v1.PodList, error)

// ListReplicaSets returns a slice of RSes the given deployment targets.
func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([]*extensions.ReplicaSet, error) {
@@ -558,18 +560,18 @@ func ListReplicaSets(deployment *extensions.Deployment, getRSList rsListFunc) ([
if err != nil {
return nil, err
}
options := api.ListOptions{LabelSelector: selector}
options := v1.ListOptions{LabelSelector: selector.String()}
return getRSList(namespace, options)
}

// ListPods returns a list of pods the given deployment targets.
func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.PodList, error) {
func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*v1.PodList, error) {
namespace := deployment.Namespace
selector, err := unversioned.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, err
}
options := api.ListOptions{LabelSelector: selector}
options := v1.ListOptions{LabelSelector: selector.String()}
return getPodList(namespace, options)
}

@@ -577,7 +579,7 @@ func ListPods(deployment *extensions.Deployment, getPodList podListFunc) (*api.P
// We ignore pod-template-hash because the hash result would be different upon podTemplateSpec API changes
// (e.g. the addition of a new field will cause the hash code to change)
// Note that we assume input podTemplateSpecs contain non-empty labels
func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) {
func equalIgnoreHash(template1, template2 v1.PodTemplateSpec) (bool, error) {
// First, compare template.Labels (ignoring hash)
labels1, labels2 := template1.Labels, template2.Labels
// The podTemplateSpec must have a non-empty label so that label selectors can find them.
@@ -597,7 +599,7 @@ func equalIgnoreHash(template1, template2 api.PodTemplateSpec) (bool, error) {

// Then, compare the templates without comparing their labels
template1.Labels, template2.Labels = nil, nil
result := api.Semantic.DeepEqual(template1, template2)
result := v1.Semantic.DeepEqual(template1, template2)
return result, nil
}

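equalIgnoreHash masks the pod-template-hash label on both sides before comparing, so a template and its hashed copy compare equal. A toy version of the label-masking step (helper names are ours):

    package main

    import (
        "fmt"
        "reflect"
    )

    const hashKey = "pod-template-hash"

    // withoutHash copies a label map, dropping the pod-template-hash entry.
    func withoutHash(in map[string]string) map[string]string {
        out := map[string]string{}
        for k, v := range in {
            if k != hashKey {
                out[k] = v
            }
        }
        return out
    }

    func main() {
        a := map[string]string{"app": "nginx", hashKey: "123"}
        b := map[string]string{"app": "nginx", hashKey: "456"}
        fmt.Println(reflect.DeepEqual(withoutHash(a), withoutHash(b))) // true
    }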
@@ -620,7 +622,7 @@ func FindNewReplicaSet(deployment *extensions.Deployment, rsList []*extensions.R

// FindOldReplicaSets returns the old replica sets targeted by the given Deployment, with the given PodList and slice of RSes.
// Note that the first set of old replica sets doesn't include the ones with no pods, and the second set of old replica sets include all old replica sets.
func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *api.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
func FindOldReplicaSets(deployment *extensions.Deployment, rsList []*extensions.ReplicaSet, podList *v1.PodList) ([]*extensions.ReplicaSet, []*extensions.ReplicaSet, error) {
// Find all pods whose labels match deployment.Spec.Selector, and corresponding replica sets for pods in podList.
// All pods and replica sets are labeled with pod-template-hash to prevent overlapping
oldRSs := map[string]*extensions.ReplicaSet{}
@@ -679,19 +681,19 @@ func WaitForPodsHashPopulated(c clientset.Interface, desiredGeneration int64, na
return false, err
}
return rs.Status.ObservedGeneration >= desiredGeneration &&
rs.Status.FullyLabeledReplicas == rs.Spec.Replicas, nil
rs.Status.FullyLabeledReplicas == *(rs.Spec.Replicas), nil
})
}

// LabelPodsWithHash labels all pods in the given podList with the new hash label.
// The returned bool value can be used to tell if all pods are actually labeled.
func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
func LabelPodsWithHash(podList *v1.PodList, rs *extensions.ReplicaSet, c clientset.Interface, namespace, hash string) (bool, error) {
allPodsLabeled := true
for _, pod := range podList.Items {
// Only label the pod that doesn't already have the new hash
if pod.Labels[extensions.DefaultDeploymentUniqueLabelKey] != hash {
if _, podUpdated, err := podutil.UpdatePodWithRetries(c.Core().Pods(namespace), &pod,
func(podToUpdate *api.Pod) error {
func(podToUpdate *v1.Pod) error {
// Precondition: the pod doesn't contain the new hash in its label.
if podToUpdate.Labels[extensions.DefaultDeploymentUniqueLabelKey] == hash {
return errors.ErrPreconditionViolated
@@ -713,9 +715,9 @@ func LabelPodsWithHash(podList *api.PodList, rs *extensions.ReplicaSet, c client
}

// GetNewReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplateSpec {
func GetNewReplicaSetTemplate(deployment *extensions.Deployment) v1.PodTemplateSpec {
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api.PodTemplateSpec{
newRSTemplate := v1.PodTemplateSpec{
ObjectMeta: deployment.Spec.Template.ObjectMeta,
Spec: deployment.Spec.Template.Spec,
}
@@ -726,8 +728,23 @@ func GetNewReplicaSetTemplate(deployment *extensions.Deployment) api.PodTemplate
return newRSTemplate
}

// TODO: remove the duplicate
// GetNewInternalReplicaSetTemplate returns the desired PodTemplateSpec for the new ReplicaSet corresponding to the given ReplicaSet.
func GetNewInternalReplicaSetTemplate(deployment *internalextensions.Deployment) api.PodTemplateSpec {
// newRS will have the same template as in deployment spec, plus a unique label in some cases.
newRSTemplate := api.PodTemplateSpec{
ObjectMeta: deployment.Spec.Template.ObjectMeta,
Spec: deployment.Spec.Template.Spec,
}
newRSTemplate.ObjectMeta.Labels = labelsutil.CloneAndAddLabel(
deployment.Spec.Template.ObjectMeta.Labels,
internalextensions.DefaultDeploymentUniqueLabelKey,
podutil.GetInternalPodTemplateSpecHash(newRSTemplate))
return newRSTemplate
}

// SetFromReplicaSetTemplate sets the desired PodTemplateSpec from a replica set template to the given deployment.
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template api.PodTemplateSpec) *extensions.Deployment {
func SetFromReplicaSetTemplate(deployment *extensions.Deployment, template v1.PodTemplateSpec) *extensions.Deployment {
deployment.Spec.Template.ObjectMeta = template.ObjectMeta
deployment.Spec.Template.Spec = template.Spec
deployment.Spec.Template.ObjectMeta.Labels = labelsutil.CloneAndRemoveLabel(
@@ -741,7 +758,7 @@ func GetReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet) int32 {
totalReplicas := int32(0)
for _, rs := range replicaSets {
if rs != nil {
totalReplicas += rs.Spec.Replicas
totalReplicas += *(rs.Spec.Replicas)
}
}
return totalReplicas
@@ -772,7 +789,7 @@ func GetAvailableReplicaCountForReplicaSets(replicaSets []*extensions.ReplicaSet
// IsPodAvailable return true if the pod is available.
// TODO: Remove this once we start using replica set status for calculating available pods
// for a deployment.
func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
func IsPodAvailable(pod *v1.Pod, minReadySeconds int32, now time.Time) bool {
if !controller.IsPodActive(pod) {
return false
}
@@ -780,7 +797,7 @@ func IsPodAvailable(pod *api.Pod, minReadySeconds int32, now time.Time) bool {
// If so, this pod is ready
for _, c := range pod.Status.Conditions {
// we only care about pod ready conditions
if c.Type == api.PodReady && c.Status == api.ConditionTrue {
if c.Type == v1.PodReady && c.Status == v1.ConditionTrue {
glog.V(4).Infof("Comparing pod %s/%s ready condition last transition time %s + minReadySeconds %d with now %s.", pod.Namespace, pod.Name, c.LastTransitionTime.String(), minReadySeconds, now.String())
// 2 cases that this ready condition is valid (passed minReadySeconds, i.e. the pod is available):
// 1. minReadySeconds == 0, or
@@ -802,8 +819,8 @@ func IsRollingUpdate(deployment *extensions.Deployment) bool {
// DeploymentComplete considers a deployment to be complete once its desired replicas equals its
// updatedReplicas and it doesn't violate minimum availability.
func DeploymentComplete(deployment *extensions.Deployment, newStatus *extensions.DeploymentStatus) bool {
return newStatus.UpdatedReplicas == deployment.Spec.Replicas &&
newStatus.AvailableReplicas >= deployment.Spec.Replicas-MaxUnavailable(*deployment)
return newStatus.UpdatedReplicas == *(deployment.Spec.Replicas) &&
newStatus.AvailableReplicas >= *(deployment.Spec.Replicas)-MaxUnavailable(*deployment)
}

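DeploymentComplete boils down to two comparisons. With a desired count of 10 and a maxUnavailable of 2, updatedReplicas must be exactly 10 and availableReplicas at least 8:

    package main

    import "fmt"

    func deploymentComplete(desired, maxUnavailable, updated, available int32) bool {
        return updated == desired && available >= desired-maxUnavailable
    }

    func main() {
        fmt.Println(deploymentComplete(10, 2, 10, 8)) // true
        fmt.Println(deploymentComplete(10, 2, 9, 10)) // false: one replica is still old
    }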
// DeploymentProgressing reports progress for a deployment. Progress is estimated by comparing the
@@ -857,24 +874,24 @@ func NewRSNewReplicas(deployment *extensions.Deployment, allRSs []*extensions.Re
switch deployment.Spec.Strategy.Type {
case extensions.RollingUpdateDeploymentStrategyType:
// Check if we can scale up.
maxSurge, err := intstrutil.GetValueFromIntOrPercent(&deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(deployment.Spec.Replicas), true)
maxSurge, err := intstrutil.GetValueFromIntOrPercent(deployment.Spec.Strategy.RollingUpdate.MaxSurge, int(*(deployment.Spec.Replicas)), true)
if err != nil {
return 0, err
}
// Find the total number of pods
currentPodCount := GetReplicaCountForReplicaSets(allRSs)
maxTotalPods := deployment.Spec.Replicas + int32(maxSurge)
maxTotalPods := *(deployment.Spec.Replicas) + int32(maxSurge)
if currentPodCount >= maxTotalPods {
// Cannot scale up.
return newRS.Spec.Replicas, nil
return *(newRS.Spec.Replicas), nil
}
// Scale up.
scaleUpCount := maxTotalPods - currentPodCount
// Do not exceed the number of desired replicas.
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(deployment.Spec.Replicas-newRS.Spec.Replicas)))
return newRS.Spec.Replicas + scaleUpCount, nil
scaleUpCount = int32(integer.IntMin(int(scaleUpCount), int(*(deployment.Spec.Replicas)-*(newRS.Spec.Replicas))))
return *(newRS.Spec.Replicas) + scaleUpCount, nil
case extensions.RecreateDeploymentStrategyType:
return deployment.Spec.Replicas, nil
return *(deployment.Spec.Replicas), nil
default:
return 0, fmt.Errorf("deployment type %v isn't supported", deployment.Spec.Strategy.Type)
}
@@ -892,7 +909,7 @@ func IsSaturated(deployment *extensions.Deployment, rs *extensions.ReplicaSet) b
if err != nil {
return false
}
return rs.Spec.Replicas == deployment.Spec.Replicas && int32(desired) == deployment.Spec.Replicas
return *(rs.Spec.Replicas) == *(deployment.Spec.Replicas) && int32(desired) == *(deployment.Spec.Replicas)
}

// WaitForObservedDeployment polls for deployment to be updated so that deployment.Status.ObservedGeneration >= desiredGeneration.

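NewRSNewReplicas caps the new replica set by both the surge budget and the desired count. Tracing the rolling-update branch with illustrative numbers — 10 desired, maxSurge 3, 11 pods currently running, new replica set at 8:

    package main

    import "fmt"

    func minInt32(a, b int32) int32 {
        if a < b {
            return a
        }
        return b
    }

    func main() {
        desired := int32(10)         // *(deployment.Spec.Replicas)
        maxSurge := int32(3)
        currentPodCount := int32(11) // across all replica sets
        newRSReplicas := int32(8)    // *(newRS.Spec.Replicas)

        maxTotalPods := desired + maxSurge             // 13
        scaleUpCount := maxTotalPods - currentPodCount // 2
        // Never exceed the number of desired replicas.
        scaleUpCount = minInt32(scaleUpCount, desired-newRSReplicas)
        fmt.Println(newRSReplicas + scaleUpCount) // 10
    }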
@@ -26,8 +26,9 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
@@ -74,22 +75,22 @@ func addUpdateRSReactor(fakeClient *fake.Clientset) *fake.Clientset {

func addUpdatePodsReactor(fakeClient *fake.Clientset) *fake.Clientset {
fakeClient.AddReactor("update", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := action.(core.UpdateAction).GetObject().(*api.Pod)
obj := action.(core.UpdateAction).GetObject().(*v1.Pod)
return true, obj, nil
})
return fakeClient
}

func newPod(now time.Time, ready bool, beforeSec int) api.Pod {
conditionStatus := api.ConditionFalse
func newPod(now time.Time, ready bool, beforeSec int) v1.Pod {
conditionStatus := v1.ConditionFalse
if ready {
conditionStatus = api.ConditionTrue
conditionStatus = v1.ConditionTrue
}
return api.Pod{
Status: api.PodStatus{
Conditions: []api.PodCondition{
return v1.Pod{
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Type: v1.PodReady,
LastTransitionTime: unversioned.NewTime(now.Add(-1 * time.Duration(beforeSec) * time.Second)),
Status: conditionStatus,
},
@@ -99,27 +100,27 @@ func newPod(now time.Time, ready bool, beforeSec int) api.Pod {
}

// generatePodFromRS creates a pod, with the input ReplicaSet's selector and its template
func generatePodFromRS(rs extensions.ReplicaSet) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{
func generatePodFromRS(rs extensions.ReplicaSet) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: rs.Labels,
},
Spec: rs.Spec.Template.Spec,
}
}

func generatePod(labels map[string]string, image string) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{
func generatePod(labels map[string]string, image string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: image,
Image: image,
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: v1.TerminationMessagePathDefault,
},
},
},
@@ -128,24 +129,24 @@ func generatePod(labels map[string]string, image string) api.Pod {

func generateRSWithLabel(labels map[string]string, image string) extensions.ReplicaSet {
return extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName("replicaset"),
ObjectMeta: v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName("replicaset"),
Labels: labels,
},
Spec: extensions.ReplicaSetSpec{
Replicas: 1,
Replicas: func(i int32) *int32 { return &i }(1),
Selector: &unversioned.LabelSelector{MatchLabels: labels},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: labels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: image,
Image: image,
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: v1.TerminationMessagePathDefault,
},
},
},
@@ -158,11 +159,12 @@ func generateRSWithLabel(labels map[string]string, image string) extensions.Repl
func generateRS(deployment extensions.Deployment) extensions.ReplicaSet {
template := GetNewReplicaSetTemplate(&deployment)
return extensions.ReplicaSet{
ObjectMeta: api.ObjectMeta{
Name: api.SimpleNameGenerator.GenerateName("replicaset"),
ObjectMeta: v1.ObjectMeta{
Name: v1.SimpleNameGenerator.GenerateName("replicaset"),
Labels: template.Labels,
},
Spec: extensions.ReplicaSetSpec{
Replicas: func() *int32 { i := int32(0); return &i }(),
Template: template,
Selector: &unversioned.LabelSelector{MatchLabels: template.Labels},
},
@@ -174,29 +176,29 @@ func generateDeployment(image string) extensions.Deployment {
podLabels := map[string]string{"name": image}
terminationSec := int64(30)
return extensions.Deployment{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: image,
},
Spec: extensions.DeploymentSpec{
Replicas: 1,
Replicas: func(i int32) *int32 { return &i }(1),
Selector: &unversioned.LabelSelector{MatchLabels: podLabels},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: podLabels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: image,
Image: image,
ImagePullPolicy: api.PullAlways,
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullAlways,
TerminationMessagePath: v1.TerminationMessagePathDefault,
},
},
DNSPolicy: api.DNSClusterFirst,
DNSPolicy: v1.DNSClusterFirst,
TerminationGracePeriodSeconds: &terminationSec,
RestartPolicy: api.RestartPolicyAlways,
SecurityContext: &api.PodSecurityContext{},
RestartPolicy: v1.RestartPolicyAlways,
SecurityContext: &v1.PodSecurityContext{},
},
},
},
@@ -215,7 +217,7 @@ func TestGetNewRC(t *testing.T) {
{
"No new ReplicaSet",
[]runtime.Object{
&api.PodList{},
&v1.PodList{},
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
generateRS(generateDeployment("foo")),
@@ -228,7 +230,7 @@ func TestGetNewRC(t *testing.T) {
{
"Has new ReplicaSet",
[]runtime.Object{
&api.PodList{},
&v1.PodList{},
&extensions.ReplicaSetList{
Items: []extensions.ReplicaSet{
generateRS(generateDeployment("foo")),
@@ -253,7 +255,7 @@ func TestGetNewRC(t *testing.T) {
if err != nil {
t.Errorf("In test case %s, got unexpected error %v", test.test, err)
}
if !api.Semantic.DeepEqual(rs, test.expected) {
if !v1.Semantic.DeepEqual(rs, test.expected) {
t.Errorf("In test case %s, expected %#v, got %#v", test.test, test.expected, rs)
}
}
@@ -262,25 +264,25 @@ func TestGetNewRC(t *testing.T) {
func TestGetOldRCs(t *testing.T) {
newDeployment := generateDeployment("nginx")
newRS := generateRS(newDeployment)
newRS.Status.FullyLabeledReplicas = newRS.Spec.Replicas
newRS.Status.FullyLabeledReplicas = *(newRS.Spec.Replicas)
newPod := generatePodFromRS(newRS)

// create 2 old deployments and related replica sets/pods, with the same labels but different template
oldDeployment := generateDeployment("nginx")
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
oldRS := generateRS(oldDeployment)
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
oldPod := generatePodFromRS(oldRS)
oldDeployment2 := generateDeployment("nginx")
oldDeployment2.Spec.Template.Spec.Containers[0].Name = "nginx-old-2"
oldRS2 := generateRS(oldDeployment2)
oldRS2.Status.FullyLabeledReplicas = oldRS2.Spec.Replicas
oldRS2.Status.FullyLabeledReplicas = *(oldRS2.Spec.Replicas)
oldPod2 := generatePodFromRS(oldRS2)

// create 1 ReplicaSet that existed before the deployment, with the same labels as the deployment
existedPod := generatePod(newDeployment.Spec.Template.Labels, "foo")
existedRS := generateRSWithLabel(newDeployment.Spec.Template.Labels, "foo")
existedRS.Status.FullyLabeledReplicas = existedRS.Spec.Replicas
existedRS.Status.FullyLabeledReplicas = *(existedRS.Spec.Replicas)

tests := []struct {
test string
@@ -290,8 +292,8 @@ func TestGetOldRCs(t *testing.T) {
{
"No old ReplicaSets",
[]runtime.Object{
&api.PodList{
Items: []api.Pod{
&v1.PodList{
Items: []v1.Pod{
||||
generatePod(newDeployment.Spec.Template.Labels, "foo"),
|
||||
generatePod(newDeployment.Spec.Template.Labels, "bar"),
|
||||
newPod,
|
||||
@@ -310,8 +312,8 @@ func TestGetOldRCs(t *testing.T) {
|
||||
{
|
||||
"Has old ReplicaSet",
|
||||
[]runtime.Object{
|
||||
&api.PodList{
|
||||
Items: []api.Pod{
|
||||
&v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
oldPod,
|
||||
oldPod2,
|
||||
generatePod(map[string]string{"name": "bar"}, "bar"),
|
||||
@@ -359,14 +361,14 @@ func TestGetOldRCs(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) api.PodTemplateSpec {
|
||||
return api.PodTemplateSpec{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
func generatePodTemplateSpec(name, nodeName string, annotations, labels map[string]string) v1.PodTemplateSpec {
|
||||
return v1.PodTemplateSpec{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Annotations: annotations,
|
||||
Labels: labels,
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Spec: v1.PodSpec{
|
||||
NodeName: nodeName,
|
||||
},
|
||||
}
|
||||
@@ -375,7 +377,7 @@ func generatePodTemplateSpec(name, nodeName string, annotations, labels map[stri
|
||||
func TestEqualIgnoreHash(t *testing.T) {
|
||||
tests := []struct {
|
||||
test string
|
||||
former, latter api.PodTemplateSpec
|
||||
former, latter v1.PodTemplateSpec
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
@@ -429,7 +431,7 @@ func TestEqualIgnoreHash(t *testing.T) {
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
runTest := func(t1, t2 api.PodTemplateSpec, reversed bool) {
|
||||
runTest := func(t1, t2 v1.PodTemplateSpec, reversed bool) {
|
||||
// Set up
|
||||
t1Copy, err := api.Scheme.DeepCopy(t1)
|
||||
if err != nil {
|
||||
@@ -472,7 +474,7 @@ func TestFindNewReplicaSet(t *testing.T) {
|
||||
oldDeployment := generateDeployment("nginx")
|
||||
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
|
||||
oldRS := generateRS(oldDeployment)
|
||||
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
|
||||
oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
|
||||
|
||||
tests := []struct {
|
||||
test string
|
||||
@@ -508,7 +510,7 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
oldDeployment := generateDeployment("nginx")
|
||||
oldDeployment.Spec.Template.Spec.Containers[0].Name = "nginx-old-1"
|
||||
oldRS := generateRS(oldDeployment)
|
||||
oldRS.Status.FullyLabeledReplicas = oldRS.Spec.Replicas
|
||||
oldRS.Status.FullyLabeledReplicas = *(oldRS.Spec.Replicas)
|
||||
newPod := generatePodFromRS(newRS)
|
||||
oldPod := generatePodFromRS(oldRS)
|
||||
|
||||
@@ -516,15 +518,15 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
test string
|
||||
deployment extensions.Deployment
|
||||
rsList []*extensions.ReplicaSet
|
||||
podList *api.PodList
|
||||
podList *v1.PodList
|
||||
expected []*extensions.ReplicaSet
|
||||
}{
|
||||
{
|
||||
test: "Get old ReplicaSets",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&newRS, &oldRS},
|
||||
podList: &api.PodList{
|
||||
Items: []api.Pod{
|
||||
podList: &v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
newPod,
|
||||
oldPod,
|
||||
},
|
||||
@@ -535,8 +537,8 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
test: "Get old ReplicaSets with no new ReplicaSet",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&oldRS},
|
||||
podList: &api.PodList{
|
||||
Items: []api.Pod{
|
||||
podList: &v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
oldPod,
|
||||
},
|
||||
},
|
||||
@@ -546,8 +548,8 @@ func TestFindOldReplicaSets(t *testing.T) {
|
||||
test: "Get empty old ReplicaSets",
|
||||
deployment: deployment,
|
||||
rsList: []*extensions.ReplicaSet{&newRS},
|
||||
podList: &api.PodList{
|
||||
Items: []api.Pod{
|
||||
podList: &v1.PodList{
|
||||
Items: []v1.Pod{
|
||||
newPod,
|
||||
},
|
||||
},
|
||||
@@ -584,10 +586,10 @@ func equal(rss1, rss2 []*extensions.ReplicaSet) bool {
|
||||
|
||||
func TestGetReplicaCountForReplicaSets(t *testing.T) {
|
||||
rs1 := generateRS(generateDeployment("foo"))
|
||||
rs1.Spec.Replicas = 1
|
||||
*(rs1.Spec.Replicas) = 1
|
||||
rs1.Status.Replicas = 2
|
||||
rs2 := generateRS(generateDeployment("bar"))
|
||||
rs2.Spec.Replicas = 2
|
||||
*(rs2.Spec.Replicas) = 2
|
||||
rs2.Status.Replicas = 3
|
||||
|
||||
tests := []struct {
|
||||
@@ -715,16 +717,16 @@ func TestNewRSNewReplicas(t *testing.T) {
|
||||
newDeployment := generateDeployment("nginx")
|
||||
newRC := generateRS(newDeployment)
|
||||
rs5 := generateRS(newDeployment)
|
||||
rs5.Spec.Replicas = 5
|
||||
*(rs5.Spec.Replicas) = 5
|
||||
|
||||
for _, test := range tests {
|
||||
newDeployment.Spec.Replicas = test.depReplicas
|
||||
*(newDeployment.Spec.Replicas) = test.depReplicas
|
||||
newDeployment.Spec.Strategy = extensions.DeploymentStrategy{Type: test.strategyType}
|
||||
newDeployment.Spec.Strategy.RollingUpdate = &extensions.RollingUpdateDeployment{
|
||||
MaxUnavailable: intstr.FromInt(1),
|
||||
MaxSurge: intstr.FromInt(test.maxSurge),
|
||||
MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(1),
|
||||
MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(test.maxSurge),
|
||||
}
|
||||
newRC.Spec.Replicas = test.newRSReplicas
|
||||
*(newRC.Spec.Replicas) = test.newRSReplicas
|
||||
rs, err := NewRSNewReplicas(&newDeployment, []*extensions.ReplicaSet{&rs5}, &newRC)
|
||||
if err != nil {
|
||||
t.Errorf("In test case %s, got unexpected error %v", test.test, err)
|
||||
@@ -739,7 +741,7 @@ var (
|
||||
condProgressing = func() extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentProgressing,
|
||||
Status: api.ConditionFalse,
|
||||
Status: v1.ConditionFalse,
|
||||
Reason: "ForSomeReason",
|
||||
}
|
||||
}
|
||||
@@ -747,7 +749,7 @@ var (
|
||||
condProgressing2 = func() extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentProgressing,
|
||||
Status: api.ConditionTrue,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "BecauseItIs",
|
||||
}
|
||||
}
|
||||
@@ -755,7 +757,7 @@ var (
|
||||
condAvailable = func() extensions.DeploymentCondition {
|
||||
return extensions.DeploymentCondition{
|
||||
Type: extensions.DeploymentAvailable,
|
||||
Status: api.ConditionTrue,
|
||||
Status: v1.ConditionTrue,
|
||||
Reason: "AwesomeController",
|
||||
}
|
||||
}
|
||||
@@ -775,7 +777,7 @@ func TestGetCondition(t *testing.T) {
|
||||
|
||||
status extensions.DeploymentStatus
|
||||
condType extensions.DeploymentConditionType
|
||||
condStatus api.ConditionStatus
|
||||
condStatus v1.ConditionStatus
|
||||
condReason string
|
||||
|
||||
expected bool
|
||||
@@ -897,10 +899,11 @@ func TestDeploymentComplete(t *testing.T) {
|
||||
deployment := func(desired, current, updated, available, maxUnavailable int32) *extensions.Deployment {
|
||||
return &extensions.Deployment{
|
||||
Spec: extensions.DeploymentSpec{
|
||||
Replicas: desired,
|
||||
Replicas: &desired,
|
||||
Strategy: extensions.DeploymentStrategy{
|
||||
RollingUpdate: &extensions.RollingUpdateDeployment{
|
||||
MaxUnavailable: intstr.FromInt(int(maxUnavailable)),
|
||||
MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
|
||||
MaxSurge: func() *intstr.IntOrString { x := intstr.FromInt(0); return &x }(),
|
||||
},
|
||||
Type: extensions.RollingUpdateDeploymentStrategyType,
|
||||
},
|
||||
@@ -1047,7 +1050,7 @@ func TestDeploymentTimedOut(t *testing.T) {
|
||||
timeFn := func(min, sec int) time.Time {
|
||||
return time.Date(2016, 1, 1, 0, min, sec, 0, time.UTC)
|
||||
}
|
||||
deployment := func(condType extensions.DeploymentConditionType, status api.ConditionStatus, pds *int32, from time.Time) extensions.Deployment {
|
||||
deployment := func(condType extensions.DeploymentConditionType, status v1.ConditionStatus, pds *int32, from time.Time) extensions.Deployment {
|
||||
return extensions.Deployment{
|
||||
Spec: extensions.DeploymentSpec{
|
||||
ProgressDeadlineSeconds: pds,
|
||||
@@ -1075,21 +1078,21 @@ func TestDeploymentTimedOut(t *testing.T) {
|
||||
{
|
||||
name: "no progressDeadlineSeconds specified - no timeout",
|
||||
|
||||
d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, null, timeFn(1, 9)),
|
||||
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, null, timeFn(1, 9)),
|
||||
nowFn: func() time.Time { return timeFn(1, 20) },
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:09 => 11s",
|
||||
|
||||
d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, &ten, timeFn(1, 9)),
|
||||
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, &ten, timeFn(1, 9)),
|
||||
nowFn: func() time.Time { return timeFn(1, 20) },
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "progressDeadlineSeconds: 10s, now - started => 00:01:20 - 00:01:11 => 9s",
|
||||
|
||||
d: deployment(extensions.DeploymentProgressing, api.ConditionTrue, &ten, timeFn(1, 11)),
|
||||
d: deployment(extensions.DeploymentProgressing, v1.ConditionTrue, &ten, timeFn(1, 11)),
|
||||
nowFn: func() time.Time { return timeFn(1, 20) },
|
||||
expected: false,
|
||||
},
|
||||
|
||||
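The test changes above hinge on the versioned extensions/v1beta1 types making Spec.Replicas a *int32 rather than a plain int32, which is why assignments become pointer writes like *(rs1.Spec.Replicas) = 1 and literals get wrapped in closures. A minimal standalone sketch of that idiom (the newInt32 helper name is illustrative, not part of this commit):

package main

import "fmt"

// newInt32 returns a pointer to a copy of i; the tests above inline the
// same idea as func(i int32) *int32 { return &i }(1).
func newInt32(i int32) *int32 {
	return &i
}

func main() {
	replicas := newInt32(1) // versioned Spec.Replicas is *int32
	*replicas = 5           // written through the pointer, as in *(rs5.Spec.Replicas) = 5
	fmt.Println(*replicas)  // read back with a dereference
}
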
@@ -23,12 +23,13 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
@@ -58,7 +59,7 @@ const DeletionTimeout = 2 * 60 * time.Second
type updater func(*policy.PodDisruptionBudget) error

type DisruptionController struct {
kubeClient internalclientset.Interface
kubeClient clientset.Interface

pdbStore cache.Store
pdbController *cache.Controller
@@ -98,9 +99,9 @@ type controllerAndScale struct {

// podControllerFinder is a function type that maps a pod to a list of
// controllers and their scale.
type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)
type podControllerFinder func(*v1.Pod) ([]controllerAndScale, error)

func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface) *DisruptionController {
func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
podController: podInformer.GetController(),
@@ -108,7 +109,7 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i
recheckQueue: workqueue.NewNamedDelayingQueue("disruption-recheck"),
broadcaster: record.NewBroadcaster(),
}
dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
dc.recorder = dc.broadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})

dc.getUpdater = func() updater { return dc.writePdbStatus }

@@ -122,11 +123,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

dc.pdbStore, dc.pdbController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Policy().PodDisruptionBudgets(v1.NamespaceAll).Watch(options)
},
},
&policy.PodDisruptionBudget{},
@@ -141,14 +142,14 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
},
},
&api.ReplicationController{},
&v1.ReplicationController{},
30*time.Second,
cache.ResourceEventHandlerFuncs{},
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
@@ -158,11 +159,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

dc.rsLister.Indexer, dc.rsController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
},
},
&extensions.ReplicaSet{},
@@ -174,11 +175,11 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient i

dc.dIndexer, dc.dController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Extensions().Deployments(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return dc.kubeClient.Extensions().Deployments(v1.NamespaceAll).Watch(options)
},
},
&extensions.Deployment{},
@@ -204,7 +205,7 @@ func (dc *DisruptionController) finders() []podControllerFinder {
}

// getPodReplicaSets finds replicasets which have no matching deployments.
func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAndScale, error) {
func (dc *DisruptionController) getPodReplicaSets(pod *v1.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{}
rss, err := dc.rsLister.GetPodReplicaSets(pod)
// GetPodReplicaSets returns an error only if no ReplicaSets are found. We
@@ -220,7 +221,7 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
if err == nil { // A deployment was found, so this finder will not count this RS.
continue
}
controllerScale[rs.UID] = rs.Spec.Replicas
controllerScale[rs.UID] = *(rs.Spec.Replicas)
}

for uid, scale := range controllerScale {
@@ -231,7 +232,7 @@ func (dc *DisruptionController) getPodReplicaSets(pod *api.Pod) ([]controllerAnd
}

// getPodDeployments finds deployments for any replicasets which are being managed by deployments.
func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAndScale, error) {
func (dc *DisruptionController) getPodDeployments(pod *v1.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{}
rss, err := dc.rsLister.GetPodReplicaSets(pod)
// GetPodReplicaSets returns an error only if no ReplicaSets are found. We
@@ -248,7 +249,7 @@ func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAnd
continue
}
for _, d := range ds {
controllerScale[d.UID] = d.Spec.Replicas
controllerScale[d.UID] = *(d.Spec.Replicas)
}
}

@@ -259,12 +260,12 @@ func (dc *DisruptionController) getPodDeployments(pod *api.Pod) ([]controllerAnd
return cas, nil
}

func (dc *DisruptionController) getPodReplicationControllers(pod *api.Pod) ([]controllerAndScale, error) {
func (dc *DisruptionController) getPodReplicationControllers(pod *v1.Pod) ([]controllerAndScale, error) {
cas := []controllerAndScale{}
rcs, err := dc.rcLister.GetPodControllers(pod)
if err == nil {
for _, rc := range rcs {
cas = append(cas, controllerAndScale{UID: rc.UID, scale: rc.Spec.Replicas})
cas = append(cas, controllerAndScale{UID: rc.UID, scale: *(rc.Spec.Replicas)})
}
}
return cas, nil
@@ -274,7 +275,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
glog.V(0).Infof("Starting disruption controller")
if dc.kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
dc.broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
dc.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
@@ -310,7 +311,7 @@ func (dc *DisruptionController) removeDb(obj interface{}) {
}

func (dc *DisruptionController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)
glog.V(4).Infof("addPod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
@@ -322,7 +323,7 @@ func (dc *DisruptionController) addPod(obj interface{}) {
}

func (dc *DisruptionController) updatePod(old, cur interface{}) {
pod := cur.(*api.Pod)
pod := cur.(*v1.Pod)
glog.V(4).Infof("updatePod called on pod %q", pod.Name)
pdb := dc.getPdbForPod(pod)
if pdb == nil {
@@ -334,7 +335,7 @@ func (dc *DisruptionController) updatePod(old, cur interface{}) {
}

func (dc *DisruptionController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)
// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the pod
@@ -346,7 +347,7 @@ func (dc *DisruptionController) deletePod(obj interface{}) {
glog.Errorf("Couldn't get object from tombstone %+v", obj)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %+v", obj)
return
@@ -380,7 +381,7 @@ func (dc *DisruptionController) enqueuePdbForRecheck(pdb *policy.PodDisruptionBu
dc.recheckQueue.AddAfter(key, delay)
}

func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruptionBudget {
func (dc *DisruptionController) getPdbForPod(pod *v1.Pod) *policy.PodDisruptionBudget {
// GetPodPodDisruptionBudgets returns an error only if no
// PodDisruptionBudgets are found. We don't return that as an error to the
// caller.
@@ -393,25 +394,25 @@ func (dc *DisruptionController) getPdbForPod(pod *api.Pod) *policy.PodDisruption
if len(pdbs) > 1 {
msg := fmt.Sprintf("Pod %q/%q matches multiple PodDisruptionBudgets. Chose %q arbitrarily.", pod.Namespace, pod.Name, pdbs[0].Name)
glog.Warning(msg)
dc.recorder.Event(pod, api.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
dc.recorder.Event(pod, v1.EventTypeWarning, "MultiplePodDisruptionBudgets", msg)
}
return &pdbs[0]
}

func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*api.Pod, error) {
func (dc *DisruptionController) getPodsForPdb(pdb *policy.PodDisruptionBudget) ([]*v1.Pod, error) {
sel, err := unversioned.LabelSelectorAsSelector(pdb.Spec.Selector)
if sel.Empty() {
return []*api.Pod{}, nil
return []*v1.Pod{}, nil
}
if err != nil {
return []*api.Pod{}, err
return []*v1.Pod{}, err
}
pods, err := dc.podLister.Pods(pdb.Namespace).List(sel)
if err != nil {
return []*api.Pod{}, err
return []*v1.Pod{}, err
}
// TODO: Do we need to copy here?
result := make([]*api.Pod, 0, len(pods))
result := make([]*v1.Pod, 0, len(pods))
for i := range pods {
result = append(result, &(*pods[i]))
}
@@ -485,16 +486,16 @@ func (dc *DisruptionController) sync(key string) error {
func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
pods, err := dc.getPodsForPdb(pdb)
if err != nil {
dc.recorder.Eventf(pdb, api.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
dc.recorder.Eventf(pdb, v1.EventTypeWarning, "NoPods", "Failed to get pods: %v", err)
return err
}
if len(pods) == 0 {
dc.recorder.Eventf(pdb, api.EventTypeNormal, "NoPods", "No matching pods found")
dc.recorder.Eventf(pdb, v1.EventTypeNormal, "NoPods", "No matching pods found")
}

expectedCount, desiredHealthy, err := dc.getExpectedPodCount(pdb, pods)
if err != nil {
dc.recorder.Eventf(pdb, api.EventTypeNormal, "ExpectedPods", "Failed to calculate the number of expected pods: %v", err)
dc.recorder.Eventf(pdb, v1.EventTypeNormal, "ExpectedPods", "Failed to calculate the number of expected pods: %v", err)
return err
}

@@ -512,7 +513,7 @@ func (dc *DisruptionController) trySync(pdb *policy.PodDisruptionBudget) error {
return err
}

func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*api.Pod) (expectedCount, desiredHealthy int32, err error) {
func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBudget, pods []*v1.Pod) (expectedCount, desiredHealthy int32, err error) {
err = nil
// TODO(davidopp): consider making the way expectedCount and rules about
// permitted controller configurations (specifically, considering it an error
@@ -554,11 +555,11 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
}
if controllerCount == 0 {
err = fmt.Errorf("asked for percentage, but found no controllers for pod %q", pod.Name)
dc.recorder.Event(pdb, api.EventTypeWarning, "NoControllers", err.Error())
dc.recorder.Event(pdb, v1.EventTypeWarning, "NoControllers", err.Error())
return
} else if controllerCount > 1 {
err = fmt.Errorf("pod %q has %v>1 controllers", pod.Name, controllerCount)
dc.recorder.Event(pdb, api.EventTypeWarning, "TooManyControllers", err.Error())
dc.recorder.Event(pdb, v1.EventTypeWarning, "TooManyControllers", err.Error())
return
}
}
@@ -581,7 +582,7 @@ func (dc *DisruptionController) getExpectedPodCount(pdb *policy.PodDisruptionBud
return
}

func countHealthyPods(pods []*api.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
func countHealthyPods(pods []*v1.Pod, disruptedPods map[string]unversioned.Time, currentTime time.Time) (currentHealthy int32) {
Pod:
for _, pod := range pods {
// Pod is being deleted.
@@ -592,7 +593,7 @@ Pod:
if disruptionTime, found := disruptedPods[pod.Name]; found && disruptionTime.Time.Add(DeletionTimeout).After(currentTime) {
continue
}
if api.IsPodReady(pod) {
if v1.IsPodReady(pod) {
currentHealthy++
continue Pod
}
@@ -603,7 +604,7 @@ Pod:

// Builds a new PodDisruption map, possibly removing items that refer to non-existing, already deleted
// or not-deleted-at-all items. Also returns information about when this check should be repeated.
func (dc *DisruptionController) buildDisruptedPodMap(pods []*api.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) {
func (dc *DisruptionController) buildDisruptedPodMap(pods []*v1.Pod, pdb *policy.PodDisruptionBudget, currentTime time.Time) (map[string]unversioned.Time, *time.Time) {
disruptedPods := pdb.Status.DisruptedPods
result := make(map[string]unversioned.Time)
var recheckTime *time.Time
@@ -625,7 +626,7 @@ func (dc *DisruptionController) buildDisruptedPodMap(pods []*api.Pod, pdb *polic
if expectedDeletion.Before(currentTime) {
glog.V(1).Infof("Pod %s/%s was expected to be deleted at %s but it wasn't, updating pdb %s/%s",
pod.Namespace, pod.Name, disruptionTime.String(), pdb.Namespace, pdb.Name)
dc.recorder.Eventf(pod, api.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
dc.recorder.Eventf(pod, v1.EventTypeWarning, "NotDeleted", "Pod was expected by PDB %s/%s to be deleted but it wasn't",
pdb.Namespace, pdb.Name)
} else {
if recheckTime == nil || expectedDeletion.Before(*recheckTime) {

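Every informer rebuilt in this file follows the same shape: v1.ListOptions replaces api.ListOptions in the ListFunc/WatchFunc signatures and a versioned object type is handed to the cache. A condensed sketch of that pattern, assuming the release_1_5 clientset and the 1.5-era cache, runtime, and watch packages imported above (the pods example itself is illustrative, not from this file):

store, ctrl := cache.NewInformer(
	&cache.ListWatch{
		// Options are now the versioned v1.ListOptions, not api.ListOptions.
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			return kubeClient.Core().Pods(v1.NamespaceAll).List(options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			return kubeClient.Core().Pods(v1.NamespaceAll).Watch(options)
		},
	},
	&v1.Pod{},      // versioned object type, not the internal api.Pod
	30*time.Second, // resync period
	cache.ResourceEventHandlerFuncs{},
)
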
@@ -25,9 +25,10 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
policy "k8s.io/kubernetes/pkg/apis/policy/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
@@ -95,7 +96,7 @@ func newFakeDisruptionController() (*DisruptionController, *pdbStates) {
broadcaster: record.NewBroadcaster(),
}

dc.recorder = dc.broadcaster.NewRecorder(api.EventSource{Component: "disruption_test"})
dc.recorder = dc.broadcaster.NewRecorder(v1.EventSource{Component: "disruption_test"})

return dc, ps
}
@@ -115,11 +116,11 @@ func newSelFooBar() *unversioned.LabelSelector {
func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*policy.PodDisruptionBudget, string) {

pdb := &policy.PodDisruptionBudget{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: policy.PodDisruptionBudgetSpec{
@@ -136,21 +137,21 @@ func newPodDisruptionBudget(t *testing.T, minAvailable intstr.IntOrString) (*pol
return pdb, pdbName
}

func newPod(t *testing.T, name string) (*api.Pod, string) {
pod := &api.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
func newPod(t *testing.T, name string) (*v1.Pod, string) {
pod := &v1.Pod{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Annotations: make(map[string]string),
Name: name,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: api.PodSpec{},
Status: api.PodStatus{
Conditions: []api.PodCondition{
{Type: api.PodReady, Status: api.ConditionTrue},
Spec: v1.PodSpec{},
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{Type: v1.PodReady, Status: v1.ConditionTrue},
},
},
}
@@ -163,18 +164,18 @@ func newPod(t *testing.T, name string) (*api.Pod, string) {
return pod, podName
}

func newReplicationController(t *testing.T, size int32) (*api.ReplicationController, string) {
rc := &api.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
func newReplicationController(t *testing.T, size int32) (*v1.ReplicationController, string) {
rc := &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: api.ReplicationControllerSpec{
Replicas: size,
Spec: v1.ReplicationControllerSpec{
Replicas: &size,
Selector: fooBar(),
},
}
@@ -189,16 +190,16 @@ func newReplicationController(t *testing.T, size int32) (*api.ReplicationControl

func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {
d := &extensions.Deployment{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: extensions.DeploymentSpec{
Replicas: size,
Replicas: &size,
Selector: newSelFooBar(),
},
}
@@ -213,16 +214,16 @@ func newDeployment(t *testing.T, size int32) (*extensions.Deployment, string) {

func newReplicaSet(t *testing.T, size int32) (*extensions.ReplicaSet, string) {
rs := &extensions.ReplicaSet{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
Labels: fooBar(),
},
Spec: extensions.ReplicaSetSpec{
Replicas: size,
Replicas: &size,
Selector: newSelFooBar(),
},
}
@@ -274,7 +275,7 @@ func TestUnavailable(t *testing.T) {
dc.sync(pdbName)

// Add three pods, verifying that the counts go up at each step.
pods := []*api.Pod{}
pods := []*v1.Pod{}
for i := int32(0); i < 4; i++ {
ps.VerifyPdbStatus(t, pdbName, 0, i, 3, i, map[string]unversioned.Time{})
pod, _ := newPod(t, fmt.Sprintf("yo-yo-yo %d", i))
@@ -285,7 +286,7 @@ func TestUnavailable(t *testing.T) {
ps.VerifyPdbStatus(t, pdbName, 1, 4, 3, 4, map[string]unversioned.Time{})

// Now set one pod as unavailable
pods[0].Status.Conditions = []api.PodCondition{}
pods[0].Status.Conditions = []v1.PodCondition{}
update(t, dc.podLister.Indexer, pods[0])
dc.sync(pdbName)

@@ -387,7 +388,7 @@ func TestReplicationController(t *testing.T) {
// about the RC. This is a known bug. TODO(mml): file issue
ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

pods := []*api.Pod{}
pods := []*v1.Pod{}

for i := int32(0); i < 3; i++ {
pod, _ := newPod(t, fmt.Sprintf("foobar %d", i))
@@ -439,7 +440,7 @@ func TestTwoControllers(t *testing.T) {

ps.VerifyPdbStatus(t, pdbName, 0, 0, 0, 0, map[string]unversioned.Time{})

pods := []*api.Pod{}
pods := []*v1.Pod{}

unavailablePods := collectionSize - minimumOne - 1
for i := int32(1); i <= collectionSize; i++ {
@@ -447,7 +448,7 @@ func TestTwoControllers(t *testing.T) {
pods = append(pods, pod)
pod.Labels = rcLabels
if i <= unavailablePods {
pod.Status.Conditions = []api.PodCondition{}
pod.Status.Conditions = []v1.PodCondition{}
}
add(t, dc.podLister.Indexer, pod)
dc.sync(pdbName)
@@ -480,7 +481,7 @@ func TestTwoControllers(t *testing.T) {
pods = append(pods, pod)
pod.Labels = dLabels
if i <= unavailablePods {
pod.Status.Conditions = []api.PodCondition{}
pod.Status.Conditions = []v1.PodCondition{}
}
add(t, dc.podLister.Indexer, pod)
dc.sync(pdbName)
@@ -498,17 +499,17 @@ func TestTwoControllers(t *testing.T) {
// but if we bring down two, it's not. Then we make the pod ready again and
// verify that a disruption is permitted again.
ps.VerifyPdbStatus(t, pdbName, 2, 2+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})
pods[collectionSize-1].Status.Conditions = []api.PodCondition{}
pods[collectionSize-1].Status.Conditions = []v1.PodCondition{}
update(t, dc.podLister.Indexer, pods[collectionSize-1])
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

pods[collectionSize-2].Status.Conditions = []api.PodCondition{}
pods[collectionSize-2].Status.Conditions = []v1.PodCondition{}
update(t, dc.podLister.Indexer, pods[collectionSize-2])
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 0, minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

pods[collectionSize-1].Status.Conditions = []api.PodCondition{{Type: api.PodReady, Status: api.ConditionTrue}}
pods[collectionSize-1].Status.Conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue}}
update(t, dc.podLister.Indexer, pods[collectionSize-1])
dc.sync(pdbName)
ps.VerifyPdbStatus(t, pdbName, 1, 1+minimumTwo, minimumTwo, 2*collectionSize, map[string]unversioned.Time{})

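The fake controller above only swaps the recorder's event source; the full versioned event path, as the controllers in this commit wire it up, looks roughly like this (kubeClient stands in for whichever clientset.Interface is at hand):

broadcaster := record.NewBroadcaster()
broadcaster.StartLogging(glog.Infof)
// The sink is the versioned core client rather than the internal one.
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
// Events are then emitted against versioned objects:
recorder.Eventf(pdb, v1.EventTypeNormal, "NoPods", "No matching pods found")
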
@@ -24,13 +24,13 @@ import (

"encoding/json"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/endpoints"
"k8s.io/kubernetes/pkg/api/errors"
podutil "k8s.io/kubernetes/pkg/api/pod"
utilpod "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/api/v1/endpoints"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
utilpod "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
@@ -80,14 +80,14 @@ func NewEndpointController(podInformer cache.SharedIndexInformer, client clients

e.serviceStore.Indexer, e.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return e.client.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Core().Services(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return e.client.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&api.Service{},
&v1.Service{},
// TODO: Can we have much longer period here?
FullServiceResyncPeriod,
cache.ResourceEventHandlerFuncs{
@@ -180,7 +180,7 @@ func (e *EndpointController) Run(workers int, stopCh <-chan struct{}) {
<-stopCh
}

func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (sets.String, error) {
func (e *EndpointController) getPodServiceMemberships(pod *v1.Pod) (sets.String, error) {
set := sets.String{}
services, err := e.serviceStore.GetPodServices(pod)
if err != nil {
@@ -199,9 +199,9 @@ func (e *EndpointController) getPodServiceMemberships(pod *api.Pod) (sets.String
}

// When a pod is added, figure out what services it will be a member of and
// enqueue them. obj must have *api.Pod type.
// enqueue them. obj must have *v1.Pod type.
func (e *EndpointController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)
services, err := e.getPodServiceMemberships(pod)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to get pod %v/%v's service memberships: %v", pod.Namespace, pod.Name, err))
@@ -214,10 +214,10 @@ func (e *EndpointController) addPod(obj interface{}) {

// When a pod is updated, figure out what services it used to be a member of
// and what services it will be a member of, and enqueue the union of these.
// old and cur must be *api.Pod types.
// old and cur must be *v1.Pod types.
func (e *EndpointController) updatePod(old, cur interface{}) {
newPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
newPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if newPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
@@ -244,12 +244,12 @@ func (e *EndpointController) updatePod(old, cur interface{}) {
}
}

func hostNameAndDomainAreEqual(pod1, pod2 *api.Pod) bool {
func hostNameAndDomainAreEqual(pod1, pod2 *v1.Pod) bool {
return getHostname(pod1) == getHostname(pod2) &&
getSubdomain(pod1) == getSubdomain(pod2)
}

func getHostname(pod *api.Pod) string {
func getHostname(pod *v1.Pod) string {
if len(pod.Spec.Hostname) > 0 {
return pod.Spec.Hostname
}
@@ -259,7 +259,7 @@ func getHostname(pod *api.Pod) string {
return ""
}

func getSubdomain(pod *api.Pod) string {
func getSubdomain(pod *v1.Pod) string {
if len(pod.Spec.Subdomain) > 0 {
return pod.Spec.Subdomain
}
@@ -270,9 +270,9 @@ func getSubdomain(pod *api.Pod) string {
}

// When a pod is deleted, enqueue the services the pod used to be a member of.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
// obj could be a *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) deletePod(obj interface{}) {
if _, ok := obj.(*api.Pod); ok {
if _, ok := obj.(*v1.Pod); ok {
// Enqueue all the services that the pod used to be a member
// of. This happens to be exactly the same thing we do when a
// pod is added.
@@ -289,7 +289,7 @@ func (e *EndpointController) deletePod(obj interface{}) {
// TODO: keep a map of pods to services to handle this condition.
}

// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item.
// obj could be a *v1.Service, or a DeletionFinalStateUnknown marker item.
func (e *EndpointController) enqueueService(obj interface{}) {
key, err := keyFunc(obj)
if err != nil {
@@ -354,7 +354,7 @@ func (e *EndpointController) syncService(key string) error {
return nil
}

service := obj.(*api.Service)
service := obj.(*v1.Service)
if service.Spec.Selector == nil {
// services without a selector receive no endpoints from this controller;
// these services will receive the endpoints that are created out-of-band via the REST API.
@@ -369,7 +369,7 @@ func (e *EndpointController) syncService(key string) error {
return err
}

subsets := []api.EndpointSubset{}
subsets := []v1.EndpointSubset{}
podHostNames := map[string]endpoints.HostRecord{}

var tolerateUnreadyEndpoints bool
@@ -407,11 +407,11 @@ func (e *EndpointController) syncService(key string) error {
continue
}

epp := api.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epa := api.EndpointAddress{
epp := v1.EndpointPort{Name: portName, Port: int32(portNum), Protocol: portProto}
epa := v1.EndpointAddress{
IP: pod.Status.PodIP,
NodeName: &pod.Spec.NodeName,
TargetRef: &api.ObjectReference{
TargetRef: &v1.ObjectReference{
Kind: "Pod",
Namespace: pod.ObjectMeta.Namespace,
Name: pod.ObjectMeta.Name,
@@ -431,17 +431,17 @@ func (e *EndpointController) syncService(key string) error {
epa.Hostname = hostname
}

if tolerateUnreadyEndpoints || api.IsPodReady(pod) {
subsets = append(subsets, api.EndpointSubset{
Addresses: []api.EndpointAddress{epa},
Ports: []api.EndpointPort{epp},
if tolerateUnreadyEndpoints || v1.IsPodReady(pod) {
subsets = append(subsets, v1.EndpointSubset{
Addresses: []v1.EndpointAddress{epa},
Ports: []v1.EndpointPort{epp},
})
readyEps++
} else {
glog.V(5).Infof("Pod is out of service: %v/%v", pod.Namespace, pod.Name)
subsets = append(subsets, api.EndpointSubset{
NotReadyAddresses: []api.EndpointAddress{epa},
Ports: []api.EndpointPort{epp},
subsets = append(subsets, v1.EndpointSubset{
NotReadyAddresses: []v1.EndpointAddress{epa},
Ports: []v1.EndpointPort{epp},
})
notReadyEps++
}
@@ -453,8 +453,8 @@ func (e *EndpointController) syncService(key string) error {
currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name)
if err != nil {
if errors.IsNotFound(err) {
currentEndpoints = &api.Endpoints{
ObjectMeta: api.ObjectMeta{
currentEndpoints = &v1.Endpoints{
ObjectMeta: v1.ObjectMeta{
Name: service.Name,
Labels: service.Labels,
},
@@ -521,7 +521,7 @@ func (e *EndpointController) syncService(key string) error {
// some stragglers could have been left behind if the endpoint controller
// reboots).
func (e *EndpointController) checkLeftoverEndpoints() {
list, err := e.client.Core().Endpoints(api.NamespaceAll).List(api.ListOptions{})
list, err := e.client.Core().Endpoints(v1.NamespaceAll).List(v1.ListOptions{})
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err))
return

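deletePod above tolerates the tombstone case its comment describes: a dropped delete is replayed by the cache as a DeletedFinalStateUnknown wrapper around the last known state, which must be unwrapped before the *v1.Pod assertion. A self-contained sketch of that unwrapping, with a hypothetical helper name:

// podFromDeleteEvent normalizes a delete notification to a *v1.Pod, or
// reports false if neither a pod nor a pod tombstone was delivered.
func podFromDeleteEvent(obj interface{}) (*v1.Pod, bool) {
	if pod, ok := obj.(*v1.Pod); ok {
		return pod, true
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false
	}
	pod, ok := tombstone.Obj.(*v1.Pod)
	return pod, ok
}
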
@@ -22,14 +22,14 @@ import (
|
||||
"net/http/httptest"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
endptspkg "k8s.io/kubernetes/pkg/api/endpoints"
|
||||
"k8s.io/kubernetes/pkg/api/testapi"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
endptspkg "k8s.io/kubernetes/pkg/api/v1/endpoints"
|
||||
"k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
|
||||
"k8s.io/kubernetes/pkg/client/cache"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
|
||||
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
|
||||
"k8s.io/kubernetes/pkg/client/restclient"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
@@ -42,32 +42,32 @@ var emptyNodeName string
|
||||
|
||||
func addPods(store cache.Store, namespace string, nPods int, nPorts int, nNotReady int) {
|
||||
for i := 0; i < nPods+nNotReady; i++ {
|
||||
p := &api.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
p := &v1.Pod{
|
||||
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: namespace,
|
||||
Name: fmt.Sprintf("pod%d", i),
|
||||
Labels: map[string]string{"foo": "bar"},
|
||||
},
|
||||
Spec: api.PodSpec{
|
||||
Containers: []api.Container{{Ports: []api.ContainerPort{}}},
|
||||
Spec: v1.PodSpec{
|
||||
Containers: []v1.Container{{Ports: []v1.ContainerPort{}}},
|
||||
},
|
||||
Status: api.PodStatus{
|
||||
Status: v1.PodStatus{
|
||||
PodIP: fmt.Sprintf("1.2.3.%d", 4+i),
|
||||
Conditions: []api.PodCondition{
|
||||
Conditions: []v1.PodCondition{
|
||||
{
|
||||
Type: api.PodReady,
|
||||
Status: api.ConditionTrue,
|
||||
Type: v1.PodReady,
|
||||
Status: v1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if i >= nPods {
|
||||
p.Status.Conditions[0].Status = api.ConditionFalse
|
||||
p.Status.Conditions[0].Status = v1.ConditionFalse
|
||||
}
|
||||
for j := 0; j < nPorts; j++ {
|
||||
p.Spec.Containers[0].Ports = append(p.Spec.Containers[0].Ports,
|
||||
api.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)})
|
||||
v1.ContainerPort{Name: fmt.Sprintf("port%d", i), ContainerPort: int32(8080 + j)})
|
||||
}
|
||||
store.Add(p)
|
||||
}
|
||||
@@ -94,54 +94,54 @@ func makeTestServer(t *testing.T, namespace string, endpointsResponse serverResp
|
||||
}
|
||||
|
||||
func TestSyncEndpointsItemsPreserveNoSelector(t *testing.T) {
|
||||
ns := api.NamespaceDefault
|
||||
ns := v1.NamespaceDefault
|
||||
testServer, endpointsHandler := makeTestServer(t, ns,
|
||||
serverResponse{http.StatusOK, &api.Endpoints{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
serverResponse{http.StatusOK, &v1.Endpoints{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
|
||||
Ports: []api.EndpointPort{{Port: 1000}},
|
||||
Subsets: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
|
||||
Ports: []v1.EndpointPort{{Port: 1000}},
|
||||
}},
|
||||
}})
|
||||
defer testServer.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
|
||||
endpoints.podStoreSynced = alwaysReady
|
||||
endpoints.serviceStore.Indexer.Add(&api.Service{
|
||||
ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
|
||||
Spec: api.ServiceSpec{Ports: []api.ServicePort{{Port: 80}}},
|
||||
endpoints.serviceStore.Indexer.Add(&v1.Service{
|
||||
ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
|
||||
Spec: v1.ServiceSpec{Ports: []v1.ServicePort{{Port: 80}}},
|
||||
})
|
||||
endpoints.syncService(ns + "/foo")
|
||||
endpointsHandler.ValidateRequestCount(t, 0)
|
||||
}
|
||||
|
||||
func TestCheckLeftoverEndpoints(t *testing.T) {
|
||||
ns := api.NamespaceDefault
|
||||
ns := v1.NamespaceDefault
|
||||
// Note that this requests *all* endpoints, therefore the NamespaceAll
|
||||
// below.
|
||||
testServer, _ := makeTestServer(t, api.NamespaceAll,
|
||||
serverResponse{http.StatusOK, &api.EndpointsList{
|
||||
testServer, _ := makeTestServer(t, v1.NamespaceAll,
|
||||
serverResponse{http.StatusOK, &v1.EndpointsList{
|
||||
ListMeta: unversioned.ListMeta{
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Items: []api.Endpoints{{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
Items: []v1.Endpoints{{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "foo",
|
||||
Namespace: ns,
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Subsets: []api.EndpointSubset{{
|
||||
Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
|
||||
Ports: []api.EndpointPort{{Port: 1000}},
|
||||
Subsets: []v1.EndpointSubset{{
|
||||
Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
|
||||
Ports: []v1.EndpointPort{{Port: 1000}},
|
||||
}},
|
||||
}},
|
||||
}})
|
||||
defer testServer.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
|
||||
endpoints.podStoreSynced = alwaysReady
|
||||
endpoints.checkLeftoverEndpoints()
|
||||
@@ -158,41 +158,41 @@ func TestCheckLeftoverEndpoints(t *testing.T) {
|
||||
func TestSyncEndpointsProtocolTCP(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "TCP"}},
			Subsets: []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []v1.EndpointPort{{Port: 1000, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady

	addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "TCP"}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 2)
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -201,40 +201,40 @@ func TestSyncEndpointsProtocolTCP(t *testing.T) {
func TestSyncEndpointsProtocolUDP(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []api.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			Subsets: []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []v1.EndpointPort{{Port: 1000, Protocol: "UDP"}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}},
			Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt(8080), Protocol: "UDP"}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequestCount(t, 2)
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "UDP"}},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "UDP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -243,36 +243,36 @@ func TestSyncEndpointsProtocolUDP(t *testing.T) {
func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{},
			Subsets: []v1.EndpointSubset{},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -281,36 +281,36 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAll(t *testing.T) {
func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{},
			Subsets: []v1.EndpointSubset{},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 0, 1, 1)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:             []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		Subsets: []v1.EndpointSubset{{
			NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:             []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -319,37 +319,37 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllNotReady(t *testing.T) {
func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{},
			Subsets: []v1.EndpointSubset{},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 1, 1, 1)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses:         []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			NotReadyAddresses: []api.EndpointAddress{{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}},
			Ports:             []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		Subsets: []v1.EndpointSubset{{
			Addresses:         []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			NotReadyAddresses: []v1.EndpointAddress{{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}}},
			Ports:             []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
@@ -358,108 +358,108 @@ func TestSyncEndpointsItemsEmptySelectorSelectsAllMixed(t *testing.T) {
func TestSyncEndpointsItemsPreexisting(t *testing.T) {
	ns := "bar"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			Subsets: []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []v1.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)
}

func TestSyncEndpointsItemsPreexistingIdentical(t *testing.T) {
	ns := api.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, api.NamespaceDefault,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
	ns := v1.NamespaceDefault
	testServer, endpointsHandler := makeTestServer(t, v1.NamespaceDefault,
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				ResourceVersion: "1",
				Name:            "foo",
				Namespace:       ns,
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			Subsets: []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
				Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, api.NamespaceDefault, 1, 1, 0)
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
		Spec: api.ServiceSpec{
	addPods(endpoints.podStore.Indexer, v1.NamespaceDefault, 1, 1, 0)
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: v1.NamespaceDefault},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", api.NamespaceDefault, "foo"), "GET", nil)
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", v1.NamespaceDefault, "foo"), "GET", nil)
}

func TestSyncEndpointsItems(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{}})
		serverResponse{http.StatusOK, &v1.Endpoints{}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
	addPods(endpoints.podStore.Indexer, "blah", 5, 2, 0) // make sure these aren't found!
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: api.ServiceSpec{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: ns},
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports: []api.ServicePort{
			Ports: []v1.ServicePort{
				{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
				{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)},
			},
		},
	})
	endpoints.syncService("other/foo")
	expectedSubsets := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{
			{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
	expectedSubsets := []v1.EndpointSubset{{
		Addresses: []v1.EndpointAddress{
			{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
		},
		Ports: []api.EndpointPort{
		Ports: []v1.EndpointPort{
			{Name: "port0", Port: 8080, Protocol: "TCP"},
			{Name: "port1", Port: 8088, Protocol: "TCP"},
		},
	}}
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			ResourceVersion: "",
		},
		Subsets: endptspkg.SortSubsets(expectedSubsets),
@@ -472,41 +472,41 @@ func TestSyncEndpointsItems(t *testing.T) {
func TestSyncEndpointsItemsWithLabels(t *testing.T) {
	ns := "other"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{}})
		serverResponse{http.StatusOK, &v1.Endpoints{}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 3, 2, 0)
	serviceLabels := map[string]string{"foo": "bar"}
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports: []api.ServicePort{
			Ports: []v1.ServicePort{
				{Name: "port0", Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)},
				{Name: "port1", Port: 88, Protocol: "TCP", TargetPort: intstr.FromInt(8088)},
			},
		},
	})
	endpoints.syncService(ns + "/foo")
	expectedSubsets := []api.EndpointSubset{{
		Addresses: []api.EndpointAddress{
			{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
	expectedSubsets := []v1.EndpointSubset{{
		Addresses: []v1.EndpointAddress{
			{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}},
			{IP: "1.2.3.5", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1", Namespace: ns}},
			{IP: "1.2.3.6", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod2", Namespace: ns}},
		},
		Ports: []api.EndpointPort{
		Ports: []v1.EndpointPort{
			{Name: "port0", Port: 8080, Protocol: "TCP"},
			{Name: "port1", Port: 8088, Protocol: "TCP"},
		},
	}}
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			ResourceVersion: "",
			Labels:          serviceLabels,
		},
@@ -520,8 +520,8 @@ func TestSyncEndpointsItemsWithLabels(t *testing.T) {
func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
	ns := "bar"
	testServer, endpointsHandler := makeTestServer(t, ns,
		serverResponse{http.StatusOK, &api.Endpoints{
			ObjectMeta: api.ObjectMeta{
		serverResponse{http.StatusOK, &v1.Endpoints{
			ObjectMeta: v1.ObjectMeta{
				Name:            "foo",
				Namespace:       ns,
				ResourceVersion: "1",
@@ -529,39 +529,39 @@ func TestSyncEndpointsItemsPreexistingLabelsChange(t *testing.T) {
					"foo": "bar",
				},
			},
			Subsets: []api.EndpointSubset{{
				Addresses: []api.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []api.EndpointPort{{Port: 1000}},
			Subsets: []v1.EndpointSubset{{
				Addresses: []v1.EndpointAddress{{IP: "6.7.8.9", NodeName: &emptyNodeName}},
				Ports:     []v1.EndpointPort{{Port: 1000}},
			}},
		}})
	defer testServer.Close()
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
	client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
	endpoints := NewEndpointControllerFromClient(client, controller.NoResyncPeriodFunc)
	endpoints.podStoreSynced = alwaysReady
	addPods(endpoints.podStore.Indexer, ns, 1, 1, 0)
	serviceLabels := map[string]string{"baz": "blah"}
	endpoints.serviceStore.Indexer.Add(&api.Service{
		ObjectMeta: api.ObjectMeta{
	endpoints.serviceStore.Indexer.Add(&v1.Service{
		ObjectMeta: v1.ObjectMeta{
			Name:      "foo",
			Namespace: ns,
			Labels:    serviceLabels,
		},
		Spec: api.ServiceSpec{
		Spec: v1.ServiceSpec{
			Selector: map[string]string{"foo": "bar"},
			Ports:    []api.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
			Ports:    []v1.ServicePort{{Port: 80, Protocol: "TCP", TargetPort: intstr.FromInt(8080)}},
		},
	})
	endpoints.syncService(ns + "/foo")
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Endpoints{
		ObjectMeta: api.ObjectMeta{
	data := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Endpoints{
		ObjectMeta: v1.ObjectMeta{
			Name:            "foo",
			Namespace:       ns,
			ResourceVersion: "1",
			Labels:          serviceLabels,
		},
		Subsets: []api.EndpointSubset{{
			Addresses: []api.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &api.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []api.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		Subsets: []v1.EndpointSubset{{
			Addresses: []v1.EndpointAddress{{IP: "1.2.3.4", NodeName: &emptyNodeName, TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod0", Namespace: ns}}},
			Ports:     []v1.EndpointPort{{Port: 8080, Protocol: "TCP"}},
		}},
	})
	endpointsHandler.ValidateRequest(t, testapi.Default.ResourcePath("endpoints", ns, "foo"), "PUT", &data)

@@ -232,7 +232,7 @@ func shouldOrphanDependents(e *event, accessor meta.Object) bool {
	}
	finalizers := accessor.GetFinalizers()
	for _, finalizer := range finalizers {
		if finalizer == api.FinalizerOrphan {
		if finalizer == v1.FinalizerOrphan {
			return true
		}
	}
@@ -277,7 +277,7 @@ func (gc *GarbageCollector) removeOrphanFinalizer(owner *node) error {
	var newFinalizers []string
	found := false
	for _, f := range finalizers {
		if f == api.FinalizerOrphan {
		if f == v1.FinalizerOrphan {
			found = true
			break
		} else {
@@ -450,24 +450,24 @@ type GarbageCollector struct {

func gcListWatcher(client *dynamic.Client, resource unversioned.GroupVersionResource) *cache.ListWatch {
	return &cache.ListWatch{
		ListFunc: func(options api.ListOptions) (runtime.Object, error) {
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			// APIResource.Kind is not used by the dynamic client, so
			// leave it empty. We want to list this resource in all
			// namespaces if it's namespace scoped, so leaving
			// APIResource.Namespaced as false is all right.
			apiResource := unversioned.APIResource{Name: resource.Resource}
			return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
				Resource(&apiResource, api.NamespaceAll).
				Resource(&apiResource, v1.NamespaceAll).
				List(&options)
		},
		WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			// APIResource.Kind is not used by the dynamic client, so
			// leave it empty. We want to list this resource in all
			// namespaces if it's namespace scoped, so leaving
			// APIResource.Namespaced as false is all right.
			apiResource := unversioned.APIResource{Name: resource.Resource}
			return client.ParameterCodec(dynamic.VersionedParameterEncoderWithV1Fallback).
				Resource(&apiResource, api.NamespaceAll).
				Resource(&apiResource, v1.NamespaceAll).
				Watch(&options)
		},
	}

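The list and watch closures above now take versioned options end to end. For reference, a minimal usage sketch (mirroring TestGCListWatcher further down; dynamicClient and podResource are assumed inputs):

	lw := gcListWatcher(dynamicClient, podResource)
	if _, err := lw.List(v1.ListOptions{ResourceVersion: "1"}); err != nil {
		// handle the list error
	}
	w, err := lw.Watch(v1.ListOptions{ResourceVersion: "1"})
	if err != nil {
		// handle the watch error
	}
	defer w.Stop()
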
@@ -27,7 +27,6 @@ import (

	_ "k8s.io/kubernetes/pkg/api/install"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta/metatypes"
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
@@ -231,14 +230,14 @@ func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *te
}

func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []api.OwnerReference
	var ownerReferences []v1.OwnerReference
	for i := 0; i < len(owners); i++ {
		ownerReferences = append(ownerReferences, api.OwnerReference{UID: types.UID(owners[i])})
		ownerReferences = append(ownerReferences, v1.OwnerReference{UID: types.UID(owners[i])})
	}
	return event{
		eventType: eventType,
		obj: &api.Pod{
			ObjectMeta: api.ObjectMeta{
		obj: &v1.Pod{
			ObjectMeta: v1.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
@@ -350,8 +349,8 @@ func TestGCListWatcher(t *testing.T) {
		t.Fatal(err)
	}
	lw := gcListWatcher(client, podResource)
	lw.Watch(api.ListOptions{ResourceVersion: "1"})
	lw.List(api.ListOptions{ResourceVersion: "1"})
	lw.Watch(v1.ListOptions{ResourceVersion: "1"})
	lw.List(v1.ListOptions{ResourceVersion: "1"})
	if e, a := 2, len(testHandler.actions); e != a {
		t.Errorf("expect %d requests, got %d", e, a)
	}

@@ -24,13 +24,14 @@ package metaonly
import (
	"errors"
	"fmt"
	"reflect"
	"runtime"
	time "time"

	codec1978 "github.com/ugorji/go/codec"
	pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
	pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
	pkg3_types "k8s.io/kubernetes/pkg/types"
	"reflect"
	"runtime"
	time "time"
)

const (

@@ -20,11 +20,11 @@ import (
	"reflect"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/batch"
	"k8s.io/kubernetes/pkg/api/v1"
	batch "k8s.io/kubernetes/pkg/apis/batch/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	batchinternallisters "k8s.io/kubernetes/pkg/client/listers/batch/internalversion"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
)
@@ -33,7 +33,7 @@ import (
// Interface provides constructor for informer and lister for jobs
type JobInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() batchinternallisters.JobLister
	Lister() batchv1listers.JobLister
}

type jobInformer struct {
@@ -61,11 +61,11 @@ func (f *jobInformer) Informer() cache.SharedIndexInformer {
func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Batch().Jobs(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Batch().Jobs(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Batch().Jobs(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Batch().Jobs(v1.NamespaceAll).Watch(options)
			},
		},
		&batch.Job{},
@@ -77,7 +77,7 @@ func NewJobInformer(client clientset.Interface, resyncPeriod time.Duration) cach
}

// Lister returns lister for jobInformer
func (f *jobInformer) Lister() batchinternallisters.JobLister {
func (f *jobInformer) Lister() batchv1listers.JobLister {
	informer := f.Informer()
	return batchinternallisters.NewJobLister(informer.GetIndexer())
	return batchv1listers.NewJobLister(informer.GetIndexer())
}
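The job informer can also be consumed standalone; a minimal sketch under assumed inputs (client and stopCh), polling HasSynced rather than relying on any sync helper:

	inf := NewJobInformer(client, 30*time.Second)
	go inf.Run(stopCh)
	for !inf.HasSynced() {
		time.Sleep(100 * time.Millisecond) // wait for the initial list to land
	}
	jobs := inf.GetStore().List() // items are *batch.Job, the v1 type after this change
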

@@ -21,8 +21,10 @@ import (
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	coreinternallisters "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
@@ -45,7 +47,7 @@ func (f *podInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.Pod{})
	informerType := reflect.TypeOf(&v1.Pod{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -81,7 +83,7 @@ func (f *namespaceInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.Namespace{})
	informerType := reflect.TypeOf(&v1.Namespace{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -100,6 +102,42 @@ func (f *namespaceInformer) Lister() *cache.IndexerToNamespaceLister {

//*****************************************************************************

// InternalNamespaceInformer is type of SharedIndexInformer which watches and lists all namespaces.
// Interface provides constructor for informer and lister for namespaces
type InternalNamespaceInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() coreinternallisters.NamespaceLister
}

type internalNamespaceInformer struct {
	*sharedInformerFactory
}

// Informer checks whether internalNamespaceInformer exists in sharedInformerFactory and if not, it creates new informer of type
// internalNamespaceInformer and connects it to sharedInformerFactory
func (f *internalNamespaceInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.Namespace{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
	}
	informer = NewInternalNamespaceInformer(f.internalclient, f.defaultResync)
	f.informers[informerType] = informer

	return informer
}

// Lister returns lister for internalNamespaceInformer
func (f *internalNamespaceInformer) Lister() coreinternallisters.NamespaceLister {
	informer := f.Informer()
	return coreinternallisters.NewNamespaceLister(informer.GetIndexer())
}

//*****************************************************************************

// NodeInformer is type of SharedIndexInformer which watches and lists all nodes.
// Interface provides constructor for informer and lister for nodes
type NodeInformer interface {
@@ -117,7 +155,7 @@ func (f *nodeInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.Node{})
	informerType := reflect.TypeOf(&v1.Node{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -153,7 +191,7 @@ func (f *pvcInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.PersistentVolumeClaim{})
	informerType := reflect.TypeOf(&v1.PersistentVolumeClaim{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -189,7 +227,7 @@ func (f *pvInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.PersistentVolume{})
	informerType := reflect.TypeOf(&v1.PersistentVolume{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -212,7 +250,7 @@ func (f *pvInformer) Lister() *cache.StoreToPVFetcher {
// Interface provides constructor for informer and lister for limit ranges.
type LimitRangeInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() coreinternallisters.LimitRangeLister
	Lister() *cache.StoreToLimitRangeLister
}

type limitRangeInformer struct {
@@ -225,7 +263,7 @@ func (f *limitRangeInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.LimitRange{})
	informerType := reflect.TypeOf(&v1.LimitRange{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -237,23 +275,61 @@ func (f *limitRangeInformer) Informer() cache.SharedIndexInformer {
}

// Lister returns lister for limitRangeInformer
func (f *limitRangeInformer) Lister() coreinternallisters.LimitRangeLister {
func (f *limitRangeInformer) Lister() *cache.StoreToLimitRangeLister {
	informer := f.Informer()
	return &cache.StoreToLimitRangeLister{Indexer: informer.GetIndexer()}
}

//*****************************************************************************

// InternalLimitRangeInformer is type of SharedIndexInformer which watches and lists all limit ranges.
// Interface provides constructor for informer and lister for limit ranges.
type InternalLimitRangeInformer interface {
	Informer() cache.SharedIndexInformer
	Lister() coreinternallisters.LimitRangeLister
}

type internalLimitRangeInformer struct {
	*sharedInformerFactory
}

// Informer checks whether internalLimitRangeInformer exists in sharedInformerFactory and if not, it creates new informer of type
// internalLimitRangeInformer and connects it to sharedInformerFactory
func (f *internalLimitRangeInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.LimitRange{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
	}
	informer = NewInternalLimitRangeInformer(f.internalclient, f.defaultResync)
	f.informers[informerType] = informer

	return informer
}

// Lister returns lister for internalLimitRangeInformer
func (f *internalLimitRangeInformer) Lister() coreinternallisters.LimitRangeLister {
	informer := f.Informer()
	return coreinternallisters.NewLimitRangeLister(informer.GetIndexer())
}

//*****************************************************************************

// NewPodInformer returns a SharedIndexInformer that lists and watches all pods
func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Core().Pods(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().Pods(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().Pods(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().Pods(v1.NamespaceAll).Watch(options)
			},
		},
		&api.Pod{},
		&v1.Pod{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
@@ -265,14 +341,14 @@ func NewPodInformer(client clientset.Interface, resyncPeriod time.Duration) cach
func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().Nodes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().Nodes().Watch(options)
			},
		},
		&api.Node{},
		&v1.Node{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})

@@ -283,14 +359,14 @@ func NewNodeInformer(client clientset.Interface, resyncPeriod time.Duration) cac
func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
			},
		},
		&api.PersistentVolumeClaim{},
		&v1.PersistentVolumeClaim{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
	)
@@ -302,14 +378,14 @@ func NewPVCInformer(client clientset.Interface, resyncPeriod time.Duration) cach
func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().PersistentVolumes().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().PersistentVolumes().Watch(options)
			},
		},
		&api.PersistentVolume{},
		&v1.PersistentVolume{},
		resyncPeriod,
		cache.Indexers{})

@@ -320,13 +396,35 @@ func NewPVInformer(client clientset.Interface, resyncPeriod time.Duration) cache
func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().Namespaces().List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().Namespaces().Watch(options)
			},
		},
		&v1.Namespace{},
		resyncPeriod,
		cache.Indexers{})

	return sharedIndexInformer
}

// NewInternalNamespaceInformer returns a SharedIndexInformer that lists and watches namespaces
func NewInternalNamespaceInformer(client internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return client.Core().Namespaces().List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return client.Core().Namespaces().Watch(internalOptions)
			},
		},
		&api.Namespace{},
		resyncPeriod,
		cache.Indexers{})
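The internal informers above all share one pattern: accept versioned v1.ListOptions at the cache.ListWatch boundary, convert to internal api.ListOptions, then call the internal clientset. A minimal sketch of that step in isolation (toInternalListOptions is a hypothetical helper; the conversion error is discarded, as in the informers above):

	func toInternalListOptions(options v1.ListOptions) api.ListOptions {
		internalOptions := api.ListOptions{}
		v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
		return internalOptions
	}
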
@@ -338,11 +436,33 @@ func NewNamespaceInformer(client clientset.Interface, resyncPeriod time.Duration
func NewLimitRangeInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Core().LimitRanges(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().LimitRanges(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().LimitRanges(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().LimitRanges(v1.NamespaceAll).Watch(options)
			},
		},
		&v1.LimitRange{},
		resyncPeriod,
		cache.Indexers{})

	return sharedIndexInformer
}

// NewInternalLimitRangeInformer returns a SharedIndexInformer that lists and watches all LimitRanges
func NewInternalLimitRangeInformer(internalclient internalclientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return internalclient.Core().LimitRanges(v1.NamespaceAll).List(internalOptions)
			},
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				internalOptions := api.ListOptions{}
				v1.Convert_v1_ListOptions_To_api_ListOptions(&options, &internalOptions, nil)
				return internalclient.Core().LimitRanges(v1.NamespaceAll).Watch(internalOptions)
			},
		},
		&api.LimitRange{},
@@ -371,7 +491,7 @@ func (f *serviceAccountInformer) Informer() cache.SharedIndexInformer {
	f.lock.Lock()
	defer f.lock.Unlock()

	informerType := reflect.TypeOf(&api.ServiceAccount{})
	informerType := reflect.TypeOf(&v1.ServiceAccount{})
	informer, exists := f.informers[informerType]
	if exists {
		return informer
@@ -392,14 +512,14 @@ func (f *serviceAccountInformer) Lister() *cache.StoreToServiceAccountLister {
func NewServiceAccountInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
	sharedIndexInformer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return client.Core().ServiceAccounts(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return client.Core().ServiceAccounts(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options)
			},
		},
		&api.ServiceAccount{},
		&v1.ServiceAccount{},
		resyncPeriod,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})


@@ -20,8 +20,8 @@ import (
	"reflect"
	"time"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/api/v1"
	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
	"k8s.io/kubernetes/pkg/client/cache"
	"k8s.io/kubernetes/pkg/runtime"
	"k8s.io/kubernetes/pkg/watch"
@@ -49,11 +49,11 @@ func (f *daemonSetInformer) Informer() cache.SharedIndexInformer {
	}
	informer = cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return f.client.Extensions().DaemonSets(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return f.client.Extensions().DaemonSets(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return f.client.Extensions().DaemonSets(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return f.client.Extensions().DaemonSets(v1.NamespaceAll).Watch(options)
			},
		},
		&extensions.DaemonSet{},
@@ -91,11 +91,11 @@ func (f *deploymentInformer) Informer() cache.SharedIndexInformer {
	}
	informer = cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return f.client.Extensions().Deployments(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return f.client.Extensions().Deployments(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return f.client.Extensions().Deployments(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return f.client.Extensions().Deployments(v1.NamespaceAll).Watch(options)
			},
		},
		&extensions.Deployment{},
@@ -135,11 +135,11 @@ func (f *replicaSetInformer) Informer() cache.SharedIndexInformer {
	}
	informer = cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
				return f.client.Extensions().ReplicaSets(api.NamespaceAll).List(options)
			ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
				return f.client.Extensions().ReplicaSets(v1.NamespaceAll).List(options)
			},
			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
				return f.client.Extensions().ReplicaSets(api.NamespaceAll).Watch(options)
			WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
				return f.client.Extensions().ReplicaSets(v1.NamespaceAll).Watch(options)
			},
		},
		&extensions.ReplicaSet{},

@@ -23,7 +23,8 @@ import (

	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// SharedInformerFactory provides interface which holds unique informers for pods, nodes, namespaces, persistent volume
@@ -38,7 +39,9 @@ type SharedInformerFactory interface {

	Pods() PodInformer
	LimitRanges() LimitRangeInformer
	InternalLimitRanges() InternalLimitRangeInformer
	Namespaces() NamespaceInformer
	InternalNamespaces() InternalNamespaceInformer
	Nodes() NodeInformer
	PersistentVolumeClaims() PVCInformer
	PersistentVolumes() PVInformer
@@ -60,6 +63,8 @@ type SharedInformerFactory interface {

type sharedInformerFactory struct {
	client clientset.Interface
	// for admission plugins etc.
	internalclient internalclientset.Interface
	lock           sync.Mutex
	defaultResync  time.Duration

@@ -70,9 +75,10 @@ type sharedInformerFactory struct {
}

// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(client clientset.Interface, defaultResync time.Duration) SharedInformerFactory {
func NewSharedInformerFactory(client clientset.Interface, internalclient internalclientset.Interface, defaultResync time.Duration) SharedInformerFactory {
	return &sharedInformerFactory{
		client:           client,
		internalclient:   internalclient,
		defaultResync:    defaultResync,
		informers:        make(map[reflect.Type]cache.SharedIndexInformer),
		startedInformers: make(map[reflect.Type]bool),
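With the extra parameter, callers hand the factory both clientsets; a minimal construction sketch (versionedClient and internalClient are assumed inputs — callers that never touch the internal informers can pass nil for the latter, as cmd/kube-controller-manager now does):

	factory := informers.NewSharedInformerFactory(versionedClient, internalClient, 30*time.Second)
	podInformer := factory.Pods().Informer()           // backed by &v1.Pod{}
	lrLister := factory.InternalLimitRanges().Lister() // internal lister, e.g. for admission plugins
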
@@ -107,6 +113,11 @@ func (f *sharedInformerFactory) Namespaces() NamespaceInformer {
	return &namespaceInformer{sharedInformerFactory: f}
}

// InternalNamespaces returns a SharedIndexInformer that lists and watches all namespaces
func (f *sharedInformerFactory) InternalNamespaces() InternalNamespaceInformer {
	return &internalNamespaceInformer{sharedInformerFactory: f}
}

// PersistentVolumeClaims returns a SharedIndexInformer that lists and watches all persistent volume claims
func (f *sharedInformerFactory) PersistentVolumeClaims() PVCInformer {
	return &pvcInformer{sharedInformerFactory: f}
@@ -156,6 +167,11 @@ func (f *sharedInformerFactory) LimitRanges() LimitRangeInformer {
	return &limitRangeInformer{sharedInformerFactory: f}
}

// InternalLimitRanges returns a SharedIndexInformer that lists and watches all limit ranges.
func (f *sharedInformerFactory) InternalLimitRanges() InternalLimitRangeInformer {
	return &internalLimitRangeInformer{sharedInformerFactory: f}
}

// StorageClasses returns a SharedIndexInformer that lists and watches all storage classes
func (f *sharedInformerFactory) StorageClasses() StorageClassInformer {
	return &storageClassInformer{sharedInformerFactory: f}

@@ -22,8 +22,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/rbac"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
rbacinternal "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/client/cache"
)

@@ -53,20 +53,20 @@ func (f *sharedInformerFactory) ForResource(resource unversioned.GroupResource)
case api.Resource("serviceaccounts"):
return &genericInformer{resource: resource, informer: f.ServiceAccounts().Informer()}, nil

case extensions.Resource("daemonsets"):
case extensionsinternal.Resource("daemonsets"):
return &genericInformer{resource: resource, informer: f.DaemonSets().Informer()}, nil
case extensions.Resource("deployments"):
case extensionsinternal.Resource("deployments"):
return &genericInformer{resource: resource, informer: f.Deployments().Informer()}, nil
case extensions.Resource("replicasets"):
case extensionsinternal.Resource("replicasets"):
return &genericInformer{resource: resource, informer: f.ReplicaSets().Informer()}, nil

case rbac.Resource("clusterrolebindings"):
case rbacinternal.Resource("clusterrolebindings"):
return &genericInformer{resource: resource, informer: f.ClusterRoleBindings().Informer()}, nil
case rbac.Resource("clusterroles"):
case rbacinternal.Resource("clusterroles"):
return &genericInformer{resource: resource, informer: f.ClusterRoles().Informer()}, nil
case rbac.Resource("rolebindings"):
case rbacinternal.Resource("rolebindings"):
return &genericInformer{resource: resource, informer: f.RoleBindings().Informer()}, nil
case rbac.Resource("roles"):
case rbacinternal.Resource("roles"):
return &genericInformer{resource: resource, informer: f.Roles().Informer()}, nil

case batch.Resource("jobs"):

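ForResource resolves informers generically from a GroupResource, so the internal-group aliases above only change which case matches. A sketch of a lookup, assuming the returned generic informer exposes Informer() as the genericInformer cases suggest:

gi, err := factory.ForResource(unversioned.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "roles"})
if err != nil {
	return err // no shared informer is wired up for this resource
}
store := gi.Informer().GetStore() // generic, untyped access to the cache
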
@@ -19,8 +19,8 @@ package informers
import (
"reflect"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/api/v1"
rbac "k8s.io/kubernetes/pkg/apis/rbac"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
@@ -46,10 +46,10 @@ func (f *clusterRoleInformer) Informer() cache.SharedIndexInformer {
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().ClusterRoles().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().ClusterRoles().Watch(options)
},
},
@@ -86,10 +86,10 @@ func (f *clusterRoleBindingInformer) Informer() cache.SharedIndexInformer {
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().ClusterRoleBindings().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().ClusterRoleBindings().Watch(options)
},
},
@@ -126,11 +126,11 @@ func (f *roleInformer) Informer() cache.SharedIndexInformer {
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return f.client.Rbac().Roles(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().Roles(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return f.client.Rbac().Roles(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().Roles(v1.NamespaceAll).Watch(options)
},
},
&rbac.Role{},
@@ -166,11 +166,11 @@ func (f *roleBindingInformer) Informer() cache.SharedIndexInformer {
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return f.client.Rbac().RoleBindings(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Rbac().RoleBindings(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return f.client.Rbac().RoleBindings(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Rbac().RoleBindings(v1.NamespaceAll).Watch(options)
},
},
&rbac.RoleBinding{},

@@ -19,8 +19,8 @@ package informers
import (
"reflect"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/watch"
@@ -48,10 +48,10 @@ func (f *storageClassInformer) Informer() cache.SharedIndexInformer {
}
informer = cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return f.client.Storage().StorageClasses().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return f.client.Storage().StorageClasses().Watch(options)
},
},

@@ -23,14 +23,14 @@ import (
"sync"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
batchinternallisters "k8s.io/kubernetes/pkg/client/listers/batch/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
batchv1listers "k8s.io/kubernetes/pkg/client/listers/batch/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -60,7 +60,7 @@ type JobController struct {
expectations controller.ControllerExpectationsInterface

// A store of jobs
jobLister batchinternallisters.JobLister
jobLister batchv1listers.JobLister

// A store of pods, populated by the podController
podStore cache.StoreToPodLister
@@ -75,7 +75,7 @@ func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informe
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
// TODO: remove the wrapper when all clients have moved to use the clientset.
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("job_controller", kubeClient.Core().RESTClient().GetRateLimiter())
@@ -85,11 +85,11 @@ func NewJobController(podInformer cache.SharedIndexInformer, jobInformer informe
kubeClient: kubeClient,
podControl: controller.RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "job-controller"}),
},
expectations: controller.NewControllerExpectations(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "job"),
recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "job-controller"}),
recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "job-controller"}),
}

jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
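The controller's event plumbing after this hunk, condensed into one sketch — `kubeClient` is a release_1_5 clientset and `obj` is a hypothetical object to attach the event to:

eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "example-controller"})
recorder.Eventf(obj, v1.EventTypeNormal, "Synced", "sync completed after %d retries", 0)
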
@@ -135,7 +135,7 @@ func (jm *JobController) Run(workers int, stopCh <-chan struct{}) {
}

// getPodJob returns the job managing the given pod.
func (jm *JobController) getPodJob(pod *api.Pod) *batch.Job {
func (jm *JobController) getPodJob(pod *v1.Pod) *batch.Job {
jobs, err := jm.jobLister.GetPodJobs(pod)
if err != nil {
glog.V(4).Infof("No jobs found for pod %v, job controller will avoid syncing", pod.Name)
@@ -150,7 +150,7 @@ func (jm *JobController) getPodJob(pod *api.Pod) *batch.Job {

// When a pod is created, enqueue the controller that manages it and update its expectations.
func (jm *JobController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)
if pod.DeletionTimestamp != nil {
// on a restart of the controller manager, it's possible a new pod shows up in a state that
// is already pending deletion. Prevent the pod from being a creation observation.
@@ -170,10 +170,10 @@ func (jm *JobController) addPod(obj interface{}) {

// When a pod is updated, figure out what job/s manage it and wake them up.
// If the labels of the pod have changed we need to awaken both the old
// and new job. old and cur must be *api.Pod types.
// and new job. old and cur must be *v1.Pod types.
func (jm *JobController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
@@ -201,9 +201,9 @@ func (jm *JobController) updatePod(old, cur interface{}) {
}

// When a pod is deleted, enqueue the job that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
// obj could be a *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (jm *JobController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)

// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
@@ -215,7 +215,7 @@ func (jm *JobController) deletePod(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj))
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %+v", obj))
return
@@ -346,7 +346,7 @@ func (jm *JobController) syncJob(key string) error {
failed += active
active = 0
job.Status.Conditions = append(job.Status.Conditions, newCondition(batch.JobFailed, "DeadlineExceeded", "Job was active longer than specified deadline"))
jm.recorder.Event(&job, api.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline")
jm.recorder.Event(&job, v1.EventTypeNormal, "DeadlineExceeded", "Job was active longer than specified deadline")
} else {
if jobNeedsSync && job.DeletionTimestamp == nil {
active = jm.manageJob(activePods, succeeded, &job)
@@ -371,10 +371,10 @@ func (jm *JobController) syncJob(key string) error {
if completions >= *job.Spec.Completions {
complete = true
if active > 0 {
jm.recorder.Event(&job, api.EventTypeWarning, "TooManyActivePods", "Too many active pods running after completion count reached")
jm.recorder.Event(&job, v1.EventTypeWarning, "TooManyActivePods", "Too many active pods running after completion count reached")
}
if completions > *job.Spec.Completions {
jm.recorder.Event(&job, api.EventTypeWarning, "TooManySucceededPods", "Too many succeeded pods running after completion count reached")
jm.recorder.Event(&job, v1.EventTypeWarning, "TooManySucceededPods", "Too many succeeded pods running after completion count reached")
}
}
}
@@ -413,7 +413,7 @@ func pastActiveDeadline(job *batch.Job) bool {
func newCondition(conditionType batch.JobConditionType, reason, message string) batch.JobCondition {
return batch.JobCondition{
Type: conditionType,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
LastProbeTime: unversioned.Now(),
LastTransitionTime: unversioned.Now(),
Reason: reason,
@@ -422,16 +422,16 @@ func newCondition(conditionType batch.JobConditionType, reason, message string)
}

// getStatus returns the number of succeeded and failed pods running a job
func getStatus(pods []*api.Pod) (succeeded, failed int32) {
succeeded = int32(filterPods(pods, api.PodSucceeded))
failed = int32(filterPods(pods, api.PodFailed))
func getStatus(pods []*v1.Pod) (succeeded, failed int32) {
succeeded = int32(filterPods(pods, v1.PodSucceeded))
failed = int32(filterPods(pods, v1.PodFailed))
return
}

// manageJob is the core method responsible for managing the number of running
// pods according to what is specified in the job.Spec.
// Does NOT modify <activePods>.
func (jm *JobController) manageJob(activePods []*api.Pod, succeeded int32, job *batch.Job) int32 {
func (jm *JobController) manageJob(activePods []*v1.Pod, succeeded int32, job *batch.Job) int32 {
var activeLock sync.Mutex
active := int32(len(activePods))
parallelism := *job.Spec.Parallelism
@@ -523,7 +523,7 @@ func (jm *JobController) updateJobStatus(job *batch.Job) error {
}

// filterPods returns the number of pods whose status phase matches the given phase.
func filterPods(pods []*api.Pod, phase api.PodPhase) int {
func filterPods(pods []*v1.Pod, phase v1.PodPhase) int {
result := 0
for i := range pods {
if phase == pods[i].Status.Phase {

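getStatus is just two filterPods passes over the same slice; a sketch, with `pods` standing in for a slice taken from the pod store:

var pods []*v1.Pod // hypothetical: pods belonging to one job
succeeded, failed := getStatus(pods)
// equivalent to:
//   succeeded == int32(filterPods(pods, v1.PodSucceeded))
//   failed    == int32(filterPods(pods, v1.PodFailed))
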
@@ -21,13 +21,13 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/batch"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
@@ -41,22 +41,22 @@ var alwaysReady = func() bool { return true }

func newJob(parallelism, completions int32) *batch.Job {
j := &batch.Job{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
},
Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"foo": "bar",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{Image: "foo/bar"},
},
},
@@ -88,23 +88,23 @@ func getKey(job *batch.Job, t *testing.T) string {
}

func newJobControllerFromClient(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) (*JobController, informers.SharedInformerFactory) {
sharedInformers := informers.NewSharedInformerFactory(kubeClient, resyncPeriod())
sharedInformers := informers.NewSharedInformerFactory(kubeClient, nil, resyncPeriod())
jm := NewJobController(sharedInformers.Pods().Informer(), sharedInformers.Jobs(), kubeClient)

return jm, sharedInformers
}

// create count pods with the given phase for the given job
func newPodList(count int32, status api.PodPhase, job *batch.Job) []api.Pod {
pods := []api.Pod{}
func newPodList(count int32, status v1.PodPhase, job *batch.Job) []v1.Pod {
pods := []v1.Pod{}
for i := int32(0); i < count; i++ {
newPod := api.Pod{
ObjectMeta: api.ObjectMeta{
newPod := v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("pod-%v", rand.String(10)),
Labels: job.Spec.Selector.MatchLabels,
Namespace: job.Namespace,
},
Status: api.PodStatus{Phase: status},
Status: v1.PodStatus{Phase: status},
}
pods = append(pods, newPod)
}
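The tests below all share one bootstrap, shown here in a single sketch: a versioned clientset pointed at an empty host, the factory with a nil internal client, and the controller under test, seeded via the helpers above:

client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformers := newJobControllerFromClient(client, controller.NoResyncPeriodFunc)
sharedInformers.Jobs().Informer().GetIndexer().Add(newJob(1, 1)) // seed the job store
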
@@ -227,7 +227,7 @@ func TestControllerSyncJob(t *testing.T) {

for name, tc := range testCases {
// job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{Err: tc.podControllerError}
manager.podControl = &fakePodControl
@@ -247,16 +247,16 @@ func TestControllerSyncJob(t *testing.T) {
}
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
for _, pod := range newPodList(tc.pendingPods, api.PodPending, job) {
for _, pod := range newPodList(tc.pendingPods, v1.PodPending, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.activePods, api.PodRunning, job) {
for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) {
for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) {
for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod)
}

@@ -331,7 +331,7 @@ func TestSyncJobPastDeadline(t *testing.T) {

for name, tc := range testCases {
// job manager setup
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -350,13 +350,13 @@ func TestSyncJobPastDeadline(t *testing.T) {
job.Status.StartTime = &start
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
for _, pod := range newPodList(tc.activePods, api.PodRunning, job) {
for _, pod := range newPodList(tc.activePods, v1.PodRunning, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.succeededPods, api.PodSucceeded, job) {
for _, pod := range newPodList(tc.succeededPods, v1.PodSucceeded, job) {
podIndexer.Add(&pod)
}
for _, pod := range newPodList(tc.failedPods, api.PodFailed, job) {
for _, pod := range newPodList(tc.failedPods, v1.PodFailed, job) {
podIndexer.Add(&pod)
}

@@ -395,7 +395,7 @@ func TestSyncJobPastDeadline(t *testing.T) {

func getCondition(job *batch.Job, condition batch.JobConditionType) bool {
for _, v := range job.Status.Conditions {
if v.Type == condition && v.Status == api.ConditionTrue {
if v.Type == condition && v.Status == v1.ConditionTrue {
return true
}
}
@@ -403,7 +403,7 @@ func getCondition(job *batch.Job, condition batch.JobConditionType) bool {
}

func TestSyncPastDeadlineJobFinished(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -438,7 +438,7 @@ func TestSyncPastDeadlineJobFinished(t *testing.T) {
}

func TestSyncJobComplete(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -463,7 +463,7 @@ func TestSyncJobComplete(t *testing.T) {
}

func TestSyncJobDeleted(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, _ := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -484,7 +484,7 @@ func TestSyncJobDeleted(t *testing.T) {
}

func TestSyncJobUpdateRequeue(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -510,38 +510,38 @@ func TestSyncJobUpdateRequeue(t *testing.T) {
}

func TestJobPodLookup(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
manager.podStoreSynced = alwaysReady
manager.jobStoreSynced = alwaysReady
testCases := []struct {
job *batch.Job
pod *api.Pod
pod *v1.Pod

expectedName string
}{
// pods without labels don't match any job
{
job: &batch.Job{
ObjectMeta: api.ObjectMeta{Name: "basic"},
ObjectMeta: v1.ObjectMeta{Name: "basic"},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll},
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll},
},
expectedName: "",
},
// matching labels, different namespace
{
job: &batch.Job{
ObjectMeta: api.ObjectMeta{Name: "foo"},
ObjectMeta: v1.ObjectMeta{Name: "foo"},
Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "foo2",
Namespace: "ns",
Labels: map[string]string{"foo": "bar"},
@@ -552,7 +552,7 @@ func TestJobPodLookup(t *testing.T) {
// matching ns and labels returns
{
job: &batch.Job{
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: batch.JobSpec{
Selector: &unversioned.LabelSelector{
MatchExpressions: []unversioned.LabelSelectorRequirement{
@@ -565,8 +565,8 @@ func TestJobPodLookup(t *testing.T) {
},
},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "foo3",
Namespace: "ns",
Labels: map[string]string{"foo": "bar"},
@@ -601,7 +601,7 @@ func (fe FakeJobExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestSyncJobExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestSyncJobExpectations(t *testing.T) {
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
clientset := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager, sharedInformerFactory := newJobControllerFromClient(clientset, controller.NoResyncPeriodFunc)
fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -611,7 +611,7 @@ func TestSyncJobExpectations(t *testing.T) {

job := newJob(2, 2)
sharedInformerFactory.Jobs().Informer().GetIndexer().Add(job)
pods := newPodList(2, api.PodPending, job)
pods := newPodList(2, v1.PodPending, job)
podIndexer := sharedInformerFactory.Pods().Informer().GetIndexer()
podIndexer.Add(&pods[0])

@@ -656,7 +656,7 @@ func TestWatchJobs(t *testing.T) {
t.Errorf("Expected to find job under key %v: %v", key, err)
return nil
}
if !api.Semantic.DeepDerivative(*job, testJob) {
if !v1.Semantic.DeepDerivative(*job, testJob) {
t.Errorf("Expected %#v, but got %#v", testJob, *job)
}
return nil
@@ -699,7 +699,7 @@ func TestWatchPods(t *testing.T) {
if err != nil {
t.Errorf("Expected to find job under key %v: %v", key, err)
}
if !api.Semantic.DeepDerivative(job, testJob) {
if !v1.Semantic.DeepDerivative(job, testJob) {
t.Errorf("\nExpected %#v,\nbut got %#v", testJob, job)
close(received)
return nil
@@ -714,9 +714,9 @@ func TestWatchPods(t *testing.T) {
go sharedInformerFactory.Pods().Informer().Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

pods := newPodList(1, api.PodRunning, testJob)
pods := newPodList(1, v1.PodRunning, testJob)
testPod := pods[0]
testPod.Status.Phase = api.PodFailed
testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod)

t.Log("Waiting for pod to reach syncHandler")

@@ -17,13 +17,13 @@ limitations under the License.
package job

import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func IsJobFinished(j *batch.Job) bool {
for _, c := range j.Status.Conditions {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == api.ConditionTrue {
if (c.Type == batch.JobComplete || c.Type == batch.JobFailed) && c.Status == v1.ConditionTrue {
return true
}
}

@@ -19,8 +19,8 @@ package job
import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/batch"
"k8s.io/kubernetes/pkg/api/v1"
batch "k8s.io/kubernetes/pkg/apis/batch/v1"
)

func TestIsJobFinished(t *testing.T) {
@@ -28,7 +28,7 @@ func TestIsJobFinished(t *testing.T) {
Status: batch.JobStatus{
Conditions: []batch.JobCondition{{
Type: batch.JobComplete,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
}},
},
}
@@ -37,12 +37,12 @@ func TestIsJobFinished(t *testing.T) {
t.Error("Job was expected to be finished")
}

job.Status.Conditions[0].Status = api.ConditionFalse
job.Status.Conditions[0].Status = v1.ConditionFalse
if IsJobFinished(job) {
t.Error("Job was not expected to be finished")
}

job.Status.Conditions[0].Status = api.ConditionUnknown
job.Status.Conditions[0].Status = v1.ConditionUnknown
if IsJobFinished(job) {
t.Error("Job was not expected to be finished")
}

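IsJobFinished in use, with a hypothetical job literal:

job := &batch.Job{Status: batch.JobStatus{Conditions: []batch.JobCondition{
	{Type: batch.JobComplete, Status: v1.ConditionTrue},
}}}
done := IsJobFinished(job) // true: Complete (or Failed) with status True counts as finished
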
@@ -19,10 +19,10 @@ package namespace
import (
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
@@ -52,7 +52,7 @@ type NamespaceController struct {
// opCache is a cache to remember if a particular operation is not supported to aid dynamic client.
opCache *operationNotSupportedCache
// finalizerToken is the finalizer token managed by this controller
finalizerToken api.FinalizerName
finalizerToken v1.FinalizerName
}

// NewNamespaceController creates a new NamespaceController
@@ -61,7 +61,7 @@ func NewNamespaceController(
clientPool dynamic.ClientPool,
groupVersionResourcesFn func() ([]unversioned.GroupVersionResource, error),
resyncPeriod time.Duration,
finalizerToken api.FinalizerName) *NamespaceController {
finalizerToken v1.FinalizerName) *NamespaceController {

// the namespace deletion code looks at the discovery document to enumerate the set of resources on the server.
// it then finds all namespaced resources, and in response to namespace deletion, will call delete on all of them.
@@ -98,22 +98,22 @@ func NewNamespaceController(
// configure the backing store/controller
store, controller := cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return kubeClient.Core().Namespaces().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return kubeClient.Core().Namespaces().Watch(options)
},
},
&api.Namespace{},
&v1.Namespace{},
resyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
namespace := obj.(*api.Namespace)
namespace := obj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace)
},
UpdateFunc: func(oldObj, newObj interface{}) {
namespace := newObj.(*api.Namespace)
namespace := newObj.(*v1.Namespace)
namespaceController.enqueueNamespace(namespace)
},
},
@@ -125,7 +125,7 @@ func NewNamespaceController(
}

// enqueueNamespace adds an object to the controller work queue
// obj could be an *api.Namespace, or a DeletionFinalStateUnknown item.
// obj could be a *v1.Namespace, or a DeletionFinalStateUnknown item.
func (nm *NamespaceController) enqueueNamespace(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -190,7 +190,7 @@ func (nm *NamespaceController) syncNamespaceFromKey(key string) (err error) {
nm.queue.Add(key)
return err
}
namespace := obj.(*api.Namespace)
namespace := obj.(*v1.Namespace)
return syncNamespace(nm.kubeClient, nm.clientPool, nm.opCache, nm.groupVersionResourcesFn, namespace, nm.finalizerToken)
}


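The handlers above funnel everything through enqueueNamespace; the enqueue path reduces to the sketch below, where `queue` stands in for the controller's rate-limited workqueue:

key, err := controller.KeyFunc(namespace)
if err != nil {
	glog.Errorf("Couldn't get key for object %+v: %v", namespace, err)
	return
}
queue.Add(key) // for cluster-scoped objects like namespaces, the key is just the name
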
@@ -28,9 +28,10 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
@@ -39,15 +40,15 @@ import (
)

func TestFinalized(t *testing.T) {
testNamespace := &api.Namespace{
Spec: api.NamespaceSpec{
Finalizers: []api.FinalizerName{"a", "b"},
testNamespace := &v1.Namespace{
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"a", "b"},
},
}
if finalized(testNamespace) {
t.Errorf("Unexpected result, namespace is not finalized")
}
testNamespace.Spec.Finalizers = []api.FinalizerName{}
testNamespace.Spec.Finalizers = []v1.FinalizerName{}
if !finalized(testNamespace) {
t.Errorf("Expected object to be finalized")
}
@@ -55,16 +56,16 @@ func TestFinalized(t *testing.T) {

func TestFinalizeNamespaceFunc(t *testing.T) {
mockClient := &fake.Clientset{}
testNamespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
testNamespace := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test",
ResourceVersion: "1",
},
Spec: api.NamespaceSpec{
Finalizers: []api.FinalizerName{"kubernetes", "other"},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes", "other"},
},
}
finalizeNamespace(mockClient, testNamespace, api.FinalizerKubernetes)
finalizeNamespace(mockClient, testNamespace, v1.FinalizerKubernetes)
actions := mockClient.Actions()
if len(actions) != 1 {
t.Errorf("Expected 1 mock client action, but got %v", len(actions))
@@ -72,7 +73,7 @@ func TestFinalizeNamespaceFunc(t *testing.T) {
if !actions[0].Matches("create", "namespaces") || actions[0].GetSubresource() != "finalize" {
t.Errorf("Expected finalize-namespace action %v", actions[0])
}
finalizers := actions[0].(core.CreateAction).GetObject().(*api.Namespace).Spec.Finalizers
finalizers := actions[0].(core.CreateAction).GetObject().(*v1.Namespace).Spec.Finalizers
if len(finalizers) != 1 {
t.Errorf("There should be a single finalizer remaining")
}
@@ -84,28 +85,28 @@ func TestFinalizeNamespaceFunc(t *testing.T) {
func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIVersions) {
now := unversioned.Now()
namespaceName := "test"
testNamespacePendingFinalize := &api.Namespace{
ObjectMeta: api.ObjectMeta{
testNamespacePendingFinalize := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespaceName,
ResourceVersion: "1",
DeletionTimestamp: &now,
},
Spec: api.NamespaceSpec{
Finalizers: []api.FinalizerName{"kubernetes"},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes"},
},
Status: api.NamespaceStatus{
Phase: api.NamespaceTerminating,
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}
testNamespaceFinalizeComplete := &api.Namespace{
ObjectMeta: api.ObjectMeta{
testNamespaceFinalizeComplete := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: namespaceName,
ResourceVersion: "1",
DeletionTimestamp: &now,
},
Spec: api.NamespaceSpec{},
Status: api.NamespaceStatus{
Phase: api.NamespaceTerminating,
Spec: v1.NamespaceSpec{},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}

@@ -126,7 +127,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
}

scenarios := map[string]struct {
testNamespace *api.Namespace
testNamespace *v1.Namespace
kubeClientActionSet sets.String
dynamicClientActionSet sets.String
gvrError error
@@ -172,7 +173,7 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
return groupVersionResources, nil
}

err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testInput.testNamespace, api.FinalizerKubernetes)
err := syncNamespace(mockClient, clientPool, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testInput.testNamespace, v1.FinalizerKubernetes)
if err != nil {
t.Errorf("scenario %s - Unexpected error when syncing namespace %v", scenario, err)
}
@@ -202,14 +203,14 @@ func testSyncNamespaceThatIsTerminating(t *testing.T, versions *unversioned.APIV
func TestRetryOnConflictError(t *testing.T) {
mockClient := &fake.Clientset{}
numTries := 0
retryOnce := func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
retryOnce := func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
numTries++
if numTries <= 1 {
return namespace, errors.NewConflict(api.Resource("namespaces"), namespace.Name, fmt.Errorf("ERROR!"))
}
return namespace, nil
}
namespace := &api.Namespace{}
namespace := &v1.Namespace{}
_, err := retryOnConflictError(mockClient, namespace, retryOnce)
if err != nil {
t.Errorf("Unexpected error %v", err)
@@ -229,22 +230,22 @@ func TestSyncNamespaceThatIsTerminatingV1Beta1(t *testing.T) {

func TestSyncNamespaceThatIsActive(t *testing.T) {
mockClient := &fake.Clientset{}
testNamespace := &api.Namespace{
ObjectMeta: api.ObjectMeta{
testNamespace := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{
Name: "test",
ResourceVersion: "1",
},
Spec: api.NamespaceSpec{
Finalizers: []api.FinalizerName{"kubernetes"},
Spec: v1.NamespaceSpec{
Finalizers: []v1.FinalizerName{"kubernetes"},
},
Status: api.NamespaceStatus{
Phase: api.NamespaceActive,
Status: v1.NamespaceStatus{
Phase: v1.NamespaceActive,
},
}
fn := func() ([]unversioned.GroupVersionResource, error) {
return testGroupVersionResources(), nil
}
err := syncNamespace(mockClient, nil, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testNamespace, api.FinalizerKubernetes)
err := syncNamespace(mockClient, nil, &operationNotSupportedCache{m: make(map[operationKey]bool)}, fn, testNamespace, v1.FinalizerKubernetes)
if err != nil {
t.Errorf("Unexpected error when syncing namespace %v", err)
}

@@ -22,11 +22,10 @@ import (
"sync"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/typed/dynamic"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/sets"
@@ -80,12 +79,12 @@ func (o *operationNotSupportedCache) setNotSupported(key operationKey) {
}

// updateNamespaceFunc is a function that makes an update to a namespace
type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error)
type updateNamespaceFunc func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error)

// retryOnConflictError retries the specified fn if there was a conflict error
// it will return an error if the UID for an object changes across retry operations.
// TODO RetryOnConflict should be a generic concept in client code
func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespace, fn updateNamespaceFunc) (result *api.Namespace, err error) {
func retryOnConflictError(kubeClient clientset.Interface, namespace *v1.Namespace, fn updateNamespaceFunc) (result *v1.Namespace, err error) {
latestNamespace := namespace
for {
result, err = fn(kubeClient, latestNamespace)
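retryOnConflictError pairs with any updateNamespaceFunc, e.g. the status updater defined just below; on a conflict it refetches the namespace and re-applies fn to the latest copy. A sketch:

ns, err := retryOnConflictError(kubeClient, namespace, updateNamespaceStatusFunc)
if err != nil {
	return err
}
// ns is the server-side copy returned by the final successful update
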
@@ -107,32 +106,32 @@ func retryOnConflictError(kubeClient clientset.Interface, namespace *api.Namespa
}

// updateNamespaceStatusFunc will verify that the status of the namespace is correct
func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == api.NamespaceTerminating {
func updateNamespaceStatusFunc(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
if namespace.DeletionTimestamp.IsZero() || namespace.Status.Phase == v1.NamespaceTerminating {
return namespace, nil
}
newNamespace := api.Namespace{}
newNamespace := v1.Namespace{}
newNamespace.ObjectMeta = namespace.ObjectMeta
newNamespace.Status = namespace.Status
newNamespace.Status.Phase = api.NamespaceTerminating
newNamespace.Status.Phase = v1.NamespaceTerminating
return kubeClient.Core().Namespaces().UpdateStatus(&newNamespace)
}

// finalized returns true if the namespace.Spec.Finalizers is an empty list
func finalized(namespace *api.Namespace) bool {
func finalized(namespace *v1.Namespace) bool {
return len(namespace.Spec.Finalizers) == 0
}

// finalizeNamespaceFunc returns a function that knows how to finalize a namespace for the specified token.
func finalizeNamespaceFunc(finalizerToken api.FinalizerName) updateNamespaceFunc {
return func(kubeClient clientset.Interface, namespace *api.Namespace) (*api.Namespace, error) {
func finalizeNamespaceFunc(finalizerToken v1.FinalizerName) updateNamespaceFunc {
return func(kubeClient clientset.Interface, namespace *v1.Namespace) (*v1.Namespace, error) {
return finalizeNamespace(kubeClient, namespace, finalizerToken)
}
}

// finalizeNamespace removes the specified finalizerToken and finalizes the namespace
func finalizeNamespace(kubeClient clientset.Interface, namespace *api.Namespace, finalizerToken api.FinalizerName) (*api.Namespace, error) {
namespaceFinalize := api.Namespace{}
func finalizeNamespace(kubeClient clientset.Interface, namespace *v1.Namespace, finalizerToken v1.FinalizerName) (*v1.Namespace, error) {
namespaceFinalize := v1.Namespace{}
namespaceFinalize.ObjectMeta = namespace.ObjectMeta
namespaceFinalize.Spec = namespace.Spec
finalizerSet := sets.NewString()
@@ -141,9 +140,9 @@ func finalizeNamespace(kubeClient clientset.Interface, namespace *api.Namespace,
finalizerSet.Insert(string(namespace.Spec.Finalizers[i]))
}
}
namespaceFinalize.Spec.Finalizers = make([]api.FinalizerName, 0, len(finalizerSet))
namespaceFinalize.Spec.Finalizers = make([]v1.FinalizerName, 0, len(finalizerSet))
for _, value := range finalizerSet.List() {
namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, api.FinalizerName(value))
namespaceFinalize.Spec.Finalizers = append(namespaceFinalize.Spec.Finalizers, v1.FinalizerName(value))
}
namespace, err := kubeClient.Core().Namespaces().Finalize(&namespaceFinalize)
if err != nil {
@@ -372,8 +371,8 @@ func syncNamespace(
clientPool dynamic.ClientPool,
opCache *operationNotSupportedCache,
groupVersionResourcesFn func() ([]unversioned.GroupVersionResource, error),
namespace *api.Namespace,
finalizerToken api.FinalizerName,
namespace *v1.Namespace,
finalizerToken v1.FinalizerName,
) error {
if namespace.DeletionTimestamp == nil {
return nil
@@ -409,10 +408,10 @@ func syncNamespace(

// if the namespace is already finalized, delete it
if finalized(namespace) {
var opts *api.DeleteOptions
var opts *v1.DeleteOptions
uid := namespace.UID
if len(uid) > 0 {
opts = &api.DeleteOptions{Preconditions: &api.Preconditions{UID: &uid}}
opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
}
err = kubeClient.Core().Namespaces().Delete(namespace.Name, opts)
if err != nil && !errors.IsNotFound(err) {
@@ -483,14 +482,14 @@ func estimateGracefulTermination(kubeClient clientset.Interface, groupVersionRes
func estimateGracefulTerminationForPods(kubeClient clientset.Interface, ns string) (int64, error) {
glog.V(5).Infof("namespace controller - estimateGracefulTerminationForPods - namespace %s", ns)
estimate := int64(0)
items, err := kubeClient.Core().Pods(ns).List(api.ListOptions{})
items, err := kubeClient.Core().Pods(ns).List(v1.ListOptions{})
if err != nil {
return estimate, err
}
for i := range items.Items {
// filter out terminal pods
phase := items.Items[i].Status.Phase
if api.PodSucceeded == phase || api.PodFailed == phase {
if v1.PodSucceeded == phase || v1.PodFailed == phase {
continue
}
if items.Items[i].Spec.TerminationGracePeriodSeconds != nil {

@@ -22,9 +22,9 @@ import (
"net"
"sync"

"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/util/sets"
"k8s.io/kubernetes/pkg/util/wait"
@@ -50,8 +50,8 @@ type nodeAndCIDR struct {

// CIDRAllocator is an interface implemented by things that know how to allocate/occupy/recycle CIDR for nodes.
type CIDRAllocator interface {
AllocateOrOccupyCIDR(node *api.Node) error
ReleaseCIDR(node *api.Node) error
AllocateOrOccupyCIDR(node *v1.Node) error
ReleaseCIDR(node *v1.Node) error
}

type rangeAllocator struct {
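A sketch of how a node controller would drive this interface — `allocator` being whatever NewCIDRRangeAllocator below returns, `node` a *v1.Node from an informer handler:

if err := allocator.AllocateOrOccupyCIDR(node); err != nil {
	glog.Errorf("CIDR assignment for node %q failed: %v", node.Name, err)
}
// ...and when the node goes away:
if err := allocator.ReleaseCIDR(node); err != nil {
	glog.Errorf("CIDR release for node %q failed: %v", node.Name, err)
}
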
@@ -72,9 +72,9 @@ type rangeAllocator struct {
// Caller must ensure subNetMaskSize is not less than cluster CIDR mask size.
// Caller must always pass in a list of existing nodes so the new allocator
// can initialize its CIDR map. NodeList is only nil in testing.
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *api.NodeList) (CIDRAllocator, error) {
func NewCIDRRangeAllocator(client clientset.Interface, clusterCIDR *net.IPNet, serviceCIDR *net.IPNet, subNetMaskSize int, nodeList *v1.NodeList) (CIDRAllocator, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "cidrAllocator"})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "cidrAllocator"})
eventBroadcaster.StartLogging(glog.Infof)

ra := &rangeAllocator{
@@ -145,7 +145,7 @@ func (r *rangeAllocator) removeNodeFromProcessing(nodeName string) {
r.nodesInProcessing.Delete(nodeName)
}

func (r *rangeAllocator) occupyCIDR(node *api.Node) error {
func (r *rangeAllocator) occupyCIDR(node *v1.Node) error {
defer r.removeNodeFromProcessing(node.Name)
if node.Spec.PodCIDR == "" {
return nil
@@ -164,7 +164,7 @@ func (r *rangeAllocator) occupyCIDR(node *api.Node) error {
// if it doesn't currently have one or mark the CIDR as used if the node already has one.
// WARNING: If you're adding any return calls or defer any more work from this function
// you have to handle nodesInProcessing correctly.
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error {
func (r *rangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error {
if node == nil {
return nil
}
@@ -191,7 +191,7 @@ func (r *rangeAllocator) AllocateOrOccupyCIDR(node *api.Node) error {
}

// ReleaseCIDR releases the CIDR of the removed node
func (r *rangeAllocator) ReleaseCIDR(node *api.Node) error {
func (r *rangeAllocator) ReleaseCIDR(node *v1.Node) error {
if node == nil || node.Spec.PodCIDR == "" {
return nil
}
@@ -225,7 +225,7 @@ func (r *rangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) {
// Assigns CIDR to Node and sends an update to the API server.
func (r *rangeAllocator) updateCIDRAllocation(data nodeAndCIDR) error {
var err error
var node *api.Node
var node *v1.Node
defer r.removeNodeFromProcessing(data.nodeName)
for rep := 0; rep < podCIDRUpdateRetry; rep++ {
// TODO: change it to using PATCH instead of full Node updates.

@@ -21,8 +21,8 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/util/wait"
)

@@ -52,9 +52,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -72,9 +72,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{
description: "Correctly filter out ServiceCIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -96,9 +96,9 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
{
description: "Correctly ignore already allocated CIDRs",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -182,9 +182,9 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
{
description: "When there's no ServiceCIDR return first CIDR in range",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -265,9 +265,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
{
description: "Correctly release preallocated CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -288,9 +288,9 @@ func TestReleaseCIDRSuccess(t *testing.T) {
{
description: "Correctly recycle CIDR",
fakeNodeHandler: &FakeNodeHandler{
Existing: []*api.Node{
Existing: []*v1.Node{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
},
@@ -357,8 +357,8 @@ func TestReleaseCIDRSuccess(t *testing.T) {
}

for _, cidrToRelease := range tc.cidrsToRelease {
nodeToRelease := api.Node{
ObjectMeta: api.ObjectMeta{
nodeToRelease := v1.Node{
ObjectMeta: v1.ObjectMeta{
Name: "node0",
},
}

@@ -22,8 +22,9 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/fields"
@@ -46,9 +47,9 @@ const (
// if any pods were deleted, or were found pending deletion.
func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, nodeName, nodeUID string, daemonStore cache.StoreToDaemonSetLister) (bool, error) {
remaining := false
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName)
options := api.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(api.NamespaceAll).List(options)
selector := fields.OneTermEqualSelector(api.PodHostField, nodeName).String()
options := v1.ListOptions{FieldSelector: selector}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(options)
var updateErrList []error

if err != nil {
@@ -56,7 +57,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
|
||||
}
|
||||
|
||||
if len(pods.Items) > 0 {
|
||||
recordNodeEvent(recorder, nodeName, nodeUID, api.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
|
||||
recordNodeEvent(recorder, nodeName, nodeUID, v1.EventTypeNormal, "DeletingAllPods", fmt.Sprintf("Deleting all Pods from Node %v.", nodeName))
|
||||
}
|
||||
|
||||
for _, pod := range pods.Items {
|
||||
@@ -85,7 +86,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Starting deletion of pod %v", pod.Name)
|
||||
recorder.Eventf(&pod, api.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
|
||||
recorder.Eventf(&pod, v1.EventTypeNormal, "NodeControllerEviction", "Marking for deletion Pod %s from Node %s", pod.Name, nodeName)
|
||||
if err := kubeClient.Core().Pods(pod.Namespace).Delete(pod.Name, nil); err != nil {
|
||||
return false, err
|
||||
}
|
||||
@@ -100,7 +101,7 @@ func deletePods(kubeClient clientset.Interface, recorder record.EventRecorder, n
|
||||
|
||||
// setPodTerminationReason attempts to set a reason and message in the pod status, updates it in the apiserver,
|
||||
// and returns an error if it encounters one.
|
||||
func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeName string) (*api.Pod, error) {
|
||||
func setPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeName string) (*v1.Pod, error) {
|
||||
if pod.Status.Reason == node.NodeUnreachablePodReason {
|
||||
return pod, nil
|
||||
}
|
||||
@@ -108,7 +109,7 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeN
|
||||
pod.Status.Reason = node.NodeUnreachablePodReason
|
||||
pod.Status.Message = fmt.Sprintf(node.NodeUnreachablePodMessage, nodeName, pod.Name)
|
||||
|
||||
var updatedPod *api.Pod
|
||||
var updatedPod *v1.Pod
|
||||
var err error
|
||||
if updatedPod, err = kubeClient.Core().Pods(pod.Namespace).UpdateStatus(pod); err != nil {
|
||||
return nil, err
|
||||
@@ -116,10 +117,10 @@ func setPodTerminationReason(kubeClient clientset.Interface, pod *api.Pod, nodeN
|
||||
return updatedPod, nil
|
||||
}
|
||||
|
||||
func forcefullyDeletePod(c clientset.Interface, pod *api.Pod) error {
|
||||
func forcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
|
||||
var zero int64
|
||||
glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
|
||||
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &api.DeleteOptions{GracePeriodSeconds: &zero})
|
||||
err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &v1.DeleteOptions{GracePeriodSeconds: &zero})
|
||||
if err == nil {
|
||||
glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
|
||||
}
|
||||
@@ -138,14 +139,14 @@ func forcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error
|
||||
// maybeDeleteTerminatingPod non-gracefully deletes pods that are terminating
|
||||
// that should not be gracefully terminated.
|
||||
func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
|
||||
pod, ok := obj.(*api.Pod)
|
||||
pod, ok := obj.(*v1.Pod)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Errorf("Couldn't get object from tombstone %#v", obj)
|
||||
return
|
||||
}
|
||||
pod, ok = tombstone.Obj.(*api.Pod)
|
||||
pod, ok = tombstone.Obj.(*v1.Pod)
|
||||
if !ok {
|
||||
glog.Errorf("Tombstone contained object that is not a Pod %#v", obj)
|
||||
return
|
||||
@@ -176,7 +177,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {
|
||||
// TODO(mikedanese): this can be removed when we no longer
|
||||
// guarantee backwards compatibility of master API to kubelets with
|
||||
// versions less than 1.1.0
|
||||
node := nodeObj.(*api.Node)
|
||||
node := nodeObj.(*v1.Node)
|
||||
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
|
||||
if err != nil {
|
||||
glog.V(0).Infof("couldn't parse verions %q of minion: %v", node.Status.NodeInfo.KubeletVersion, err)
|
||||
@@ -191,7 +192,7 @@ func (nc *NodeController) maybeDeleteTerminatingPod(obj interface{}) {

// update ready status of all pods running on given node from master
// return true if success
func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
func markAllPodsNotReady(kubeClient clientset.Interface, node *v1.Node) error {
// Don't set pods to NotReady if the kubelet is running a version that
// doesn't understand how to correct readiness.
// TODO: Remove this check when we no longer guarantee backward compatibility
@@ -201,8 +202,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
}
nodeName := node.Name
glog.V(2).Infof("Update ready status of pods on node [%v]", nodeName)
opts := api.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName)}
pods, err := kubeClient.Core().Pods(api.NamespaceAll).List(opts)
opts := v1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.PodHostField, nodeName).String()}
pods, err := kubeClient.Core().Pods(v1.NamespaceAll).List(opts)
if err != nil {
return err
}
@@ -215,8 +216,8 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
}

for i, cond := range pod.Status.Conditions {
if cond.Type == api.PodReady {
pod.Status.Conditions[i].Status = api.ConditionFalse
if cond.Type == v1.PodReady {
pod.Status.Conditions[i].Status = v1.ConditionFalse
glog.V(2).Infof("Updating ready status of pod %v to false", pod.Name)
_, err := kubeClient.Core().Pods(pod.Namespace).UpdateStatus(&pod)
if err != nil {
@@ -237,7 +238,7 @@ func markAllPodsNotReady(kubeClient clientset.Interface, node *api.Node) error {
// in the nodeInfo of the given node is "outdated", meaning < 1.2.0.
// Older versions were inflexible and modifying pod.Status directly through
// the apiserver would result in unexpected outcomes.
func nodeRunningOutdatedKubelet(node *api.Node) bool {
func nodeRunningOutdatedKubelet(node *v1.Node) bool {
v, err := version.Parse(node.Status.NodeInfo.KubeletVersion)
if err != nil {
glog.Errorf("couldn't parse version %q of node %v", node.Status.NodeInfo.KubeletVersion, err)
@@ -265,7 +266,7 @@ func nodeExistsInCloudProvider(cloud cloudprovider.Interface, nodeName types.Nod
}

func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype, reason, event string) {
ref := &api.ObjectReference{
ref := &v1.ObjectReference{
Kind: "Node",
Name: nodeName,
UID: types.UID(nodeUID),
@@ -275,8 +276,8 @@ func recordNodeEvent(recorder record.EventRecorder, nodeName, nodeUID, eventtype
recorder.Eventf(ref, eventtype, reason, "Node %s event: %s", nodeName, event)
}

func recordNodeStatusChange(recorder record.EventRecorder, node *api.Node, new_status string) {
ref := &api.ObjectReference{
func recordNodeStatusChange(recorder record.EventRecorder, node *v1.Node, new_status string) {
ref := &v1.ObjectReference{
Kind: "Node",
Name: node.Name,
UID: node.UID,
@@ -285,5 +286,5 @@ func recordNodeStatusChange(recorder record.EventRecorder, node *api.Node, new_s
glog.V(2).Infof("Recording status change %s event message for node %s", new_status, node.Name)
// TODO: This requires a transaction, either both node status is updated
// and event is recorded or neither should happen, see issue #6055.
recorder.Eventf(ref, api.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
recorder.Eventf(ref, v1.EventTypeNormal, new_status, "Node %s status is now: %s", node.Name, new_status)
}
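The recurring .String() additions in this file are the visible part of a type change: internal api.ListOptions carried typed selectors, while v1.ListOptions carries their string forms. A minimal, self-contained sketch of the conversion (the literal "spec.nodeName" stands in for api.PodHostField):

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/fields"
)

func main() {
	// A typed field selector must be rendered with String() before it
	// can be placed in v1.ListOptions.FieldSelector, which is a string.
	selector := fields.OneTermEqualSelector("spec.nodeName", "node0")
	fmt.Println(selector.String()) // prints: spec.nodeName=node0
}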
@@ -26,9 +26,10 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -83,7 +84,7 @@ const (
type nodeStatusData struct {
probeTimestamp unversioned.Time
readyTransitionTimestamp unversioned.Time
status api.NodeStatus
status v1.NodeStatus
}

type NodeController struct {
@@ -91,7 +92,7 @@ type NodeController struct {
cloud cloudprovider.Interface
clusterCIDR *net.IPNet
serviceCIDR *net.IPNet
knownNodeSet map[string]*api.Node
knownNodeSet map[string]*v1.Node
kubeClient clientset.Interface
// Method for easy mocking in unittest.
lookupIP func(host string) ([]net.IP, error)
@@ -140,9 +141,9 @@ type NodeController struct {
// allocate/recycle CIDRs for node if allocateNodeCIDRs == true
cidrAllocator CIDRAllocator

forcefullyDeletePod func(*api.Pod) error
forcefullyDeletePod func(*v1.Pod) error
nodeExistsInCloudProvider func(types.NodeName) (bool, error)
computeZoneStateFunc func(nodeConditions []*api.NodeCondition) (int, zoneState)
computeZoneStateFunc func(nodeConditions []*v1.NodeCondition) (int, zoneState)
enterPartialDisruptionFunc func(nodeNum int) float32
enterFullDisruptionFunc func(nodeNum int) float32

@@ -183,11 +184,11 @@ func NewNodeController(
nodeCIDRMaskSize int,
allocateNodeCIDRs bool) (*NodeController, error) {
eventBroadcaster := record.NewBroadcaster()
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "controllermanager"})
eventBroadcaster.StartLogging(glog.Infof)
if kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
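With the recorder wired against the v1 event sink as above, call sites emit events against v1 object references. A fragment mirroring recordNodeEvent from controller_utils.go in this commit (assumes the package's imports):

// Fragment for illustration; mirrors recordNodeEvent shown earlier.
ref := &v1.ObjectReference{
	Kind:      "Node",
	Name:      nodeName,
	UID:       types.UID(nodeUID),
	Namespace: "",
}
recorder.Eventf(ref, v1.EventTypeNormal, "RegisteredNode", "Node %s event: %s", nodeName, "RegisteredNode")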
@@ -208,7 +209,7 @@ func NewNodeController(

nc := &NodeController{
cloud: cloud,
knownNodeSet: make(map[string]*api.Node),
knownNodeSet: make(map[string]*v1.Node),
kubeClient: kubeClient,
recorder: recorder,
podEvictionTimeout: podEvictionTimeout,
@@ -223,7 +224,7 @@ func NewNodeController(
clusterCIDR: clusterCIDR,
serviceCIDR: serviceCIDR,
allocateNodeCIDRs: allocateNodeCIDRs,
forcefullyDeletePod: func(p *api.Pod) error { return forcefullyDeletePod(kubeClient, p) },
forcefullyDeletePod: func(p *v1.Pod) error { return forcefullyDeletePod(kubeClient, p) },
nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) { return nodeExistsInCloudProvider(cloud, nodeName) },
evictionLimiterQPS: evictionLimiterQPS,
secondaryEvictionLimiterQPS: secondaryEvictionLimiterQPS,
@@ -246,14 +247,14 @@ func NewNodeController(

nodeEventHandlerFuncs := cache.ResourceEventHandlerFuncs{}
if nc.allocateNodeCIDRs {
var nodeList *api.NodeList
var nodeList *v1.NodeList
var err error
// We must poll because apiserver might not be up. This error causes
// controller manager to restart.
if pollErr := wait.Poll(10*time.Second, apiserverStartupGracePeriod, func() (bool, error) {
nodeList, err = kubeClient.Core().Nodes().List(api.ListOptions{
FieldSelector: fields.Everything(),
LabelSelector: labels.Everything(),
nodeList, err = kubeClient.Core().Nodes().List(v1.ListOptions{
FieldSelector: fields.Everything().String(),
LabelSelector: labels.Everything().String(),
})
if err != nil {
glog.Errorf("Failed to list all nodes: %v", err)
@@ -275,14 +276,14 @@ func NewNodeController(
utilruntime.HandleError(err)
return
}
node := obj.(*api.Node)
node := obj.(*v1.Node)

if err := nc.cidrAllocator.AllocateOrOccupyCIDR(node); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
},
UpdateFunc: func(_, obj interface{}) {
node := obj.(*api.Node)
node := obj.(*v1.Node)
// If the PodCIDR is not empty we either:
// - already processed a Node that already had a CIDR after NC restarted
// (cidr is marked as used),
@@ -309,7 +310,7 @@ func NewNodeController(
return
}

if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*api.Node)); err != nil {
if err := nc.cidrAllocator.AllocateOrOccupyCIDR(nodeCopy.(*v1.Node)); err != nil {
utilruntime.HandleError(fmt.Errorf("Error allocating CIDR: %v", err))
}
}
@@ -321,15 +322,15 @@ func NewNodeController(
return
}

node, isNode := obj.(*api.Node)
// We can get DeletedFinalStateUnknown instead of *api.Node here and we need to handle that correctly. #34692
node, isNode := obj.(*v1.Node)
// We can get DeletedFinalStateUnknown instead of *v1.Node here and we need to handle that correctly. #34692
if !isNode {
deletedState, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Errorf("Received unexpected object: %v", obj)
return
}
node, ok = deletedState.Obj.(*api.Node)
node, ok = deletedState.Obj.(*v1.Node)
if !ok {
glog.Errorf("DeletedFinalStateUnknown contained non-Node object: %v", deletedState.Obj)
return
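The DeleteFunc above repeats the tombstone-unwrapping pattern also seen in maybeDeleteTerminatingPod. A hypothetical helper capturing it (not part of this commit) could read:

// Hypothetical helper, not in this commit: unwraps either a live
// *v1.Node or a cache.DeletedFinalStateUnknown tombstone.
func nodeFromDeleteObj(obj interface{}) (*v1.Node, bool) {
	if node, ok := obj.(*v1.Node); ok {
		return node, true
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false
	}
	node, ok := tombstone.Obj.(*v1.Node)
	return node, ok
}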
@@ -381,7 +382,7 @@ func (nc *NodeController) Run() {
} else if !exists {
glog.Warningf("Node %v no longer present in nodeStore!", value.Value)
} else {
node, _ := obj.(*api.Node)
node, _ := obj.(*v1.Node)
zone := utilnode.GetZoneKey(node)
EvictionsNumber.WithLabelValues(zone).Inc()
}
@@ -410,14 +411,14 @@ func (nc *NodeController) monitorNodeStatus() error {
// It is enough to list Nodes from apiserver, since we can tolerate some small
// delays comparing to state from etcd and there is eventual consistency anyway.
// TODO: We should list them from local cache: nodeStore.
nodes, err := nc.kubeClient.Core().Nodes().List(api.ListOptions{ResourceVersion: "0"})
nodes, err := nc.kubeClient.Core().Nodes().List(v1.ListOptions{ResourceVersion: "0"})
if err != nil {
return err
}
added, deleted := nc.checkForNodeAddedDeleted(nodes)
for i := range added {
glog.V(1).Infof("NodeController observed a new Node: %#v", added[i].Name)
recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), api.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name))
recordNodeEvent(nc.recorder, added[i].Name, string(added[i].UID), v1.EventTypeNormal, "RegisteredNode", fmt.Sprintf("Registered Node %v in NodeController", added[i].Name))
nc.knownNodeSet[added[i].Name] = added[i]
// When adding new Nodes we need to check if new zone appeared, and if so add new evictor.
zone := utilnode.GetZoneKey(added[i])
@@ -434,15 +435,15 @@ func (nc *NodeController) monitorNodeStatus() error {

for i := range deleted {
glog.V(1).Infof("NodeController observed a Node deletion: %v", deleted[i].Name)
recordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), api.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", deleted[i].Name))
recordNodeEvent(nc.recorder, deleted[i].Name, string(deleted[i].UID), v1.EventTypeNormal, "RemovingNode", fmt.Sprintf("Removing Node %v from NodeController", deleted[i].Name))
delete(nc.knownNodeSet, deleted[i].Name)
}

zoneToNodeConditions := map[string][]*api.NodeCondition{}
zoneToNodeConditions := map[string][]*v1.NodeCondition{}
for i := range nodes.Items {
var gracePeriod time.Duration
var observedReadyCondition api.NodeCondition
var currentReadyCondition *api.NodeCondition
var observedReadyCondition v1.NodeCondition
var currentReadyCondition *v1.NodeCondition
node := &nodes.Items[i]
for rep := 0; rep < nodeStatusUpdateRetry; rep++ {
gracePeriod, observedReadyCondition, currentReadyCondition, err = nc.tryUpdateNodeStatus(node)
@@ -463,33 +464,33 @@ func (nc *NodeController) monitorNodeStatus() error {
continue
}
// We do not treat a master node as a part of the cluster for network disruption checking.
if !system.IsMasterNode(node) {
if !system.IsMasterNode(node.Name) {
zoneToNodeConditions[utilnode.GetZoneKey(node)] = append(zoneToNodeConditions[utilnode.GetZoneKey(node)], currentReadyCondition)
}

decisionTimestamp := nc.now()
if currentReadyCondition != nil {
// Check eviction timeout against decisionTimestamp
if observedReadyCondition.Status == api.ConditionFalse &&
if observedReadyCondition.Status == v1.ConditionFalse &&
decisionTimestamp.After(nc.nodeStatusMap[node.Name].readyTransitionTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) {
glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout)
}
}
if observedReadyCondition.Status == api.ConditionUnknown &&
if observedReadyCondition.Status == v1.ConditionUnknown &&
decisionTimestamp.After(nc.nodeStatusMap[node.Name].probeTimestamp.Add(nc.podEvictionTimeout)) {
if nc.evictPods(node) {
glog.V(2).Infof("Evicting pods on node %s: %v is later than %v + %v", node.Name, decisionTimestamp, nc.nodeStatusMap[node.Name].readyTransitionTimestamp, nc.podEvictionTimeout-gracePeriod)
}
}
if observedReadyCondition.Status == api.ConditionTrue {
if observedReadyCondition.Status == v1.ConditionTrue {
if nc.cancelPodEviction(node) {
glog.V(2).Infof("Node %s is ready again, cancelled pod eviction", node.Name)
}
}

// Report node event.
if currentReadyCondition.Status != api.ConditionTrue && observedReadyCondition.Status == api.ConditionTrue {
if currentReadyCondition.Status != v1.ConditionTrue && observedReadyCondition.Status == v1.ConditionTrue {
recordNodeStatusChange(nc.recorder, node, "NodeNotReady")
if err = markAllPodsNotReady(nc.kubeClient, node); err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to mark all pods NotReady on node %v: %v", node.Name, err))
@@ -498,7 +499,7 @@ func (nc *NodeController) monitorNodeStatus() error {

// Check with the cloud provider to see if the node still exists. If it
// doesn't, delete the node immediately.
if currentReadyCondition.Status != api.ConditionTrue && nc.cloud != nil {
if currentReadyCondition.Status != v1.ConditionTrue && nc.cloud != nil {
exists, err := nc.nodeExistsInCloudProvider(types.NodeName(node.Name))
if err != nil {
glog.Errorf("Error determining if node %v exists in cloud: %v", node.Name, err)
@@ -506,7 +507,7 @@ func (nc *NodeController) monitorNodeStatus() error {
}
if !exists {
glog.V(2).Infof("Deleting node (no longer present in cloud provider): %s", node.Name)
recordNodeEvent(nc.recorder, node.Name, string(node.UID), api.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
recordNodeEvent(nc.recorder, node.Name, string(node.UID), v1.EventTypeNormal, "DeletingNode", fmt.Sprintf("Deleting Node %v because it's not present according to cloud provider", node.Name))
go func(nodeName string) {
defer utilruntime.HandleCrash()
// Kubelet is not reporting and Cloud Provider says node
@@ -526,7 +527,7 @@ func (nc *NodeController) monitorNodeStatus() error {
return nil
}

func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*api.NodeCondition, nodes *api.NodeList) {
func (nc *NodeController) handleDisruption(zoneToNodeConditions map[string][]*v1.NodeCondition, nodes *v1.NodeList) {
newZoneStates := map[string]zoneState{}
allAreFullyDisrupted := true
for k, v := range zoneToNodeConditions {
@@ -627,18 +628,18 @@ func (nc *NodeController) setLimiterInZone(zone string, zoneSize int, state zone

// For a given node checks its conditions and tries to update it. Returns grace period to which given node
// is entitled, state of current and last observed Ready Condition, and an error if it occurred.
func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, api.NodeCondition, *api.NodeCondition, error) {
func (nc *NodeController) tryUpdateNodeStatus(node *v1.Node) (time.Duration, v1.NodeCondition, *v1.NodeCondition, error) {
var err error
var gracePeriod time.Duration
var observedReadyCondition api.NodeCondition
_, currentReadyCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
var observedReadyCondition v1.NodeCondition
_, currentReadyCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if currentReadyCondition == nil {
// If ready condition is nil, then kubelet (or nodecontroller) never posted node status.
// A fake ready condition is created, where LastProbeTime and LastTransitionTime are set
// to node.CreationTimestamp to avoid handling the corner case.
observedReadyCondition = api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionUnknown,
observedReadyCondition = v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
LastHeartbeatTime: node.CreationTimestamp,
LastTransitionTime: node.CreationTimestamp,
}
@@ -669,11 +670,11 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// - if 'LastProbeTime' has gone back in time it's probably an error, currently we ignore it,
// - currently only correct Ready State transition outside of Node Controller is marking it ready by Kubelet, we don't check
// if that's the case, but it does not seem necessary.
var savedCondition *api.NodeCondition
var savedCondition *v1.NodeCondition
if found {
_, savedCondition = api.GetNodeCondition(&savedNodeStatus.status, api.NodeReady)
_, savedCondition = v1.GetNodeCondition(&savedNodeStatus.status, v1.NodeReady)
}
_, observedCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
_, observedCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if !found {
glog.Warningf("Missing timestamp for Node %s. Assuming now as a timestamp.", node.Name)
savedNodeStatus = nodeStatusData{
@@ -725,9 +726,9 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// (regardless of its current value) in the master.
if currentReadyCondition == nil {
glog.V(2).Infof("node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
Type: api.NodeReady,
Status: api.ConditionUnknown,
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: v1.NodeReady,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: fmt.Sprintf("Kubelet never posted node status."),
LastHeartbeatTime: node.CreationTimestamp,
@@ -736,8 +737,8 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
} else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last ready condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), observedReadyCondition)
if observedReadyCondition.Status != api.ConditionUnknown {
currentReadyCondition.Status = api.ConditionUnknown
if observedReadyCondition.Status != v1.ConditionUnknown {
currentReadyCondition.Status = v1.ConditionUnknown
currentReadyCondition.Reason = "NodeStatusUnknown"
currentReadyCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.")
// LastProbeTime is the last time we heard from kubelet.
@@ -749,12 +750,12 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
// Like NodeReady condition, NodeOutOfDisk was last set longer ago than gracePeriod, so update
// it to Unknown (regardless of its current value) in the master.
// TODO(madhusudancs): Refactor this with readyCondition to remove duplicated code.
_, oodCondition := api.GetNodeCondition(&node.Status, api.NodeOutOfDisk)
_, oodCondition := v1.GetNodeCondition(&node.Status, v1.NodeOutOfDisk)
if oodCondition == nil {
glog.V(2).Infof("Out of disk condition of node %v is never updated by kubelet", node.Name)
node.Status.Conditions = append(node.Status.Conditions, api.NodeCondition{
Type: api.NodeOutOfDisk,
Status: api.ConditionUnknown,
node.Status.Conditions = append(node.Status.Conditions, v1.NodeCondition{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionUnknown,
Reason: "NodeStatusNeverUpdated",
Message: fmt.Sprintf("Kubelet never posted node status."),
LastHeartbeatTime: node.CreationTimestamp,
@@ -763,16 +764,16 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
} else {
glog.V(4).Infof("node %v hasn't been updated for %+v. Last out of disk condition is: %+v",
node.Name, nc.now().Time.Sub(savedNodeStatus.probeTimestamp.Time), oodCondition)
if oodCondition.Status != api.ConditionUnknown {
oodCondition.Status = api.ConditionUnknown
if oodCondition.Status != v1.ConditionUnknown {
oodCondition.Status = v1.ConditionUnknown
oodCondition.Reason = "NodeStatusUnknown"
oodCondition.Message = fmt.Sprintf("Kubelet stopped posting node status.")
oodCondition.LastTransitionTime = nc.now()
}
}

_, currentCondition := api.GetNodeCondition(&node.Status, api.NodeReady)
if !api.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
_, currentCondition := v1.GetNodeCondition(&node.Status, v1.NodeReady)
if !v1.Semantic.DeepEqual(currentCondition, &observedReadyCondition) {
if _, err = nc.kubeClient.Core().Nodes().UpdateStatus(node); err != nil {
glog.Errorf("Error updating node %s: %v", node.Name, err)
return gracePeriod, observedReadyCondition, currentReadyCondition, err
@@ -790,7 +791,7 @@ func (nc *NodeController) tryUpdateNodeStatus(node *api.Node) (time.Duration, ap
return gracePeriod, observedReadyCondition, currentReadyCondition, err
}

func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added, deleted []*api.Node) {
func (nc *NodeController) checkForNodeAddedDeleted(nodes *v1.NodeList) (added, deleted []*v1.Node) {
for i := range nodes.Items {
if _, has := nc.knownNodeSet[nodes.Items[i].Name]; !has {
added = append(added, &nodes.Items[i])
@@ -799,7 +800,7 @@ func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added,
// If there's a difference between lengths of known Nodes and observed nodes
// we must have removed some Node.
if len(nc.knownNodeSet)+len(added) != len(nodes.Items) {
knowSetCopy := map[string]*api.Node{}
knowSetCopy := map[string]*v1.Node{}
for k, v := range nc.knownNodeSet {
knowSetCopy[k] = v
}
@@ -815,7 +816,7 @@ func (nc *NodeController) checkForNodeAddedDeleted(nodes *api.NodeList) (added,

// cancelPodEviction removes any queued evictions, typically because the node is available again. It
// returns true if an eviction was queued.
func (nc *NodeController) cancelPodEviction(node *api.Node) bool {
func (nc *NodeController) cancelPodEviction(node *v1.Node) bool {
zone := utilnode.GetZoneKey(node)
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
@@ -829,7 +830,7 @@ func (nc *NodeController) cancelPodEviction(node *api.Node) bool {

// evictPods queues an eviction for the provided node name, and returns false if the node is already
// queued for eviction.
func (nc *NodeController) evictPods(node *api.Node) bool {
func (nc *NodeController) evictPods(node *v1.Node) bool {
nc.evictorLock.Lock()
defer nc.evictorLock.Unlock()
return nc.zonePodEvictor[utilnode.GetZoneKey(node)].Add(node.Name, string(node.UID))
@@ -853,11 +854,11 @@ func (nc *NodeController) ReducedQPSFunc(nodeNum int) float32 {
// - fullyDisrupted if there are no Ready Nodes,
// - partiallyDisrupted if at least nc.unhealthyZoneThreshold percent of Nodes are not Ready,
// - normal otherwise
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*api.NodeCondition) (int, zoneState) {
func (nc *NodeController) ComputeZoneState(nodeReadyConditions []*v1.NodeCondition) (int, zoneState) {
readyNodes := 0
notReadyNodes := 0
for i := range nodeReadyConditions {
if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == api.ConditionTrue {
if nodeReadyConditions[i] != nil && nodeReadyConditions[i].Status == v1.ConditionTrue {
readyNodes++
} else {
notReadyNodes++
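Restating the rule documented above as a worked sketch (string states stand in for the package's unexported zoneState values; the real method also returns the not-ready count):

// Illustrative only; not the commit's implementation.
func zoneStateFor(ready, notReady int, unhealthyZoneThreshold float32) string {
	total := ready + notReady
	switch {
	case total == 0:
		return "normal" // no data for the zone
	case ready == 0:
		return "fullyDisrupted" // no Ready nodes at all
	case float32(notReady)/float32(total) >= unhealthyZoneThreshold:
		return "partiallyDisrupted" // not-ready fraction hit the threshold
	default:
		return "normal"
	}
}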
(File diff suppressed because it is too large)
@@ -26,8 +26,9 @@ import (
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/clock"
utilnode "k8s.io/kubernetes/pkg/util/node"
@@ -43,14 +44,14 @@ type FakeNodeHandler struct {
*fake.Clientset

// Input: Hooks determine if request is valid or not
CreateHook func(*FakeNodeHandler, *api.Node) bool
Existing []*api.Node
CreateHook func(*FakeNodeHandler, *v1.Node) bool
Existing []*v1.Node

// Output
CreatedNodes []*api.Node
DeletedNodes []*api.Node
UpdatedNodes []*api.Node
UpdatedNodeStatuses []*api.Node
CreatedNodes []*v1.Node
DeletedNodes []*v1.Node
UpdatedNodes []*v1.Node
UpdatedNodeStatuses []*v1.Node
RequestCount int

// Synchronization
@@ -59,29 +60,29 @@ type FakeNodeHandler struct {
}

type FakeLegacyHandler struct {
unversionedcore.CoreInterface
v1core.CoreV1Interface
n *FakeNodeHandler
}

func (c *FakeNodeHandler) getUpdatedNodesCopy() []*api.Node {
func (c *FakeNodeHandler) getUpdatedNodesCopy() []*v1.Node {
c.lock.Lock()
defer c.lock.Unlock()
updatedNodesCopy := make([]*api.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
updatedNodesCopy := make([]*v1.Node, len(c.UpdatedNodes), len(c.UpdatedNodes))
for i, ptr := range c.UpdatedNodes {
updatedNodesCopy[i] = ptr
}
return updatedNodesCopy
}

func (c *FakeNodeHandler) Core() unversionedcore.CoreInterface {
func (c *FakeNodeHandler) Core() v1core.CoreV1Interface {
return &FakeLegacyHandler{c.Clientset.Core(), c}
}

func (m *FakeLegacyHandler) Nodes() unversionedcore.NodeInterface {
func (m *FakeLegacyHandler) Nodes() v1core.NodeInterface {
return m.n
}

func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) Create(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -101,7 +102,7 @@ func (m *FakeNodeHandler) Create(node *api.Node) (*api.Node, error) {
}
}

func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
func (m *FakeNodeHandler) Get(name string) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -122,13 +123,13 @@ func (m *FakeNodeHandler) Get(name string) (*api.Node, error) {
return nil, nil
}

func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
func (m *FakeNodeHandler) List(opts v1.ListOptions) (*v1.NodeList, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
m.lock.Unlock()
}()
var nodes []*api.Node
var nodes []*v1.Node
for i := 0; i < len(m.UpdatedNodes); i++ {
if !contains(m.UpdatedNodes[i], m.DeletedNodes) {
nodes = append(nodes, m.UpdatedNodes[i])
@@ -144,14 +145,14 @@ func (m *FakeNodeHandler) List(opts api.ListOptions) (*api.NodeList, error) {
nodes = append(nodes, m.CreatedNodes[i])
}
}
nodeList := &api.NodeList{}
nodeList := &v1.NodeList{}
for _, node := range nodes {
nodeList.Items = append(nodeList.Items, *node)
}
return nodeList, nil
}

func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
func (m *FakeNodeHandler) Delete(id string, opt *v1.DeleteOptions) error {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -164,11 +165,11 @@ func (m *FakeNodeHandler) Delete(id string, opt *api.DeleteOptions) error {
return nil
}

func (m *FakeNodeHandler) DeleteCollection(opt *api.DeleteOptions, listOpts api.ListOptions) error {
func (m *FakeNodeHandler) DeleteCollection(opt *v1.DeleteOptions, listOpts v1.ListOptions) error {
return nil
}

func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) Update(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -185,7 +186,7 @@ func (m *FakeNodeHandler) Update(node *api.Node) (*api.Node, error) {
return node, nil
}

func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
func (m *FakeNodeHandler) UpdateStatus(node *v1.Node) (*v1.Node, error) {
m.lock.Lock()
defer func() {
m.RequestCount++
@@ -196,23 +197,23 @@ func (m *FakeNodeHandler) UpdateStatus(node *api.Node) (*api.Node, error) {
return node, nil
}

func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*api.Node, error) {
func (m *FakeNodeHandler) PatchStatus(nodeName string, data []byte) (*v1.Node, error) {
m.RequestCount++
return &api.Node{}, nil
return &v1.Node{}, nil
}

func (m *FakeNodeHandler) Watch(opts api.ListOptions) (watch.Interface, error) {
func (m *FakeNodeHandler) Watch(opts v1.ListOptions) (watch.Interface, error) {
return watch.NewFake(), nil
}

func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*api.Node, error) {
func (m *FakeNodeHandler) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (*v1.Node, error) {
return nil, nil
}

// FakeRecorder is used as a fake during testing.
type FakeRecorder struct {
source api.EventSource
events []*api.Event
source v1.EventSource
events []*v1.Event
clock clock.Clock
}

@@ -228,7 +229,7 @@ func (f *FakeRecorder) PastEventf(obj runtime.Object, timestamp unversioned.Time
}

func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.Time, eventtype, reason, message string) {
ref, err := api.GetReference(obj)
ref, err := v1.GetReference(obj)
if err != nil {
return
}
@@ -240,15 +241,15 @@ func (f *FakeRecorder) generateEvent(obj runtime.Object, timestamp unversioned.T
}
}

func (f *FakeRecorder) makeEvent(ref *api.ObjectReference, eventtype, reason, message string) *api.Event {
func (f *FakeRecorder) makeEvent(ref *v1.ObjectReference, eventtype, reason, message string) *v1.Event {
fmt.Println("make event")
t := unversioned.Time{Time: f.clock.Now()}
namespace := ref.Namespace
if namespace == "" {
namespace = api.NamespaceDefault
namespace = v1.NamespaceDefault
}
return &api.Event{
ObjectMeta: api.ObjectMeta{
return &v1.Event{
ObjectMeta: v1.ObjectMeta{
Name: fmt.Sprintf("%v.%x", ref.Name, t.UnixNano()),
Namespace: namespace,
},
@@ -264,41 +265,41 @@ func (f *FakeRecorder) makeEvent(ref *api.ObjectReference, eventtype, reason, me

func NewFakeRecorder() *FakeRecorder {
return &FakeRecorder{
source: api.EventSource{Component: "nodeControllerTest"},
events: []*api.Event{},
source: v1.EventSource{Component: "nodeControllerTest"},
events: []*v1.Event{},
clock: clock.NewFakeClock(time.Now()),
}
}

func newNode(name string) *api.Node {
return &api.Node{
ObjectMeta: api.ObjectMeta{Name: name},
Spec: api.NodeSpec{
func newNode(name string) *v1.Node {
return &v1.Node{
ObjectMeta: v1.ObjectMeta{Name: name},
Spec: v1.NodeSpec{
ExternalID: name,
},
Status: api.NodeStatus{
Capacity: api.ResourceList{
api.ResourceName(api.ResourceCPU): resource.MustParse("10"),
api.ResourceName(api.ResourceMemory): resource.MustParse("10G"),
Status: v1.NodeStatus{
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceCPU): resource.MustParse("10"),
v1.ResourceName(v1.ResourceMemory): resource.MustParse("10G"),
},
},
}
}

func newPod(name, host string) *api.Pod {
pod := &api.Pod{
ObjectMeta: api.ObjectMeta{
func newPod(name, host string) *v1.Pod {
pod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: name,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
NodeName: host,
},
Status: api.PodStatus{
Conditions: []api.PodCondition{
Status: v1.PodStatus{
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Status: api.ConditionTrue,
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
@@ -307,7 +308,7 @@ func newPod(name, host string) *api.Pod {
return pod
}

func contains(node *api.Node, nodes []*api.Node) bool {
func contains(node *v1.Node, nodes []*v1.Node) bool {
for i := 0; i < len(nodes); i++ {
if node.Name == nodes[i].Name {
return true
@@ -318,7 +319,7 @@ func contains(node *api.Node, nodes []*api.Node) bool {

// Returns list of zones for all Nodes stored in FakeNodeHandler
func getZones(nodeHandler *FakeNodeHandler) []string {
nodes, _ := nodeHandler.List(api.ListOptions{})
nodes, _ := nodeHandler.List(v1.ListOptions{})
zones := sets.NewString()
for _, node := range nodes.Items {
zones.Insert(utilnode.GetZoneKey(&node))
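Taken together, the fake gives tests a v1-typed node store. Hypothetical usage (helper names from this file; not part of the commit):

// Hypothetical test usage of the fake above.
handler := &FakeNodeHandler{
	Existing:  []*v1.Node{newNode("node0"), newNode("node1")},
	Clientset: fake.NewSimpleClientset(),
}
nodes, _ := handler.List(v1.ListOptions{})
fmt.Printf("fake is serving %d nodes in zones %v\n", len(nodes.Items), getZones(handler))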
@@ -22,11 +22,11 @@ import (

inf "gopkg.in/inf.v0"

"k8s.io/kubernetes/pkg/api"
api_pod "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/util/sets"
@@ -36,34 +36,34 @@ func dec(i int64, exponent int) *inf.Dec {
return inf.NewDec(i, inf.Scale(-exponent))
}

func newPVC(name string) api.PersistentVolumeClaim {
return api.PersistentVolumeClaim{
ObjectMeta: api.ObjectMeta{
func newPVC(name string) v1.PersistentVolumeClaim {
return v1.PersistentVolumeClaim{
ObjectMeta: v1.ObjectMeta{
Name: name,
},
Spec: api.PersistentVolumeClaimSpec{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
Spec: v1.PersistentVolumeClaimSpec{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: *resource.NewQuantity(1, resource.BinarySI),
},
},
},
}
}

func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.VolumeMount, podMounts []api.VolumeMount) *apps.StatefulSet {
func newStatefulSetWithVolumes(replicas int, name string, petMounts []v1.VolumeMount, podMounts []v1.VolumeMount) *apps.StatefulSet {
mounts := append(petMounts, podMounts...)
claims := []api.PersistentVolumeClaim{}
claims := []v1.PersistentVolumeClaim{}
for _, m := range petMounts {
claims = append(claims, newPVC(m.Name))
}

vols := []api.Volume{}
vols := []v1.Volume{}
for _, m := range podMounts {
vols = append(vols, api.Volume{
vols = append(vols, v1.Volume{
Name: m.Name,
VolumeSource: api.VolumeSource{
HostPath: &api.HostPathVolumeSource{
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: fmt.Sprintf("/tmp/%v", m.Name),
},
},
@@ -75,19 +75,19 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.Volume
Kind: "StatefulSet",
APIVersion: "apps/v1beta1",
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: name,
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
UID: types.UID("test"),
},
Spec: apps.StatefulSetSpec{
Selector: &unversioned.LabelSelector{
MatchLabels: map[string]string{"foo": "bar"},
},
Replicas: int32(replicas),
Template: api.PodTemplateSpec{
Spec: api.PodSpec{
Containers: []api.Container{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Template: v1.PodTemplateSpec{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "nginx",
Image: "nginx",
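The Replicas change is the one non-mechanical edit in this hunk: in apps/v1beta1 the field is a *int32 rather than an int32, hence the inline closure. A tiny helper (hypothetical, not in this commit) reads better at call sites:

// Hypothetical helper equivalent to the inline closure above.
func int32Ptr(i int32) *int32 { return &i }

// e.g. Replicas: int32Ptr(int32(replicas)),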
@@ -103,16 +103,16 @@ func newStatefulSetWithVolumes(replicas int, name string, petMounts []api.Volume
}
}

func runningPod(ns, name string) *api.Pod {
p := &api.Pod{Status: api.PodStatus{Phase: api.PodRunning}}
func runningPod(ns, name string) *v1.Pod {
p := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
p.Namespace = ns
p.Name = name
return p
}

func newPodList(ps *apps.StatefulSet, num int) []*api.Pod {
func newPodList(ps *apps.StatefulSet, num int) []*v1.Pod {
// knownPods are pods in the system
knownPods := []*api.Pod{}
knownPods := []*v1.Pod{}
for i := 0; i < num; i++ {
k, _ := newPCB(fmt.Sprintf("%v", i), ps)
knownPods = append(knownPods, k.pod)
@@ -121,16 +121,16 @@ func newPodList(ps *apps.StatefulSet, num int) []*api.Pod {
}

func newStatefulSet(replicas int) *apps.StatefulSet {
petMounts := []api.VolumeMount{
petMounts := []v1.VolumeMount{
{Name: "datadir", MountPath: "/tmp/zookeeper"},
}
podMounts := []api.VolumeMount{
podMounts := []v1.VolumeMount{
{Name: "home", MountPath: "/home"},
}
return newStatefulSetWithVolumes(replicas, "foo", petMounts, podMounts)
}

func checkPodForMount(pod *api.Pod, mountName string) error {
func checkPodForMount(pod *v1.Pod, mountName string) error {
for _, c := range pod.Spec.Containers {
for _, v := range c.VolumeMounts {
if v.Name == mountName {
@@ -144,7 +144,7 @@ func checkPodForMount(pod *api.Pod, mountName string) error {
func newFakePetClient() *fakePetClient {
return &fakePetClient{
pets: []*pcb{},
claims: []api.PersistentVolumeClaim{},
claims: []v1.PersistentVolumeClaim{},
recorder: &record.FakeRecorder{},
petHealthChecker: &defaultPetHealthChecker{},
}
@@ -152,7 +152,7 @@ func newFakePetClient() *fakePetClient {

type fakePetClient struct {
pets []*pcb
claims []api.PersistentVolumeClaim
claims []v1.PersistentVolumeClaim
petsCreated int
petsDeleted int
claimsCreated int
@@ -168,7 +168,7 @@ func (f *fakePetClient) Delete(p *pcb) error {
for i, pet := range f.pets {
if p.pod.Name == pet.pod.Name {
found = true
f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name)
f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pod: %v", pet.pod.Name)
continue
}
pets = append(pets, f.pets[i])
@@ -199,7 +199,7 @@ func (f *fakePetClient) Create(p *pcb) error {
return fmt.Errorf("Create failed: pod %v already exists", p.pod.Name)
}
}
f.recorder.Eventf(p.parent, api.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name)
f.recorder.Eventf(p.parent, v1.EventTypeNormal, "SuccessfulCreate", "pod: %v", p.pod.Name)
f.pets = append(f.pets, p)
f.petsCreated++
return nil
@@ -226,8 +226,8 @@ func (f *fakePetClient) Update(expected, wanted *pcb) error {
return nil
}

func (f *fakePetClient) getPodList() []*api.Pod {
p := []*api.Pod{}
func (f *fakePetClient) getPodList() []*v1.Pod {
p := []*v1.Pod{}
for i, pet := range f.pets {
if pet.pod == nil {
continue
@@ -251,10 +251,10 @@ func (f *fakePetClient) setHealthy(index int) error {
if len(f.pets) <= index {
return fmt.Errorf("Index out of range, len %v index %v", len(f.pets), index)
}
f.pets[index].pod.Status.Phase = api.PodRunning
f.pets[index].pod.Status.Phase = v1.PodRunning
f.pets[index].pod.Annotations[StatefulSetInitAnnotation] = "true"
f.pets[index].pod.Status.Conditions = []api.PodCondition{
{Type: api.PodReady, Status: api.ConditionTrue},
f.pets[index].pod.Status.Conditions = []v1.PodCondition{
{Type: v1.PodReady, Status: v1.ConditionTrue},
}
return nil
}
@@ -262,7 +262,7 @@ func (f *fakePetClient) setHealthy(index int) error {
// isHealthy is a convenience wrapper around the default health checker.
// The first invocation returns not-healthy, but marks the pet healthy so
// subsequent invocations see it as healthy.
func (f *fakePetClient) isHealthy(pod *api.Pod) bool {
func (f *fakePetClient) isHealthy(pod *v1.Pod) bool {
if f.petHealthChecker.isHealthy(pod) {
return true
}
@@ -280,11 +280,11 @@ func (f *fakePetClient) setDeletionTimestamp(index int) error {
// SyncPVCs fakes pvc syncing.
func (f *fakePetClient) SyncPVCs(pet *pcb) error {
v := pet.pvcs
updateClaims := map[string]api.PersistentVolumeClaim{}
updateClaims := map[string]v1.PersistentVolumeClaim{}
for i, update := range v {
updateClaims[update.Name] = v[i]
}
claimList := []api.PersistentVolumeClaim{}
claimList := []v1.PersistentVolumeClaim{}
for i, existing := range f.claims {
if update, ok := updateClaims[existing.Name]; ok {
claimList = append(claimList, update)
@@ -296,7 +296,7 @@ func (f *fakePetClient) SyncPVCs(pet *pcb) error {
for _, remaining := range updateClaims {
claimList = append(claimList, remaining)
f.claimsCreated++
f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulCreate", "pvc: %v", remaining.Name)
}
f.claims = claimList
return nil
@@ -309,12 +309,12 @@ func (f *fakePetClient) DeletePVCs(pet *pcb) error {
for _, c := range claimsToDelete {
deleteClaimNames.Insert(c.Name)
}
pvcs := []api.PersistentVolumeClaim{}
pvcs := []v1.PersistentVolumeClaim{}
for i, existing := range f.claims {
if deleteClaimNames.Has(existing.Name) {
deleteClaimNames.Delete(existing.Name)
f.claimsDeleted++
f.recorder.Eventf(pet.parent, api.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
f.recorder.Eventf(pet.parent, v1.EventTypeNormal, "SuccessfulDelete", "pvc: %v", existing.Name)
continue
}
pvcs = append(pvcs, f.claims[i])
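A hypothetical test exercising the fake above, using newPCB and newStatefulSet from this file (not part of the commit):

// Hypothetical usage inside a test function with t *testing.T in scope.
fc := newFakePetClient()
set := newStatefulSet(3)
for i := 0; i < 3; i++ {
	pet, _ := newPCB(fmt.Sprintf("%v", i), set)
	if err := fc.Create(pet); err != nil {
		t.Fatalf("unexpected create error: %v", err)
	}
}
if fc.petsCreated != 3 {
	t.Errorf("expected 3 pets created, got %d", fc.petsCreated)
}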
@@ -23,9 +23,9 @@ import (
"strings"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
podapi "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/api/v1"
podapi "k8s.io/kubernetes/pkg/api/v1/pod"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/util/sets"
)

@@ -41,10 +41,10 @@ type identityMapper interface {
// SetIdentity takes an id and assigns the given pet an identity based
// on the stateful set spec. The id must be unique amongst members of the
// stateful set.
SetIdentity(id string, pet *api.Pod)
SetIdentity(id string, pet *v1.Pod)

// Identity returns the identity of the pet.
Identity(pod *api.Pod) string
Identity(pod *v1.Pod) string
}

func newIdentityMappers(ps *apps.StatefulSet) []identityMapper {
@@ -61,19 +61,19 @@ type NetworkIdentityMapper struct {
}

// SetIdentity sets network identity on the pet.
func (n *NetworkIdentityMapper) SetIdentity(id string, pet *api.Pod) {
func (n *NetworkIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
pet.Annotations[podapi.PodHostnameAnnotation] = fmt.Sprintf("%v-%v", n.ps.Name, id)
pet.Annotations[podapi.PodSubdomainAnnotation] = n.ps.Spec.ServiceName
return
}

// Identity returns the network identity of the pet.
func (n *NetworkIdentityMapper) Identity(pet *api.Pod) string {
func (n *NetworkIdentityMapper) Identity(pet *v1.Pod) string {
return n.String(pet)
}

// String is a string function for the network identity of the pet.
func (n *NetworkIdentityMapper) String(pet *api.Pod) string {
func (n *NetworkIdentityMapper) String(pet *v1.Pod) string {
hostname := pet.Annotations[podapi.PodHostnameAnnotation]
subdomain := pet.Annotations[podapi.PodSubdomainAnnotation]
return strings.Join([]string{hostname, subdomain, n.ps.Namespace}, ".")
@@ -85,13 +85,13 @@ type VolumeIdentityMapper struct {
}

// SetIdentity sets storage identity on the pet.
func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
petVolumes := []api.Volume{}
func (v *VolumeIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
petVolumes := []v1.Volume{}
petClaims := v.GetClaims(id)

// These volumes will all go down with the pod. If a name matches one of
// the claims in the stateful set, it gets clobbered.
podVolumes := map[string]api.Volume{}
podVolumes := map[string]v1.Volume{}
for _, podVol := range pet.Spec.Volumes {
podVolumes[podVol.Name] = podVol
}
@@ -105,10 +105,10 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
// TODO: Validate and reject this.
glog.V(4).Infof("Overwriting existing volume source %v", podVol.Name)
}
newVol := api.Volume{
newVol := v1.Volume{
Name: name,
VolumeSource: api.VolumeSource{
PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: claim.Name,
// TODO: Use source definition to set this value when we have one.
ReadOnly: false,
@@ -129,13 +129,13 @@ func (v *VolumeIdentityMapper) SetIdentity(id string, pet *api.Pod) {
}

// Identity returns the storage identity of the pet.
func (v *VolumeIdentityMapper) Identity(pet *api.Pod) string {
func (v *VolumeIdentityMapper) Identity(pet *v1.Pod) string {
// TODO: Make this a hash?
return v.String(pet)
}

// String is a string function for the storage identity of the pet.
func (v *VolumeIdentityMapper) String(pet *api.Pod) string {
func (v *VolumeIdentityMapper) String(pet *v1.Pod) string {
ids := []string{}
petVols := sets.NewString()
for _, petVol := range v.ps.Spec.VolumeClaimTemplates {
@@ -160,8 +160,8 @@ func (v *VolumeIdentityMapper) String(pet *api.Pod) string {

// GetClaims returns the volume claims associated with the given id.
// The claims belong to the statefulset. The id should be unique within a statefulset.
func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVolumeClaim {
petClaims := map[string]api.PersistentVolumeClaim{}
func (v *VolumeIdentityMapper) GetClaims(id string) map[string]v1.PersistentVolumeClaim {
petClaims := map[string]v1.PersistentVolumeClaim{}
for _, pvc := range v.ps.Spec.VolumeClaimTemplates {
claim := pvc
// TODO: Name length checking in validation.
@@ -177,12 +177,12 @@ func (v *VolumeIdentityMapper) GetClaims(id string) map[string]api.PersistentVol
}

// GetClaimsForPet returns the pvcs for the given pet.
func (v *VolumeIdentityMapper) GetClaimsForPet(pet *api.Pod) []api.PersistentVolumeClaim {
func (v *VolumeIdentityMapper) GetClaimsForPet(pet *v1.Pod) []v1.PersistentVolumeClaim {
// Strip out the "-(index)" from the pet name and use it to generate
// claim names.
id := strings.Split(pet.Name, "-")
petID := id[len(id)-1]
pvcs := []api.PersistentVolumeClaim{}
pvcs := []v1.PersistentVolumeClaim{}
for _, pvc := range v.GetClaims(petID) {
pvcs = append(pvcs, pvc)
}
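The ordinal recovery in GetClaimsForPet is just the strings.Split shown above; isolated as a runnable stdlib sketch (the helper name is hypothetical):

package main

import (
	"fmt"
	"strings"
)

// ordinalFromPetName mirrors the parsing in GetClaimsForPet: the identity
// index is whatever follows the last '-' in the pod name.
func ordinalFromPetName(name string) string {
	parts := strings.Split(name, "-")
	return parts[len(parts)-1]
}

func main() {
	fmt.Println(ordinalFromPetName("web-2")) // 2
}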
@@ -196,25 +196,25 @@ type NameIdentityMapper struct {
}

// SetIdentity sets the pet namespace and name.
func (n *NameIdentityMapper) SetIdentity(id string, pet *api.Pod) {
func (n *NameIdentityMapper) SetIdentity(id string, pet *v1.Pod) {
pet.Name = fmt.Sprintf("%v-%v", n.ps.Name, id)
pet.Namespace = n.ps.Namespace
return
}

// Identity returns the name identity of the pet.
func (n *NameIdentityMapper) Identity(pet *api.Pod) string {
func (n *NameIdentityMapper) Identity(pet *v1.Pod) string {
return n.String(pet)
}

// String is a string function for the name identity of the pet.
func (n *NameIdentityMapper) String(pet *api.Pod) string {
func (n *NameIdentityMapper) String(pet *v1.Pod) string {
return fmt.Sprintf("%v/%v", pet.Namespace, pet.Name)
}

// identityHash computes a hash of the pet by running all the above identity
// mappers.
func identityHash(ps *apps.StatefulSet, pet *api.Pod) string {
func identityHash(ps *apps.StatefulSet, pet *v1.Pod) string {
id := ""
for _, idMapper := range newIdentityMappers(ps) {
id += idMapper.Identity(pet)
@@ -226,7 +226,7 @@ func identityHash(ps *apps.StatefulSet, pet *api.Pod) string {
// Note that this is *not* a literal copy, but a copy of the fields that
// contribute to the pet's identity. The returned boolean 'needsUpdate' will
// be false if the realPet already has the same identity as the expectedPet.
func copyPetID(realPet, expectedPet *pcb) (pod api.Pod, needsUpdate bool, err error) {
func copyPetID(realPet, expectedPet *pcb) (pod v1.Pod, needsUpdate bool, err error) {
if realPet.pod == nil || expectedPet.pod == nil {
return pod, false, fmt.Errorf("Need a valid to and from pet for copy")
}

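identityHash concatenates the per-mapper identity strings and hashes the result; the hashing step itself is outside the hunk shown above, so the fnv choice in this sketch is only an assumption for illustration:

package main

import (
	"fmt"
	"hash/fnv"
)

// identityHashSketch concatenates the identity strings and hashes the
// concatenation, mirroring the loop in identityHash above. The real hash
// function is not visible in the diff; fnv is a placeholder.
func identityHashSketch(identities ...string) string {
	h := fnv.New32a()
	for _, id := range identities {
		h.Write([]byte(id))
	}
	return fmt.Sprintf("%x", h.Sum32())
}

func main() {
	fmt.Println(identityHashSketch("web-0.nginx.default", "default/web-0"))
}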
@@ -23,8 +23,8 @@ import (

"testing"

"k8s.io/kubernetes/pkg/api"
api_pod "k8s.io/kubernetes/pkg/api/pod"
"k8s.io/kubernetes/pkg/api/v1"
api_pod "k8s.io/kubernetes/pkg/api/v1/pod"
)

func TestPetIDName(t *testing.T) {
@@ -150,10 +150,10 @@ func TestPetIDReset(t *testing.T) {
if identityHash(ps, firstPCB.pod) == identityHash(ps, secondPCB.pod) {
t.Fatalf("Failed to generate uniquey identities:\n%+v\n%+v", firstPCB.pod.Spec, secondPCB.pod.Spec)
|
||||
}
userAdded := api.Volume{
userAdded := v1.Volume{
Name: "test",
VolumeSource: api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
}
firstPCB.pod.Spec.Volumes = append(firstPCB.pod.Spec.Volumes, userAdded)

@@ -21,8 +21,8 @@ import (
"sort"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/controller"
)

@@ -35,7 +35,7 @@ func newPCB(id string, ps *apps.StatefulSet) (*pcb, error) {
for _, im := range newIdentityMappers(ps) {
im.SetIdentity(id, petPod)
}
petPVCs := []api.PersistentVolumeClaim{}
petPVCs := []v1.PersistentVolumeClaim{}
vMapper := &VolumeIdentityMapper{ps}
for _, c := range vMapper.GetClaims(id) {
petPVCs = append(petPVCs, c)
@@ -87,7 +87,7 @@ func (pt *petQueue) empty() bool {
}

// NewPetQueue returns a queue for tracking pets
func NewPetQueue(ps *apps.StatefulSet, podList []*api.Pod) *petQueue {
func NewPetQueue(ps *apps.StatefulSet, podList []*v1.Pod) *petQueue {
pt := petQueue{pets: []*pcb{}, idMapper: &NameIdentityMapper{ps}}
// Seed the queue with existing pets. Assume all pets are scheduled for
// deletion, enqueuing a pet will "undelete" it. We always want to delete
@@ -118,7 +118,7 @@ type statefulSetIterator struct {
func (pi *statefulSetIterator) Next() bool {
var pet *pcb
var err error
if pi.petCount < pi.ps.Spec.Replicas {
if pi.petCount < *(pi.ps.Spec.Replicas) {
pet, err = newPCB(fmt.Sprintf("%d", pi.petCount), pi.ps)
if err != nil {
pi.errs = append(pi.errs, err)
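The new dereference `*(pi.ps.Spec.Replicas)` reflects a type change: in the versioned apps/v1beta1 API, Spec.Replicas is a *int32 so that "unset" and "0" can be told apart. The diff dereferences unconditionally; a nil-safe helper like the one below is an assumption added here for illustration:

package main

import "fmt"

// replicasOrDefault shows the pointer-deref pattern the iterator now needs.
func replicasOrDefault(r *int32, def int32) int32 {
	if r == nil {
		return def
	}
	return *r
}

func main() {
	var unset *int32
	three := int32(3)
	fmt.Println(replicasOrDefault(unset, 1))  // 1
	fmt.Println(replicasOrDefault(&three, 1)) // 3
}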
@@ -139,7 +139,7 @@ func (pi *statefulSetIterator) Value() *pcb {

// NewStatefulSetIterator returns a new iterator. All pods in the given podList
// are used to seed the queue of the iterator.
func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulSetIterator {
func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*v1.Pod) *statefulSetIterator {
pi := &statefulSetIterator{
ps: ps,
queue: NewPetQueue(ps, podList),
@@ -150,7 +150,7 @@ func NewStatefulSetIterator(ps *apps.StatefulSet, podList []*api.Pod) *statefulS
}

// PodsByCreationTimestamp sorts a list of Pods by creation timestamp, using their names as a tie breaker.
type PodsByCreationTimestamp []*api.Pod
type PodsByCreationTimestamp []*v1.Pod

func (o PodsByCreationTimestamp) Len() int { return len(o) }
func (o PodsByCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

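Only Len and Swap are visible in this hunk; the Less method is outside it. A hedged, stdlib-only sketch of the documented comparison (timestamp first, name as tie breaker), using stand-in types rather than *v1.Pod:

package main

import (
	"fmt"
	"sort"
	"time"
)

// pod is a tiny stand-in carrying just the fields the comparator needs.
type pod struct {
	name    string
	created time.Time
}

type byCreationTimestamp []pod

func (o byCreationTimestamp) Len() int      { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
func (o byCreationTimestamp) Less(i, j int) bool {
	if o[i].created.Equal(o[j].created) {
		return o[i].name < o[j].name // names break timestamp ties
	}
	return o[i].created.Before(o[j].created)
}

func main() {
	t0 := time.Now()
	pods := []pod{{"web-1", t0}, {"web-0", t0}}
	sort.Sort(byCreationTimestamp(pods))
	fmt.Println(pods[0].name) // web-0
}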
@@ -21,14 +21,14 @@ import (

"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/util/sets"
)

func TestPetQueueCreates(t *testing.T) {
replicas := 3
ps := newStatefulSet(replicas)
q := NewPetQueue(ps, []*api.Pod{})
q := NewPetQueue(ps, []*v1.Pod{})
for i := 0; i < replicas; i++ {
pet, _ := newPCB(fmt.Sprintf("%v", i), ps)
q.enqueue(pet)
@@ -107,7 +107,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
knownPods := newPodList(ps, 5)
for i := range knownPods {
knownPods[i].Spec.NodeName = fmt.Sprintf("foo-node-%v", i)
knownPods[i].Status.Phase = api.PodRunning
knownPods[i].Status.Phase = v1.PodRunning
}
pi := NewStatefulSetIterator(ps, knownPods)

@@ -128,7 +128,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
}

// Scale to 0 should delete all pods in system
ps.Spec.Replicas = 0
*(ps.Spec.Replicas) = 0
pi = NewStatefulSetIterator(ps, knownPods)
i = 0
for pi.Next() {
@@ -143,7 +143,7 @@ func TestStatefulSetIteratorRelist(t *testing.T) {
}

// Relist with 0 replicas should no-op
pi = NewStatefulSetIterator(ps, []*api.Pod{})
pi = NewStatefulSetIterator(ps, []*v1.Pod{})
if pi.Next() != false {
t.Errorf("Unexpected iteration without any replicas or pods in system")
}

@@ -20,10 +20,10 @@ import (
"fmt"
"strconv"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime"

@@ -52,9 +52,9 @@ const (
// and parent fields to pass it around safely.
type pcb struct {
// pod is the desired pet pod.
pod *api.Pod
pod *v1.Pod
// pvcs is a list of desired persistent volume claims for the pet pod.
pvcs []api.PersistentVolumeClaim
pvcs []v1.PersistentVolumeClaim
// event is the lifecycle event associated with this update.
event petLifeCycleEvent
// id is the identity index of this pet.
@@ -106,7 +106,7 @@ func (p *petSyncer) Sync(pet *pcb) error {
return err
}
// if pet failed - we need to remove old one because of consistent naming
if exists && realPet.pod.Status.Phase == api.PodFailed {
if exists && realPet.pod.Status.Phase == v1.PodFailed {
glog.V(2).Infof("Deleting evicted pod %v/%v", realPet.pod.Namespace, realPet.pod.Name)
if err := p.petClient.Delete(realPet); err != nil {
return err
@@ -175,7 +175,7 @@ type petClient interface {

// apiServerPetClient is a statefulset aware Kubernetes client.
type apiServerPetClient struct {
c internalclientset.Interface
c clientset.Interface
recorder record.EventRecorder
petHealthChecker
}
@@ -242,12 +242,12 @@ func (p *apiServerPetClient) DeletePVCs(pet *pcb) error {
return nil
}

func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*api.PersistentVolumeClaim, error) {
func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*v1.PersistentVolumeClaim, error) {
pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName)
return pvc, err
}

func (p *apiServerPetClient) createPVC(pvc *api.PersistentVolumeClaim) error {
func (p *apiServerPetClient) createPVC(pvc *v1.PersistentVolumeClaim) error {
_, err := p.c.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
return err
}
@@ -280,17 +280,17 @@ func (p *apiServerPetClient) SyncPVCs(pet *pcb) error {
// event formats an event for the given runtime object.
func (p *apiServerPetClient) event(obj runtime.Object, reason, msg string, err error) {
if err != nil {
p.recorder.Eventf(obj, api.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err))
p.recorder.Eventf(obj, v1.EventTypeWarning, fmt.Sprintf("Failed%v", reason), fmt.Sprintf("%v, error: %v", msg, err))
} else {
p.recorder.Eventf(obj, api.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg)
p.recorder.Eventf(obj, v1.EventTypeNormal, fmt.Sprintf("Successful%v", reason), msg)
}
}

// petHealthChecker is an interface to check pet health. It makes a boolean
// decision based on the given pod.
type petHealthChecker interface {
isHealthy(*api.Pod) bool
isDying(*api.Pod) bool
isHealthy(*v1.Pod) bool
isDying(*v1.Pod) bool
}

// defaultPetHealthChecker does basic health checking.
@@ -299,11 +299,11 @@ type defaultPetHealthChecker struct{}

// isHealthy returns true if the pod is ready & running. If the pod has the
// "pod.alpha.kubernetes.io/initialized" annotation set to "false", pod state is ignored.
func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
if pod == nil || pod.Status.Phase != api.PodRunning {
func (d *defaultPetHealthChecker) isHealthy(pod *v1.Pod) bool {
if pod == nil || pod.Status.Phase != v1.PodRunning {
return false
}
podReady := api.IsPodReady(pod)
podReady := v1.IsPodReady(pod)

// User may have specified a pod readiness override through a debug annotation.
initialized, ok := pod.Annotations[StatefulSetInitAnnotation]
@@ -321,6 +321,6 @@ func (d *defaultPetHealthChecker) isHealthy(pod *api.Pod) bool {
// isDying returns true if the pod has a non-nil deletion timestamp. Since the
// timestamp can only decrease, once this method returns true for a given pet, it
// will never return false.
func (d *defaultPetHealthChecker) isDying(pod *api.Pod) bool {
func (d *defaultPetHealthChecker) isDying(pod *v1.Pod) bool {
return pod != nil && pod.DeletionTimestamp != nil
}

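The isHealthy hunk truncates right after the annotation lookup; what presumably follows is a boolean parse of the "initialized" override, per the doc comment above. A speculative, stdlib-only sketch of that control flow (not the controller's actual code):

package main

import (
	"fmt"
	"strconv"
)

// initOverride sketches the readiness override: if the debug annotation is
// present and explicitly "false", pod state is ignored and the pet is treated
// as healthy; otherwise the normal readiness result stands. The exact logic
// is outside the hunk shown above.
func initOverride(annotations map[string]string, key string, podReady bool) bool {
	if raw, ok := annotations[key]; ok {
		if initialized, err := strconv.ParseBool(raw); err == nil && !initialized {
			return true
		}
	}
	return podReady
}

func main() {
	ann := map[string]string{"pod.alpha.kubernetes.io/initialized": "false"}
	fmt.Println(initOverride(ann, "pod.alpha.kubernetes.io/initialized", false)) // true
}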
@@ -22,12 +22,12 @@ import (
"sort"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"

"k8s.io/kubernetes/pkg/controller"
@@ -52,7 +52,7 @@ const (

// StatefulSetController controls statefulsets.
type StatefulSetController struct {
kubeClient internalclientset.Interface
kubeClient clientset.Interface

// newSyncer returns an interface capable of syncing a single pet.
// Abstracted out for testing.
@@ -83,11 +83,11 @@ type StatefulSetController struct {
}

// NewStatefulSetController creates a new statefulset controller.
func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface, resyncPeriod time.Duration) *StatefulSetController {
func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod time.Duration) *StatefulSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "statefulset"})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "statefulset"})
pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}

psc := &StatefulSetController{
|
||||
@@ -112,11 +112,11 @@ func NewStatefulSetController(podInformer cache.SharedIndexInformer, kubeClient
|
||||
|
||||
psc.psStore.Store, psc.psController = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).List(options)
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return psc.kubeClient.Apps().StatefulSets(api.NamespaceAll).Watch(options)
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
return psc.kubeClient.Apps().StatefulSets(v1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&apps.StatefulSet{},
|
||||
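The ListWatch change above is purely a signature swap: both closures now take the versioned v1.ListOptions. The shape of the pattern, reduced to a runnable stdlib analogue (all types here are stand-ins, not client-go's):

package main

import "fmt"

// listOptions stands in for v1.ListOptions; listWatch mirrors cache.ListWatch
// as a pair of closures sharing the same options type.
type listOptions struct{ labelSelector string }

type listWatch struct {
	listFunc  func(options listOptions) ([]string, error)
	watchFunc func(options listOptions) (<-chan string, error)
}

func main() {
	lw := listWatch{
		listFunc: func(options listOptions) ([]string, error) {
			return []string{"web"}, nil // stands in for StatefulSets(NamespaceAll).List
		},
		watchFunc: func(options listOptions) (<-chan string, error) {
			ch := make(chan string)
			close(ch)
			return ch, nil // stands in for StatefulSets(NamespaceAll).Watch
		},
	}
	items, _ := lw.listFunc(listOptions{})
	fmt.Println(items)
}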
@@ -156,7 +156,7 @@ func (psc *StatefulSetController) Run(workers int, stopCh <-chan struct{}) {

// addPod adds the statefulset for the pod to the sync queue
func (psc *StatefulSetController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s created, labels: %+v", pod.Name, pod.Labels)
ps := psc.getStatefulSetForPod(pod)
if ps == nil {
@@ -168,8 +168,8 @@ func (psc *StatefulSetController) addPod(obj interface{}) {
// updatePod adds the statefulset for the current and old pods to the sync queue.
// If the labels of the pod didn't change, this method enqueues a single statefulset.
func (psc *StatefulSetController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
@@ -189,7 +189,7 @@ func (psc *StatefulSetController) updatePod(old, cur interface{}) {

// deletePod enqueues the statefulset for the pod accounting for deletion tombstones.
func (psc *StatefulSetController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)

// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
@@ -201,7 +201,7 @@ func (psc *StatefulSetController) deletePod(obj interface{}) {
glog.Errorf("couldn't get object from tombstone %+v", obj)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("tombstone contained object that is not a pod %+v", obj)
return
@@ -214,18 +214,18 @@ func (psc *StatefulSetController) deletePod(obj interface{}) {
}

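The tombstone handling in deletePod is a two-step type assertion: try the pod directly, then unwrap the tombstone. A self-contained stdlib analogue of that idiom (the tombstone type stands in for cache.DeletedFinalStateUnknown):

package main

import "fmt"

// deletedFinalStateUnknown mimics the tombstone wrapper: when a delete event
// is dropped, the store hands the handler the last known object wrapped
// rather than the object itself.
type deletedFinalStateUnknown struct{ Obj interface{} }

type podStub struct{ Name string }

// podFromEvent mirrors the two assertions in deletePod above.
func podFromEvent(obj interface{}) (*podStub, bool) {
	if pod, ok := obj.(*podStub); ok {
		return pod, true
	}
	tombstone, ok := obj.(deletedFinalStateUnknown)
	if !ok {
		return nil, false // neither a pod nor a tombstone
	}
	pod, ok := tombstone.Obj.(*podStub)
	return pod, ok
}

func main() {
	pod, ok := podFromEvent(deletedFinalStateUnknown{Obj: &podStub{Name: "web-0"}})
	fmt.Println(pod.Name, ok) // web-0 true
}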
// getPodsForStatefulSet returns the pods that match the selectors of the given statefulset.
func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*api.Pod, error) {
func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([]*v1.Pod, error) {
// TODO: Do we want the statefulset to fight with RCs? check parent statefulset annotation, or name prefix?
sel, err := unversioned.LabelSelectorAsSelector(ps.Spec.Selector)
if err != nil {
return []*api.Pod{}, err
return []*v1.Pod{}, err
}
pods, err := psc.podStore.Pods(ps.Namespace).List(sel)
if err != nil {
return []*api.Pod{}, err
return []*v1.Pod{}, err
}
// TODO: Do we need to copy?
result := make([]*api.Pod, 0, len(pods))
result := make([]*v1.Pod, 0, len(pods))
for i := range pods {
result = append(result, &(*pods[i]))
}
@@ -233,7 +233,7 @@ func (psc *StatefulSetController) getPodsForStatefulSet(ps *apps.StatefulSet) ([
}

// getStatefulSetForPod returns the pet set managing the given pod.
func (psc *StatefulSetController) getStatefulSetForPod(pod *api.Pod) *apps.StatefulSet {
func (psc *StatefulSetController) getStatefulSetForPod(pod *v1.Pod) *apps.StatefulSet {
ps, err := psc.psStore.GetPodStatefulSets(pod)
if err != nil {
glog.V(4).Infof("No StatefulSets found for pod %v, StatefulSet controller will avoid syncing", pod.Name)
@@ -320,7 +320,7 @@ func (psc *StatefulSetController) Sync(key string) error {
}

// syncStatefulSet syncs a tuple of (statefulset, pets).
func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*api.Pod) (int, error) {
func (psc *StatefulSetController) syncStatefulSet(ps *apps.StatefulSet, pets []*v1.Pod) (int, error) {
glog.V(2).Infof("Syncing StatefulSet %v/%v with %d pods", ps.Namespace, ps.Name, len(pets))

it := NewStatefulSetIterator(ps, pets)

@@ -22,12 +22,12 @@ import (
"reflect"
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion/fake"
fake_internal "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1/fake"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/util/errors"
)
@@ -50,7 +50,7 @@ func checkPets(ps *apps.StatefulSet, creates, deletes int, fc *fakePetClient, t
if fc.petsCreated != creates || fc.petsDeleted != deletes {
t.Errorf("Found (creates: %d, deletes: %d), expected (creates: %d, deletes: %d)", fc.petsCreated, fc.petsDeleted, creates, deletes)
}
gotClaims := map[string]api.PersistentVolumeClaim{}
gotClaims := map[string]v1.PersistentVolumeClaim{}
for _, pvc := range fc.claims {
gotClaims[pvc.Name] = pvc
}
@@ -88,7 +88,7 @@ func scaleStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetContro
}

func saturateStatefulSet(t *testing.T, ps *apps.StatefulSet, psc *StatefulSetController, fc *fakePetClient) {
err := scaleStatefulSet(t, ps, psc, fc, int(ps.Spec.Replicas))
err := scaleStatefulSet(t, ps, psc, fc, int(*(ps.Spec.Replicas)))
if err != nil {
t.Errorf("Error scaleStatefulSet: %v", err)
}
@@ -119,7 +119,7 @@ func TestStatefulSetControllerDeletes(t *testing.T) {

// Drain
errs := []error{}
ps.Spec.Replicas = 0
*(ps.Spec.Replicas) = 0
knownPods := fc.getPodList()
for i := replicas - 1; i >= 0; i-- {
if len(fc.pets) != i+1 {
@@ -143,7 +143,7 @@ func TestStatefulSetControllerRespectsTermination(t *testing.T) {
saturateStatefulSet(t, ps, psc, fc)

fc.setDeletionTimestamp(replicas - 1)
ps.Spec.Replicas = 2
*(ps.Spec.Replicas) = 2
_, err := psc.syncStatefulSet(ps, fc.getPodList())
if err != nil {
t.Errorf("Error syncing StatefulSet: %v", err)
@@ -169,7 +169,7 @@ func TestStatefulSetControllerRespectsOrder(t *testing.T) {
saturateStatefulSet(t, ps, psc, fc)

errs := []error{}
ps.Spec.Replicas = 0
*(ps.Spec.Replicas) = 0
// Shuffle known list and check that pets are deleted in reverse
knownPods := fc.getPodList()
for i := range knownPods {
@@ -285,16 +285,16 @@ type fakeClient struct {
statefulsetClient *fakeStatefulSetClient
}

func (c *fakeClient) Apps() internalversion.AppsInterface {
return &fakeApps{c, &fake.FakeApps{}}
func (c *fakeClient) Apps() v1beta1.AppsV1beta1Interface {
return &fakeApps{c, &fake.FakeAppsV1beta1{}}
}

type fakeApps struct {
*fakeClient
*fake.FakeApps
*fake.FakeAppsV1beta1
}

func (c *fakeApps) StatefulSets(namespace string) internalversion.StatefulSetInterface {
func (c *fakeApps) StatefulSets(namespace string) v1beta1.StatefulSetInterface {
c.statefulsetClient.Namespace = namespace
return c.statefulsetClient
}

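Because v1beta1 Replicas is a pointer, the tests above now write through it with `*(ps.Spec.Replicas) = 0`. The usual companion helper for constructing such fields, shown as a hedged sketch (the helper is an assumption; the diff itself only writes through an existing pointer):

package main

import "fmt"

// int32Ptr is the conventional helper for populating pointer-valued
// replica fields.
func int32Ptr(i int32) *int32 { return &i }

func main() {
	replicas := int32Ptr(3)
	*replicas = 0 // same write pattern as the tests above
	fmt.Println(*replicas)
}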
@@ -20,10 +20,10 @@ import (
"fmt"
"sync"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/api/v1"
apps "k8s.io/kubernetes/pkg/apis/apps/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/internalversion"
appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/apps/v1beta1"
"k8s.io/kubernetes/pkg/controller"

"github.com/golang/glog"
@@ -51,7 +51,7 @@ func updatePetCount(psClient appsclientset.StatefulSetsGetter, ps apps.StatefulS
var getErr error
for i, ps := 0, &ps; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for StatefulSet: %s/%s, ", ps.Namespace, ps.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas))
fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, *(ps.Spec.Replicas)))

ps.Status = apps.StatefulSetStatus{Replicas: int32(numPets)}
_, updateErr = psClient.StatefulSets(ps.Namespace).UpdateStatus(ps)
@@ -72,7 +72,7 @@ type unhealthyPetTracker struct {
}

// Get returns a previously recorded blocking pet for the given statefulset.
func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*api.Pod) (*pcb, error) {
func (u *unhealthyPetTracker) Get(ps *apps.StatefulSet, knownPets []*v1.Pod) (*pcb, error) {
u.storeLock.Lock()
defer u.storeLock.Unlock()

@@ -21,11 +21,11 @@ import (
"net/http/httptest"
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/runtime"
@@ -39,10 +39,10 @@ func newPetClient(client *clientset.Clientset) *apiServerPetClient {
}

func makeTwoDifferntPCB() (pcb1, pcb2 *pcb) {
userAdded := api.Volume{
userAdded := v1.Volume{
Name: "test",
VolumeSource: api.VolumeSource{
EmptyDir: &api.EmptyDirVolumeSource{Medium: api.StorageMediumMemory},
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{Medium: v1.StorageMediumMemory},
},
}
ps := newStatefulSet(2)
@@ -88,14 +88,14 @@ func TestUpdatePetWithoutRetry(t *testing.T) {
}

for k, tc := range testCases {
body := runtime.EncodeOrDie(testapi.Default.Codec(), &api.Pod{ObjectMeta: api.ObjectMeta{Name: "empty_pod"}})
body := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "empty_pod"}})
fakeHandler := utiltesting.FakeHandler{
StatusCode: 200,
ResponseBody: string(body),
}
testServer := httptest.NewServer(&fakeHandler)

client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client)
err := petClient.Update(tc.realPet, tc.expectedPet)

@@ -115,7 +115,7 @@ func TestUpdatePetWithFailure(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
petClient := newPetClient(client)

pcb1, pcb2 := makeTwoDifferntPCB()

@@ -23,15 +23,15 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/autoscaling/internalversion"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
unversionedautoscaling "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/autoscaling/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/runtime"
utilruntime "k8s.io/kubernetes/pkg/util/runtime"
@@ -75,11 +75,11 @@ var upscaleForbiddenWindow = 3 * time.Minute
func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (cache.Store, *cache.Controller) {
return cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return controller.hpaNamespacer.HorizontalPodAutoscalers(v1.NamespaceAll).Watch(options)
},
},
&autoscaling.HorizontalPodAutoscaler{},
@@ -90,7 +90,7 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
hasCPUPolicy := hpa.Spec.TargetCPUUtilizationPercentage != nil
_, hasCustomMetricsPolicy := hpa.Annotations[HpaCustomMetricsTargetAnnotationName]
if !hasCPUPolicy && !hasCustomMetricsPolicy {
controller.eventRecorder.Event(hpa, api.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
controller.eventRecorder.Event(hpa, v1.EventTypeNormal, "DefaultPolicy", "No scaling policy specified - will use default one. See documentation for details")
}
err := controller.reconcileAutoscaler(hpa)
if err != nil {
@@ -109,10 +109,10 @@ func newInformer(controller *HorizontalController, resyncPeriod time.Duration) (
)
}

func NewHorizontalController(evtNamespacer unversionedcore.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, replicaCalc *ReplicaCalculator, resyncPeriod time.Duration) *HorizontalController {
func NewHorizontalController(evtNamespacer v1core.EventsGetter, scaleNamespacer unversionedextensions.ScalesGetter, hpaNamespacer unversionedautoscaling.HorizontalPodAutoscalersGetter, replicaCalc *ReplicaCalculator, resyncPeriod time.Duration) *HorizontalController {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: evtNamespacer.Events("")})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "horizontal-pod-autoscaler"})

controller := &HorizontalController{
replicaCalc: replicaCalc,
@@ -153,31 +153,31 @@ func (a *HorizontalController) computeReplicasForCPUUtilization(hpa *autoscaling

if scale.Status.Selector == nil {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, api.EventTypeWarning, "SelectorRequired", errMsg)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
return 0, nil, time.Time{}, fmt.Errorf(errMsg)
}

selector, err := unversioned.LabelSelectorAsSelector(scale.Status.Selector)
selector, err := unversioned.LabelSelectorAsSelector(&unversioned.LabelSelector{MatchLabels: scale.Status.Selector})
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
return 0, nil, time.Time{}, fmt.Errorf(errMsg)
}

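The wrapping in unversioned.LabelSelector{MatchLabels: ...} above exists because the versioned ScaleStatus.Selector is now a plain map[string]string rather than a selector object. A stdlib sketch of the same conversion shape (types here are stand-ins for the real unversioned.LabelSelector):

package main

import "fmt"

// labelSelector mirrors just enough of unversioned.LabelSelector to show the
// wrap: the raw map becomes the MatchLabels of a selector struct before the
// generic conversion runs.
type labelSelector struct {
	MatchLabels map[string]string
}

func asSelectorString(s labelSelector) string {
	out := ""
	for k, v := range s.MatchLabels {
		if out != "" {
			out += ","
		}
		out += k + "=" + v
	}
	return out
}

func main() {
	raw := map[string]string{"name": "test-pod"} // what scale.Status.Selector now holds
	fmt.Println(asSelectorString(labelSelector{MatchLabels: raw}))
}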
desiredReplicas, utilization, timestamp, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, api.ResourceCPU, hpa.Namespace, selector)
desiredReplicas, utilization, timestamp, err := a.replicaCalc.GetResourceReplicas(currentReplicas, targetUtilization, v1.ResourceCPU, hpa.Namespace, selector)
if err != nil {
lastScaleTime := getLastScaleTime(hpa)
if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetMetrics", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetMetrics", err.Error())
} else {
a.eventRecorder.Event(hpa, api.EventTypeNormal, "MetricsNotAvailableYet", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeNormal, "MetricsNotAvailableYet", err.Error())
}

return 0, nil, time.Time{}, fmt.Errorf("failed to get CPU utilization: %v", err)
}

if desiredReplicas != currentReplicas {
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "DesiredReplicasComputed",
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "DesiredReplicasComputed",
"Computed the desired num of replicas: %d (avgCPUutil: %d, current replicas: %d)",
desiredReplicas, utilization, scale.Status.Replicas)
}
@@ -201,11 +201,11 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.

var targetList extensions.CustomMetricTargetList
if err := json.Unmarshal([]byte(cmAnnotation), &targetList); err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedParseCustomMetricsAnnotation", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedParseCustomMetricsAnnotation", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to parse custom metrics annotation: %v", err)
}
if len(targetList.Items) == 0 {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "NoCustomMetricsInAnnotation", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "NoCustomMetricsInAnnotation", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("no custom metrics in annotation")
}

@@ -216,14 +216,14 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
for _, customMetricTarget := range targetList.Items {
if scale.Status.Selector == nil {
errMsg := "selector is required"
a.eventRecorder.Event(hpa, api.EventTypeWarning, "SelectorRequired", errMsg)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "SelectorRequired", errMsg)
return 0, "", "", time.Time{}, fmt.Errorf("selector is required")
}

selector, err := unversioned.LabelSelectorAsSelector(scale.Status.Selector)
selector, err := unversioned.LabelSelectorAsSelector(&unversioned.LabelSelector{MatchLabels: scale.Status.Selector})
if err != nil {
errMsg := fmt.Sprintf("couldn't convert selector string to a corresponding selector object: %v", err)
a.eventRecorder.Event(hpa, api.EventTypeWarning, "InvalidSelector", errMsg)
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "InvalidSelector", errMsg)
return 0, "", "", time.Time{}, fmt.Errorf("couldn't convert selector string to a corresponding selector object: %v", err)
}
floatTarget := float64(customMetricTarget.TargetValue.MilliValue()) / 1000.0
@@ -231,9 +231,9 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
if err != nil {
lastScaleTime := getLastScaleTime(hpa)
if time.Now().After(lastScaleTime.Add(upscaleForbiddenWindow)) {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetCustomMetrics", err.Error())
} else {
a.eventRecorder.Event(hpa, api.EventTypeNormal, "CustomMetricsNotAvailableYet", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeNormal, "CustomMetricsNotAvailableYet", err.Error())
}

return 0, "", "", time.Time{}, fmt.Errorf("failed to get custom metric value: %v", err)
@@ -246,7 +246,7 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
}
quantity, err := resource.ParseQuantity(fmt.Sprintf("%.3f", utilizationProposal))
if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedSetCustomMetrics", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedSetCustomMetrics", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to set custom metric value: %v", err)
}
statusList.Items = append(statusList.Items, extensions.CustomMetricCurrentStatus{
@@ -256,14 +256,14 @@ func (a *HorizontalController) computeReplicasForCustomMetrics(hpa *autoscaling.
}
byteStatusList, err := json.Marshal(statusList)
if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedSerializeCustomMetrics", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedSerializeCustomMetrics", err.Error())
return 0, "", "", time.Time{}, fmt.Errorf("failed to serialize custom metric status: %v", err)
}

if replicas != currentReplicas {
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "DesiredReplicasComputedCustomMetric",
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "DesiredReplicasComputedCustomMetric",
"Computed the desired num of replicas: %d, metric: %s, current replicas: %d",
int32(replicas), metric, scale.Status.Replicas)
func() *int32 { i := int32(replicas); return &i }(), metric, scale.Status.Replicas)
}

return replicas, metric, string(byteStatusList), timestamp, nil
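The last replaced argument above obtains a *int32 from an expression through an immediately invoked closure. Isolated as a runnable sketch, the idiom is equivalent to a named pointer helper, just written inline at the call site:

package main

import "fmt"

func main() {
	replicas := 4
	// The inline closure from the Eventf call above: build an int32 copy of
	// the expression, return its address.
	p := func() *int32 { i := int32(replicas); return &i }()
	fmt.Println(*p) // 4
}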
@@ -274,7 +274,7 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPo

scale, err := a.scaleNamespacer.Scales(hpa.Namespace).Get(hpa.Spec.ScaleTargetRef.Kind, hpa.Spec.ScaleTargetRef.Name)
if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedGetScale", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedGetScale", err.Error())
return fmt.Errorf("failed to query scale subresource for %s: %v", reference, err)
}
currentReplicas := scale.Status.Replicas
@@ -367,10 +367,10 @@ func (a *HorizontalController) reconcileAutoscaler(hpa *autoscaling.HorizontalPo
scale.Spec.Replicas = desiredReplicas
_, err = a.scaleNamespacer.Scales(hpa.Namespace).Update(hpa.Spec.ScaleTargetRef.Kind, scale)
if err != nil {
a.eventRecorder.Eventf(hpa, api.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
a.eventRecorder.Eventf(hpa, v1.EventTypeWarning, "FailedRescale", "New size: %d; reason: %s; error: %v", desiredReplicas, rescaleReason, err.Error())
return fmt.Errorf("failed to rescale %s: %v", reference, err)
}
a.eventRecorder.Eventf(hpa, api.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
a.eventRecorder.Eventf(hpa, v1.EventTypeNormal, "SuccessfulRescale", "New size: %d; reason: %s", desiredReplicas, rescaleReason)
glog.Infof("Successfull rescale of %s, old size: %d, new size: %d, reason: %s",
|
||||
hpa.Name, currentReplicas, desiredReplicas, rescaleReason)
} else {
@@ -428,7 +428,7 @@ func (a *HorizontalController) updateStatus(hpa *autoscaling.HorizontalPodAutosc

_, err := a.hpaNamespacer.HorizontalPodAutoscalers(hpa.Namespace).UpdateStatus(hpa)
if err != nil {
a.eventRecorder.Event(hpa, api.EventTypeWarning, "FailedUpdateStatus", err.Error())
a.eventRecorder.Event(hpa, v1.EventTypeWarning, "FailedUpdateStatus", err.Error())
return fmt.Errorf("failed to update status for %s: %v", hpa.Name, err)
}
glog.V(2).Infof("Successfully updated status for %s", hpa.Name)

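Every eventRecorder call in this file follows the same broadcaster/recorder pattern the constructor wires up; only the event-type constants moved from the internal api package to v1. A dependency-free analogue of that fan-out, for orientation (nothing here is client-go's API):

package main

import "log"

// recorder mimics the broadcaster/recorder pair: each sink receives every
// event, and an event carries a source component plus a typed severity,
// mirroring v1.EventTypeNormal / v1.EventTypeWarning above.
type recorder struct {
	component string
	sinks     []func(eventType, reason, message string)
}

func (r *recorder) event(eventType, reason, message string) {
	for _, sink := range r.sinks {
		sink(eventType, reason, message)
	}
}

func main() {
	r := &recorder{component: "horizontal-pod-autoscaler"}
	r.sinks = append(r.sinks, func(eventType, reason, message string) {
		log.Printf("%s %s %s: %s", r.component, eventType, reason, message)
	})
	r.event("Warning", "FailedGetScale", "scale subresource unavailable")
}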
@@ -27,15 +27,14 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/autoscaling"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
autoscaling "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
@@ -84,7 +83,7 @@ type testCase struct {
verifyCPUCurrent bool
reportedLevels []uint64
reportedCPURequests []resource.Quantity
reportedPodReadiness []api.ConditionStatus
reportedPodReadiness []v1.ConditionStatus
cmTarget *extensions.CustomMetricTargetList
scaleUpdated bool
statusUpdated bool
@@ -151,7 +150,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
obj := &autoscaling.HorizontalPodAutoscalerList{
Items: []autoscaling.HorizontalPodAutoscaler{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: hpaName,
Namespace: namespace,
SelfLink: "experimental/v1/namespaces/" + namespace + "/horizontalpodautoscalers/" + hpaName,
@@ -192,7 +191,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
defer tc.Unlock()

obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: tc.resource.name,
Namespace: namespace,
},
@@ -201,7 +200,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
},
Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas,
Selector: selector,
Selector: selector.MatchLabels,
},
}
return true, obj, nil
@@ -212,7 +211,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
defer tc.Unlock()

obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: tc.resource.name,
Namespace: namespace,
},
@@ -221,7 +220,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
},
Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas,
Selector: selector,
Selector: selector.MatchLabels,
},
}
return true, obj, nil
@@ -232,7 +231,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
defer tc.Unlock()

obj := &extensions.Scale{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: tc.resource.name,
Namespace: namespace,
},
@@ -241,7 +240,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
},
Status: extensions.ScaleStatus{
Replicas: tc.initialReplicas,
Selector: selector,
Selector: selector.MatchLabels,
},
}
return true, obj, nil
@@ -251,36 +250,36 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
tc.Lock()
defer tc.Unlock()

obj := &api.PodList{}
obj := &v1.PodList{}
for i := 0; i < len(tc.reportedCPURequests); i++ {
podReadiness := api.ConditionTrue
podReadiness := v1.ConditionTrue
if tc.reportedPodReadiness != nil {
podReadiness = tc.reportedPodReadiness[i]
}
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := api.Pod{
Status: api.PodStatus{
Phase: api.PodRunning,
Conditions: []api.PodCondition{
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Type: v1.PodReady,
Status: podReadiness,
},
},
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: map[string]string{
"name": podNamePrefix,
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: tc.reportedCPURequests[i],
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: tc.reportedCPURequests[i],
},
},
},
@@ -420,7 +419,7 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
tc.Lock()
defer tc.Unlock()

obj := action.(core.CreateAction).GetObject().(*api.Event)
obj := action.(core.CreateAction).GetObject().(*v1.Event)
if tc.verifyEvents {
switch obj.Reason {
case "SuccessfulRescale":
@@ -460,8 +459,8 @@ func (tc *testCase) runTest(t *testing.T) {
metricsClient := metrics.NewHeapsterMetricsClient(testClient, metrics.DefaultHeapsterNamespace, metrics.DefaultHeapsterScheme, metrics.DefaultHeapsterService, metrics.DefaultHeapsterPort)

broadcaster := record.NewBroadcasterForTests(0)
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: testClient.Core().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "horizontal-pod-autoscaler"})
broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: testClient.Core().Events("")})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "horizontal-pod-autoscaler"})

replicaCalc := &ReplicaCalculator{
metricsClient: metricsClient,
@@ -574,7 +573,7 @@ func TestScaleUpUnreadyLessScale(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{300, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
useMetricsApi: true,
}
tc.runTest(t)
@@ -591,7 +590,7 @@ func TestScaleUpUnreadyNoScale(t *testing.T) {
verifyCPUCurrent: true,
reportedLevels: []uint64{400, 500, 700},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
reportedPodReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionFalse, api.ConditionFalse},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
useMetricsApi: true,
}
tc.runTest(t)
@@ -670,7 +669,7 @@ func TestScaleUpCMUnreadyLessScale(t *testing.T) {
}},
},
reportedLevels: []uint64{50, 10, 30},
reportedPodReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionFalse},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
}
tc.runTest(t)
@@ -690,7 +689,7 @@ func TestScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
}},
},
reportedLevels: []uint64{50, 15, 30},
reportedPodReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionFalse},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
}
tc.runTest(t)
@@ -755,7 +754,7 @@ func TestScaleDownIgnoresUnreadyPods(t *testing.T) {
reportedLevels: []uint64{100, 300, 500, 250, 250},
reportedCPURequests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
useMetricsApi: true,
reportedPodReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionTrue, api.ConditionFalse, api.ConditionFalse},
reportedPodReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
}
tc.runTest(t)
}

@@ -23,10 +23,9 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels"

heapster "k8s.io/heapster/metrics/api/v1/types"
@@ -46,7 +45,7 @@ type PodMetricsInfo map[string]float64
type MetricsClient interface {
// GetResourceMetric gets the given resource metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
GetResourceMetric(resource api.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error)
GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error)

// GetRawMetric gets the given metric (and an associated oldest timestamp)
// for all pods matching the specified selector in the given namespace
@@ -63,8 +62,8 @@ const (
var heapsterQueryStart = -5 * time.Minute

type HeapsterMetricsClient struct {
services unversionedcore.ServiceInterface
podsGetter unversionedcore.PodsGetter
services v1core.ServiceInterface
podsGetter v1core.PodsGetter
heapsterScheme string
heapsterService string
heapsterPort string
@@ -80,7 +79,7 @@ func NewHeapsterMetricsClient(client clientset.Interface, namespace, scheme, ser
}
}

func (h *HeapsterMetricsClient) GetResourceMetric(resource api.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
func (h *HeapsterMetricsClient) GetResourceMetric(resource v1.ResourceName, namespace string, selector labels.Selector) (PodResourceInfo, time.Time, error) {
metricPath := fmt.Sprintf("/apis/metrics/v1alpha1/namespaces/%s/pods", namespace)
params := map[string]string{"labelSelector": selector.String()}

@@ -132,7 +131,7 @@ func (h *HeapsterMetricsClient) GetResourceMetric(resource api.ResourceName, nam
}

func (h *HeapsterMetricsClient) GetRawMetric(metricName string, namespace string, selector labels.Selector) (PodMetricsInfo, time.Time, error) {
podList, err := h.podsGetter.Pods(namespace).List(api.ListOptions{LabelSelector: selector})
podList, err := h.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return nil, time.Time{}, fmt.Errorf("failed to get pod list while fetching metrics: %v", err)
}

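The List change in GetRawMetric is another field-type change: the versioned ListOptions carries the label selector in serialized string form, hence selector.String() instead of passing the object. A stand-in sketch of just that shape (types here are not client-go's):

package main

import "fmt"

// listOptions mirrors the string-typed LabelSelector field of v1.ListOptions.
type listOptions struct{ LabelSelector string }

// equalsSelector stands in for a labels.Selector with a String method.
type equalsSelector struct{ key, value string }

func (s equalsSelector) String() string { return s.key + "=" + s.value }

func main() {
	sel := equalsSelector{key: "name", value: "test-pod"}
	opts := listOptions{LabelSelector: sel.String()} // serialize before listing
	fmt.Println(opts.LabelSelector)                  // name=test-pod
}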
@@ -23,12 +23,11 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/labels"
@@ -76,7 +75,7 @@ type testCase struct {

namespace string
selector labels.Selector
resourceName api.ResourceName
resourceName v1.ResourceName
metricName string
}

@@ -93,10 +92,10 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
fakeClient := &fake.Clientset{}

fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{}
obj := &v1.PodList{}
for i := 0; i < tc.replicas; i++ {
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := buildPod(namespace, podName, podLabels, api.PodRunning, "1024")
pod := buildPod(namespace, podName, podLabels, v1.PodRunning, "1024")
obj.Items = append(obj.Items, pod)
}
return true, obj, nil
@@ -161,30 +160,30 @@ func (tc *testCase) prepareTestClient(t *testing.T) *fake.Clientset {
return fakeClient
}

func buildPod(namespace, podName string, podLabels map[string]string, phase api.PodPhase, request string) api.Pod {
return api.Pod{
ObjectMeta: api.ObjectMeta{
func buildPod(namespace, podName string, podLabels map[string]string, phase v1.PodPhase, request string) v1.Pod {
return v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: namespace,
Labels: podLabels,
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
api.ResourceCPU: resource.MustParse(request),
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceCPU: resource.MustParse(request),
},
},
},
},
},
Status: api.PodStatus{
Status: v1.PodStatus{
Phase: phase,
Conditions: []api.PodCondition{
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Status: api.ConditionTrue,
Type: v1.PodReady,
Status: v1.ConditionTrue,
},
},
},
@@ -231,7 +230,7 @@ func TestCPU(t *testing.T) {
|
||||
desiredResourceValues: PodResourceInfo{
|
||||
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
|
||||
},
|
||||
resourceName: api.ResourceCPU,
|
||||
resourceName: v1.ResourceCPU,
|
||||
targetTimestamp: 1,
|
||||
reportedPodMetrics: [][]int64{{5000}, {5000}, {5000}},
|
||||
}
|
||||
@@ -271,7 +270,7 @@ func TestCPUMoreMetrics(t *testing.T) {
|
||||
"test-pod-0": 5000, "test-pod-1": 5000, "test-pod-2": 5000,
|
||||
"test-pod-3": 5000, "test-pod-4": 5000,
|
||||
},
|
||||
resourceName: api.ResourceCPU,
|
||||
resourceName: v1.ResourceCPU,
|
||||
targetTimestamp: 10,
|
||||
reportedPodMetrics: [][]int64{{1000, 2000, 2000}, {5000}, {1000, 1000, 1000, 2000}, {4000, 1000}, {5000}},
|
||||
}
|
||||
@@ -284,7 +283,7 @@ func TestCPUMissingMetrics(t *testing.T) {
|
||||
desiredResourceValues: PodResourceInfo{
|
||||
"test-pod-0": 4000,
|
||||
},
|
||||
resourceName: api.ResourceCPU,
|
||||
resourceName: v1.ResourceCPU,
|
||||
reportedPodMetrics: [][]int64{{4000}},
|
||||
}
|
||||
tc.runTest(t)
|
||||
@@ -314,7 +313,7 @@ func TestQpsSuperfluousMetrics(t *testing.T) {
|
||||
func TestCPUEmptyMetrics(t *testing.T) {
|
||||
tc := testCase{
|
||||
replicas: 3,
|
||||
resourceName: api.ResourceCPU,
|
||||
resourceName: v1.ResourceCPU,
|
||||
desiredError: fmt.Errorf("no metrics returned from heapster"),
|
||||
reportedMetricsPoints: [][]metricPoint{},
|
||||
reportedPodMetrics: [][]int64{},
|
||||
@@ -338,7 +337,7 @@ func TestQpsEmptyEntries(t *testing.T) {
|
||||
func TestCPUZeroReplicas(t *testing.T) {
|
||||
tc := testCase{
|
||||
replicas: 0,
|
||||
resourceName: api.ResourceCPU,
|
||||
resourceName: v1.ResourceCPU,
|
||||
desiredError: fmt.Errorf("no metrics returned from heapster"),
|
||||
reportedPodMetrics: [][]int64{},
|
||||
}
|
||||
@@ -348,7 +347,7 @@ func TestCPUZeroReplicas(t *testing.T) {
|
||||
func TestCPUEmptyMetricsForOnePod(t *testing.T) {
|
||||
tc := testCase{
|
||||
replicas: 3,
|
||||
resourceName: api.ResourceCPU,
|
||||
resourceName: v1.ResourceCPU,
|
||||
desiredResourceValues: PodResourceInfo{
|
||||
"test-pod-0": 100, "test-pod-1": 700,
|
||||
},
|
||||
|
||||
@@ -21,8 +21,8 @@ import (
"math"
"time"

"k8s.io/kubernetes/pkg/api"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
metricsclient "k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
@@ -30,10 +30,10 @@ import (

type ReplicaCalculator struct {
metricsClient metricsclient.MetricsClient
podsGetter unversionedcore.PodsGetter
podsGetter v1core.PodsGetter
}

func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter unversionedcore.PodsGetter) *ReplicaCalculator {
func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter v1core.PodsGetter) *ReplicaCalculator {
return &ReplicaCalculator{
metricsClient: metricsClient,
podsGetter: podsGetter,
@@ -42,13 +42,13 @@ func NewReplicaCalculator(metricsClient metricsclient.MetricsClient, podsGetter

// GetResourceReplicas calculates the desired replica count based on a target resource utilization percentage
// of the given resource for pods matching the given selector in the given namespace, and the current replica count
func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource api.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) {
func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUtilization int32, resource v1.ResourceName, namespace string, selector labels.Selector) (replicaCount int32, utilization int32, timestamp time.Time, err error) {
metrics, timestamp, err := c.metricsClient.GetResourceMetric(resource, namespace, selector)
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get metrics for resource %s: %v", resource, err)
}

podList, err := c.podsGetter.Pods(namespace).List(api.ListOptions{LabelSelector: selector})
podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
@@ -74,7 +74,7 @@ func (c *ReplicaCalculator) GetResourceReplicas(currentReplicas int32, targetUti

requests[pod.Name] = podSum

if pod.Status.Phase != api.PodRunning || !api.IsPodReady(&pod) {
if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name)
@@ -156,7 +156,7 @@ func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtili
return 0, 0, time.Time{}, fmt.Errorf("unable to get metric %s: %v", metricName, err)
}

podList, err := c.podsGetter.Pods(namespace).List(api.ListOptions{LabelSelector: selector})
podList, err := c.podsGetter.Pods(namespace).List(v1.ListOptions{LabelSelector: selector.String()})
if err != nil {
return 0, 0, time.Time{}, fmt.Errorf("unable to get pods while calculating replica count: %v", err)
}
@@ -170,7 +170,7 @@ func (c *ReplicaCalculator) GetMetricReplicas(currentReplicas int32, targetUtili
missingPods := sets.NewString()

for _, pod := range podList.Items {
if pod.Status.Phase != api.PodRunning || !api.IsPodReady(&pod) {
if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
// save this pod name for later, but pretend it doesn't exist for now
unreadyPods.Insert(pod.Name)
delete(metrics, pod.Name)

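The readiness gate above is shared by both replica-count paths: a pod that is not Running and Ready is pulled out of the metrics map and tracked separately, so it neither inflates nor deflates the measured utilization. A standalone sketch of that filter, using only types this file already imports (the function name is illustrative):

// filterUnready removes pods that are not running-and-ready from the
// metrics map and returns their names for the later scale decision.
func filterUnready(pods []v1.Pod, metrics map[string]int64) sets.String {
	unreadyPods := sets.NewString()
	for _, pod := range pods {
		if pod.Status.Phase != v1.PodRunning || !v1.IsPodReady(&pod) {
			unreadyPods.Insert(pod.Name)
			delete(metrics, pod.Name)
		}
	}
	return unreadyPods
}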
@@ -25,12 +25,11 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
_ "k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
@@ -44,7 +43,7 @@ import (
)

type resourceInfo struct {
name api.ResourceName
name v1.ResourceName
requests []resource.Quantity
levels []int64

@@ -70,7 +69,7 @@ type replicaCalcTestCase struct {
resource *resourceInfo
metric *metricInfo

podReadiness []api.ConditionStatus
podReadiness []v1.ConditionStatus
}

const (
@@ -82,43 +81,43 @@ func (tc *replicaCalcTestCase) prepareTestClient(t *testing.T) *fake.Clientset {

fakeClient := &fake.Clientset{}
fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
obj := &api.PodList{}
obj := &v1.PodList{}
for i := 0; i < int(tc.currentReplicas); i++ {
podReadiness := api.ConditionTrue
podReadiness := v1.ConditionTrue
if tc.podReadiness != nil {
podReadiness = tc.podReadiness[i]
}
podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
pod := api.Pod{
Status: api.PodStatus{
Phase: api.PodRunning,
Conditions: []api.PodCondition{
pod := v1.Pod{
Status: v1.PodStatus{
Phase: v1.PodRunning,
Conditions: []v1.PodCondition{
{
Type: api.PodReady,
Type: v1.PodReady,
Status: podReadiness,
},
},
},
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: testNamespace,
Labels: map[string]string{
"name": podNamePrefix,
},
},
Spec: api.PodSpec{
Containers: []api.Container{{}, {}},
Spec: v1.PodSpec{
Containers: []v1.Container{{}, {}},
},
}

if tc.resource != nil && i < len(tc.resource.requests) {
pod.Spec.Containers[0].Resources = api.ResourceRequirements{
Requests: api.ResourceList{
pod.Spec.Containers[0].Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
tc.resource.name: tc.resource.requests[i],
},
}
pod.Spec.Containers[1].Resources = api.ResourceRequirements{
Requests: api.ResourceList{
pod.Spec.Containers[1].Resources = v1.ResourceRequirements{
Requests: v1.ResourceList{
tc.resource.name: tc.resource.requests[i],
},
}
@@ -255,7 +254,7 @@ func TestReplicaCalcScaleUp(t *testing.T) {
currentReplicas: 3,
expectedReplicas: 5,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700},

@@ -270,9 +269,9 @@ func TestReplicaCalcScaleUpUnreadyLessScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue},
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{300, 500, 700},

@@ -287,9 +286,9 @@ func TestReplicaCalcScaleUpUnreadyNoScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionFalse, api.ConditionFalse},
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 500, 700},

@@ -318,7 +317,7 @@ func TestReplicaCalcScaleUpCMUnreadyLessScale(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionFalse},
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{
name: "qps",
levels: []float64{50.0, 10.0, 30.0},
@@ -333,7 +332,7 @@ func TestReplicaCalcScaleUpCMUnreadyNoScaleWouldScaleDown(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionFalse},
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionFalse},
metric: &metricInfo{
name: "qps",
levels: []float64{50.0, 15.0, 30.0},
@@ -349,7 +348,7 @@ func TestReplicaCalcScaleDown(t *testing.T) {
currentReplicas: 5,
expectedReplicas: 3,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250},

@@ -378,9 +377,9 @@ func TestReplicaCalcScaleDownIgnoresUnreadyPods(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 5,
expectedReplicas: 2,
podReadiness: []api.ConditionStatus{api.ConditionTrue, api.ConditionTrue, api.ConditionTrue, api.ConditionFalse, api.ConditionFalse},
podReadiness: []v1.ConditionStatus{v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionFalse, v1.ConditionFalse},
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 300, 500, 250, 250},

@@ -396,7 +395,7 @@ func TestReplicaCalcTolerance(t *testing.T) {
currentReplicas: 3,
expectedReplicas: 3,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("0.9"), resource.MustParse("1.0"), resource.MustParse("1.1")},
levels: []int64{1010, 1030, 1020},

@@ -426,7 +425,7 @@ func TestReplicaCalcSuperfluousMetrics(t *testing.T) {
currentReplicas: 4,
expectedReplicas: 24,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{4000, 9500, 3000, 7000, 3200, 2000},
targetUtilization: 100,
@@ -441,7 +440,7 @@ func TestReplicaCalcMissingMetrics(t *testing.T) {
currentReplicas: 4,
expectedReplicas: 3,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{400, 95},

@@ -457,7 +456,7 @@ func TestReplicaCalcEmptyMetrics(t *testing.T) {
currentReplicas: 4,
expectedError: fmt.Errorf("unable to get metrics for resource cpu: no metrics returned from heapster"),
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{},

@@ -472,7 +471,7 @@ func TestReplicaCalcEmptyCPURequest(t *testing.T) {
currentReplicas: 1,
expectedError: fmt.Errorf("missing request for"),
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{},
levels: []int64{200},

@@ -487,7 +486,7 @@ func TestReplicaCalcMissingMetricsNoChangeEq(t *testing.T) {
currentReplicas: 2,
expectedReplicas: 2,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{1000},

@@ -503,7 +502,7 @@ func TestReplicaCalcMissingMetricsNoChangeGt(t *testing.T) {
currentReplicas: 2,
expectedReplicas: 2,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{1900},

@@ -519,7 +518,7 @@ func TestReplicaCalcMissingMetricsNoChangeLt(t *testing.T) {
currentReplicas: 2,
expectedReplicas: 2,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{600},

@@ -534,9 +533,9 @@ func TestReplicaCalcMissingMetricsUnreadyNoChange(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue},
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 450},

@@ -551,9 +550,9 @@ func TestReplicaCalcMissingMetricsUnreadyScaleUp(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 3,
expectedReplicas: 4,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue},
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 2000},

@@ -568,9 +567,9 @@ func TestReplicaCalcMissingMetricsUnreadyScaleDown(t *testing.T) {
tc := replicaCalcTestCase{
currentReplicas: 4,
expectedReplicas: 3,
podReadiness: []api.ConditionStatus{api.ConditionFalse, api.ConditionTrue, api.ConditionTrue, api.ConditionTrue},
podReadiness: []v1.ConditionStatus{v1.ConditionFalse, v1.ConditionTrue, v1.ConditionTrue, v1.ConditionTrue},
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
requests: []resource.Quantity{resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0"), resource.MustParse("1.0")},
levels: []int64{100, 100, 100},

@@ -609,7 +608,7 @@ func TestReplicaCalcComputedToleranceAlgImplementation(t *testing.T) {
currentReplicas: startPods,
expectedReplicas: finalPods,
resource: &resourceInfo{
name: api.ResourceCPU,
name: v1.ResourceCPU,
levels: []int64{
totalUsedCPUOfAllPods / 10,
totalUsedCPUOfAllPods / 10,

@@ -21,9 +21,9 @@ import (
"sync"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/labels"
@@ -69,7 +69,7 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor
terminatedPodThreshold: terminatedPodThreshold,
deletePod: func(namespace, name string) error {
glog.Infof("PodGC is force deleting Pod: %v:%v", namespace, name)
return kubeClient.Core().Pods(namespace).Delete(name, api.NewDeleteOptions(0))
return kubeClient.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
},
}

@@ -78,14 +78,14 @@ func NewPodGC(kubeClient clientset.Interface, podInformer cache.SharedIndexInfor

gcc.nodeStore.Store, gcc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return gcc.kubeClient.Core().Nodes().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return gcc.kubeClient.Core().Nodes().Watch(options)
},
},
&api.Node{},
&v1.Node{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{},
)
@@ -129,15 +129,15 @@ func (gcc *PodGCController) gc() {
gcc.gcUnscheduledTerminating(pods)
}

func isPodTerminated(pod *api.Pod) bool {
if phase := pod.Status.Phase; phase != api.PodPending && phase != api.PodRunning && phase != api.PodUnknown {
func isPodTerminated(pod *v1.Pod) bool {
if phase := pod.Status.Phase; phase != v1.PodPending && phase != v1.PodRunning && phase != v1.PodUnknown {
return true
}
return false
}

func (gcc *PodGCController) gcTerminated(pods []*api.Pod) {
terminatedPods := []*api.Pod{}
func (gcc *PodGCController) gcTerminated(pods []*v1.Pod) {
terminatedPods := []*v1.Pod{}
for _, pod := range pods {
if isPodTerminated(pod) {
terminatedPods = append(terminatedPods, pod)
@@ -171,7 +171,7 @@ func (gcc *PodGCController) gcTerminated(pods []*api.Pod) {
}

// gcOrphaned deletes pods that are bound to nodes that don't exist.
func (gcc *PodGCController) gcOrphaned(pods []*api.Pod) {
func (gcc *PodGCController) gcOrphaned(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing orphaned")

for _, pod := range pods {
@@ -191,7 +191,7 @@ func (gcc *PodGCController) gcOrphaned(pods []*api.Pod) {
}

// gcUnscheduledTerminating deletes pods that are terminating and haven't been scheduled to a particular node.
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*api.Pod) {
func (gcc *PodGCController) gcUnscheduledTerminating(pods []*v1.Pod) {
glog.V(4).Infof("GC'ing unscheduled pods which are terminating.")

for _, pod := range pods {
@@ -209,7 +209,7 @@ func (gcc *PodGCController) gcUnscheduledTerminating(pods []*api.Pod) {
}

// byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
type byCreationTimestamp []*api.Pod
type byCreationTimestamp []*v1.Pod

func (o byCreationTimestamp) Len() int { return len(o) }
func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }

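The force delete above passes v1.NewDeleteOptions(0), i.e. DeleteOptions with a zero-second grace period, which tells the API server to remove the pod immediately rather than waiting for graceful termination. A minimal equivalent call, assuming the same release_1_5 clientset (the wrapper is illustrative, not part of this commit):

// forceDeletePod issues an immediate (grace period 0) pod deletion.
func forceDeletePod(c clientset.Interface, namespace, name string) error {
	return c.Core().Pods(namespace).Delete(name, v1.NewDeleteOptions(0))
}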
@@ -21,10 +21,10 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
)
@@ -40,7 +40,7 @@ func (*FakeController) HasSynced() bool {
func TestGCTerminated(t *testing.T) {
type nameToPhase struct {
name string
phase api.PodPhase
phase v1.PodPhase
}

testCases := []struct {
@@ -50,8 +50,8 @@ func TestGCTerminated(t *testing.T) {
}{
{
pods: []nameToPhase{
{name: "a", phase: api.PodFailed},
{name: "b", phase: api.PodSucceeded},
{name: "a", phase: v1.PodFailed},
{name: "b", phase: v1.PodSucceeded},
},
threshold: 0,
// threshold = 0 disables terminated pod deletion
@@ -59,34 +59,34 @@ func TestGCTerminated(t *testing.T) {
},
{
pods: []nameToPhase{
{name: "a", phase: api.PodFailed},
{name: "b", phase: api.PodSucceeded},
{name: "c", phase: api.PodFailed},
{name: "a", phase: v1.PodFailed},
{name: "b", phase: v1.PodSucceeded},
{name: "c", phase: v1.PodFailed},
},
threshold: 1,
deletedPodNames: sets.NewString("a", "b"),
},
{
pods: []nameToPhase{
{name: "a", phase: api.PodRunning},
{name: "b", phase: api.PodSucceeded},
{name: "c", phase: api.PodFailed},
{name: "a", phase: v1.PodRunning},
{name: "b", phase: v1.PodSucceeded},
{name: "c", phase: v1.PodFailed},
},
threshold: 1,
deletedPodNames: sets.NewString("b"),
},
{
pods: []nameToPhase{
{name: "a", phase: api.PodFailed},
{name: "b", phase: api.PodSucceeded},
{name: "a", phase: v1.PodFailed},
{name: "b", phase: v1.PodSucceeded},
},
threshold: 1,
deletedPodNames: sets.NewString("a"),
},
{
pods: []nameToPhase{
{name: "a", phase: api.PodFailed},
{name: "b", phase: api.PodSucceeded},
{name: "a", phase: v1.PodFailed},
{name: "b", phase: v1.PodSucceeded},
},
threshold: 5,
deletedPodNames: sets.NewString(),
@@ -108,16 +108,16 @@ func TestGCTerminated(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&api.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: api.PodStatus{Phase: pod.phase},
Spec: api.PodSpec{NodeName: "node"},
gcc.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: v1.PodStatus{Phase: pod.phase},
Spec: v1.PodSpec{NodeName: "node"},
})
}

store := cache.NewStore(cache.MetaNamespaceKeyFunc)
store.Add(&api.Node{
ObjectMeta: api.ObjectMeta{Name: "node"},
store.Add(&v1.Node{
ObjectMeta: v1.ObjectMeta{Name: "node"},
})
gcc.nodeStore = cache.StoreToNodeLister{Store: store}
gcc.podController = &FakeController{}
@@ -143,7 +143,7 @@ func TestGCTerminated(t *testing.T) {
func TestGCOrphaned(t *testing.T) {
type nameToPhase struct {
name string
phase api.PodPhase
phase v1.PodPhase
}

testCases := []struct {
@@ -153,15 +153,15 @@ func TestGCOrphaned(t *testing.T) {
}{
{
pods: []nameToPhase{
{name: "a", phase: api.PodFailed},
{name: "b", phase: api.PodSucceeded},
{name: "a", phase: v1.PodFailed},
{name: "b", phase: v1.PodSucceeded},
},
threshold: 0,
deletedPodNames: sets.NewString("a", "b"),
},
{
pods: []nameToPhase{
{name: "a", phase: api.PodRunning},
{name: "a", phase: v1.PodRunning},
},
threshold: 1,
deletedPodNames: sets.NewString("a"),
@@ -183,10 +183,10 @@ func TestGCOrphaned(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&api.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: api.PodStatus{Phase: pod.phase},
Spec: api.PodSpec{NodeName: "node"},
gcc.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime}},
Status: v1.PodStatus{Phase: pod.phase},
Spec: v1.PodSpec{NodeName: "node"},
})
}

@@ -220,7 +220,7 @@ func TestGCOrphaned(t *testing.T) {
func TestGCUnscheduledTerminating(t *testing.T) {
type nameToPhase struct {
name string
phase api.PodPhase
phase v1.PodPhase
deletionTimeStamp *unversioned.Time
nodeName string
}
@@ -233,18 +233,18 @@ func TestGCUnscheduledTerminating(t *testing.T) {
{
name: "Unscheduled pod in any phase must be deleted",
pods: []nameToPhase{
{name: "a", phase: api.PodFailed, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "b", phase: api.PodSucceeded, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "c", phase: api.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "a", phase: v1.PodFailed, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
{name: "c", phase: v1.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: ""},
},
deletedPodNames: sets.NewString("a", "b", "c"),
},
{
name: "Scheduled pod in any phase must not be deleted",
pods: []nameToPhase{
{name: "a", phase: api.PodFailed, deletionTimeStamp: nil, nodeName: ""},
{name: "b", phase: api.PodSucceeded, deletionTimeStamp: nil, nodeName: "node"},
{name: "c", phase: api.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: "node"},
{name: "a", phase: v1.PodFailed, deletionTimeStamp: nil, nodeName: ""},
{name: "b", phase: v1.PodSucceeded, deletionTimeStamp: nil, nodeName: "node"},
{name: "c", phase: v1.PodRunning, deletionTimeStamp: &unversioned.Time{}, nodeName: "node"},
},
deletedPodNames: sets.NewString(),
},
@@ -265,11 +265,11 @@ func TestGCUnscheduledTerminating(t *testing.T) {
creationTime := time.Unix(0, 0)
for _, pod := range test.pods {
creationTime = creationTime.Add(1 * time.Hour)
gcc.podStore.Indexer.Add(&api.Pod{
ObjectMeta: api.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime},
gcc.podStore.Indexer.Add(&v1.Pod{
ObjectMeta: v1.ObjectMeta{Name: pod.name, CreationTimestamp: unversioned.Time{Time: creationTime},
DeletionTimestamp: pod.deletionTimeStamp},
Status: api.PodStatus{Phase: pod.phase},
Spec: api.PodSpec{NodeName: pod.nodeName},
Status: v1.PodStatus{Phase: pod.phase},
Spec: v1.PodSpec{NodeName: pod.nodeName},
})
}


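The TestGCTerminated table above pins down the threshold semantics: a threshold of 0 disables terminated-pod cleanup entirely, while a threshold of N keeps the N most recently created terminated pods and deletes the older ones (running pods are never candidates). A sketch of that selection step, assuming the byCreationTimestamp sort from the controller plus the standard sort package; the helper name is illustrative:

// oldestBeyondThreshold returns the terminated pods that gcTerminated
// would delete: everything except the newest `threshold` pods.
func oldestBeyondThreshold(terminatedPods []*v1.Pod, threshold int) []*v1.Pod {
	if threshold <= 0 || len(terminatedPods) <= threshold {
		return nil // threshold 0 disables deletion; under threshold, keep all
	}
	sort.Sort(byCreationTimestamp(terminatedPods)) // oldest first
	return terminatedPods[:len(terminatedPods)-threshold]
}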
@@ -26,14 +26,14 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -107,13 +107,13 @@ func NewReplicaSetController(rsInformer informers.ReplicaSetInformer, podInforme
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})

rsc := &ReplicaSetController{
kubeClient: kubeClient,
podControl: controller.RealPodControl{
KubeClient: kubeClient,
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "replicaset-controller"}),
Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "replicaset-controller"}),
},
burstReplicas: burstReplicas,
expectations: controller.NewUIDTrackingControllerExpectations(controller.NewControllerExpectations()),
@@ -176,7 +176,7 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) {
// getPodReplicaSet returns the replica set managing the given pod.
// TODO: Surface that we are ignoring multiple replica sets for a single pod.
// TODO: use ownerReference.Controller to determine if the rs controls the pod.
func (rsc *ReplicaSetController) getPodReplicaSet(pod *api.Pod) *extensions.ReplicaSet {
func (rsc *ReplicaSetController) getPodReplicaSet(pod *v1.Pod) *extensions.ReplicaSet {
// look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := rsc.lookupCache.GetMatchingObject(pod); cached {
rs, ok := obj.(*extensions.ReplicaSet)
@@ -254,7 +254,7 @@ func (rsc *ReplicaSetController) updateRS(old, cur interface{}) {
}

// isCacheValid check if the cache is valid
func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions.ReplicaSet) bool {
func (rsc *ReplicaSetController) isCacheValid(pod *v1.Pod, cachedRS *extensions.ReplicaSet) bool {
_, err := rsc.rsLister.ReplicaSets(cachedRS.Namespace).Get(cachedRS.Name)
// rs has been deleted or updated, cache is invalid
if err != nil || !isReplicaSetMatch(pod, cachedRS) {
@@ -265,7 +265,7 @@ func (rsc *ReplicaSetController) isCacheValid(pod *api.Pod, cachedRS *extensions

// isReplicaSetMatch take a Pod and ReplicaSet, return whether the Pod and ReplicaSet are matching
// TODO(mqliang): This logic is a copy from GetPodReplicaSets(), remove the duplication
func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {
func isReplicaSetMatch(pod *v1.Pod, rs *extensions.ReplicaSet) bool {
if rs.Namespace != pod.Namespace {
return false
}
@@ -284,7 +284,7 @@ func isReplicaSetMatch(pod *api.Pod, rs *extensions.ReplicaSet) bool {

// When a pod is created, enqueue the replica set that manages it and update it's expectations.
func (rsc *ReplicaSetController) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)
glog.V(4).Infof("Pod %s created: %#v.", pod.Name, pod)

rs := rsc.getPodReplicaSet(pod)
@@ -308,10 +308,10 @@ func (rsc *ReplicaSetController) addPod(obj interface{}) {

// When a pod is updated, figure out what replica set/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new replica set. old and cur must be *api.Pod types.
// and new replica set. old and cur must be *v1.Pod types.
func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
@@ -348,9 +348,9 @@ func (rsc *ReplicaSetController) updatePod(old, cur interface{}) {
}

// When a pod is deleted, enqueue the replica set that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (rsc *ReplicaSetController) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)

// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
@@ -362,7 +362,7 @@ func (rsc *ReplicaSetController) deletePod(obj interface{}) {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %+v", obj))
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a pod %#v", obj))
return
@@ -426,8 +426,8 @@ func (rsc *ReplicaSetController) processNextWorkItem() bool {
// manageReplicas checks and updates replicas for the given ReplicaSet.
// Does NOT modify <filteredPods>.
// It will requeue the replica set in case of an error while creating/deleting pods.
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *extensions.ReplicaSet) error {
diff := len(filteredPods) - int(rs.Spec.Replicas)
func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *extensions.ReplicaSet) error {
diff := len(filteredPods) - int(*(rs.Spec.Replicas))
rsKey, err := controller.KeyFunc(rs)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't get key for ReplicaSet %#v: %v", rs, err))
@@ -448,7 +448,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
rsc.expectations.ExpectCreations(rsKey, diff)
var wg sync.WaitGroup
wg.Add(diff)
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
for i := 0; i < diff; i++ {
go func() {
defer wg.Done()
@@ -456,7 +456,7 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext

if rsc.garbageCollectorEnabled {
var trueVar = true
controllerRef := &api.OwnerReference{
controllerRef := &v1.OwnerReference{
APIVersion: getRSKind().GroupVersion().String(),
Kind: getRSKind().Kind,
Name: rs.Name,
@@ -481,9 +481,9 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*api.Pod, rs *ext
diff = rsc.burstReplicas
}
errCh = make(chan error, diff)
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, rs.Spec.Replicas, diff)
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff)
// No need to sort pods if we are about to delete all of them
if rs.Spec.Replicas != 0 {
if *(rs.Spec.Replicas) != 0 {
// Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible.
@@ -567,7 +567,7 @@ func (rsc *ReplicaSetController) syncReplicaSet(key string) error {
// NOTE: filteredPods are pointing to objects from cache - if you need to
// modify them, you need to copy it first.
// TODO: Do the List and Filter in a single pass, or use an index.
var filteredPods []*api.Pod
var filteredPods []*v1.Pod
if rsc.garbageCollectorEnabled {
// list all pods to include the pods that don't match the rs`s selector
// anymore but has the stale controller ref.

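The `*(rs.Spec.Replicas)` dereferences introduced above follow from the move to the versioned extensions/v1beta1 types, where ReplicaSetSpec.Replicas is a *int32 (so an unset field can be distinguished from an explicit zero and defaulted server-side) rather than a plain int32. A nil-safe accessor along these lines is one way to guard such call sites; the helper is hypothetical, not part of this commit:

// replicasOrDefault is a hypothetical nil-safe accessor for the
// pointer-valued Replicas field of the versioned spec.
func replicasOrDefault(replicas *int32) int32 {
	if replicas == nil {
		return 1 // the API server defaults an unset Replicas to 1
	}
	return *replicas
}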
@@ -28,15 +28,15 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/apis/extensions"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
@@ -51,7 +51,7 @@ import (
)

func testNewReplicaSetControllerFromClient(client clientset.Interface, stopCh chan struct{}, burstReplicas int, lookupCacheSize int) *ReplicaSetController {
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
ret := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, burstReplicas, lookupCacheSize, false)
ret.podLister = &cache.StoreToPodLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
ret.rsLister = &cache.StoreToReplicaSetLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
@@ -98,34 +98,34 @@ func getKey(rs *extensions.ReplicaSet, t *testing.T) string {

func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.ReplicaSet {
rs := &extensions.ReplicaSet{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: extensions.ReplicaSetSpec{
Replicas: int32(replicas),
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: &unversioned.LabelSelector{MatchLabels: selectorMap},
Template: api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent,
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSDefault,
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{
"baz": "blah",
},
@@ -137,40 +137,40 @@ func newReplicaSet(replicas int, selectorMap map[string]string) *extensions.Repl
}

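The inline `func() *int32 { i := int32(replicas); return &i }()` in newReplicaSet above exists because Go cannot take the address of an expression directly. An equivalent named helper, sketched here for clarity (the name is illustrative, not from this commit):

// int32Ptr binds the value to a local so its address can be taken,
// matching the inline closure used in newReplicaSet.
func int32Ptr(i int32) *int32 {
	return &i
}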
// create a pod with the given phase for the given rs (same selectors and namespace)
|
||||
func newPod(name string, rs *extensions.ReplicaSet, status api.PodPhase, lastTransitionTime *unversioned.Time) *api.Pod {
|
||||
var conditions []api.PodCondition
|
||||
if status == api.PodRunning {
|
||||
condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
|
||||
func newPod(name string, rs *extensions.ReplicaSet, status v1.PodPhase, lastTransitionTime *unversioned.Time) *v1.Pod {
|
||||
var conditions []v1.PodCondition
|
||||
if status == v1.PodRunning {
|
||||
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
|
||||
if lastTransitionTime != nil {
|
||||
condition.LastTransitionTime = *lastTransitionTime
|
||||
}
|
||||
conditions = append(conditions, condition)
|
||||
}
|
||||
return &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: rs.Namespace,
|
||||
Labels: rs.Spec.Selector.MatchLabels,
|
||||
},
|
||||
Status: api.PodStatus{Phase: status, Conditions: conditions},
|
||||
Status: v1.PodStatus{Phase: status, Conditions: conditions},
|
||||
}
|
||||
}
|
||||
|
||||
// create count pods with the given phase for the given ReplicaSet (same selectors and namespace), and add them to the store.
|
||||
func newPodList(store cache.Store, count int, status api.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *api.PodList {
|
||||
pods := []api.Pod{}
|
||||
func newPodList(store cache.Store, count int, status v1.PodPhase, labelMap map[string]string, rs *extensions.ReplicaSet, name string) *v1.PodList {
|
||||
pods := []v1.Pod{}
|
||||
var trueVar = true
|
||||
controllerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
|
||||
controllerReference := v1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
|
||||
for i := 0; i < count; i++ {
|
||||
pod := newPod(fmt.Sprintf("%s%d", name, i), rs, status, nil)
|
||||
pod.ObjectMeta.Labels = labelMap
|
||||
pod.OwnerReferences = []api.OwnerReference{controllerReference}
|
||||
pod.OwnerReferences = []v1.OwnerReference{controllerReference}
|
||||
if store != nil {
|
||||
store.Add(pod)
|
||||
}
|
||||
pods = append(pods, *pod)
|
||||
}
|
||||
return &api.PodList{
|
||||
return &v1.PodList{
|
||||
Items: pods,
|
||||
}
|
||||
}
|
||||
@@ -197,7 +197,7 @@ type serverResponse struct {
|
||||
}
|
||||
|
||||
func TestSyncReplicaSetDoesNothing(t *testing.T) {
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
@@ -208,7 +208,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
|
||||
labelMap := map[string]string{"foo": "bar"}
|
||||
rsSpec := newReplicaSet(2, labelMap)
|
||||
manager.rsLister.Indexer.Add(rsSpec)
|
||||
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
|
||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod")
|
||||
|
||||
manager.podControl = &fakePodControl
|
||||
manager.syncReplicaSet(getKey(rsSpec, t))
|
||||
@@ -216,7 +216,7 @@ func TestSyncReplicaSetDoesNothing(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncReplicaSetDeletes(t *testing.T) {
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
@@ -228,14 +228,14 @@ func TestSyncReplicaSetDeletes(t *testing.T) {
|
||||
labelMap := map[string]string{"foo": "bar"}
|
||||
rsSpec := newReplicaSet(1, labelMap)
|
||||
manager.rsLister.Indexer.Add(rsSpec)
|
||||
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rsSpec, "pod")
|
||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rsSpec, "pod")
|
||||
|
||||
manager.syncReplicaSet(getKey(rsSpec, t))
|
||||
validateSyncReplicaSet(t, &fakePodControl, 0, 1, 0)
|
||||
}
|
||||
|
||||
func TestDeleteFinalStateUnknown(t *testing.T) {
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
@@ -254,7 +254,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
|
||||
labelMap := map[string]string{"foo": "bar"}
|
||||
rsSpec := newReplicaSet(1, labelMap)
|
||||
manager.rsLister.Indexer.Add(rsSpec)
|
||||
pods := newPodList(nil, 1, api.PodRunning, labelMap, rsSpec, "pod")
|
||||
pods := newPodList(nil, 1, v1.PodRunning, labelMap, rsSpec, "pod")
|
||||
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
|
||||
|
||||
go manager.worker()
|
||||
@@ -271,7 +271,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncReplicaSetCreates(t *testing.T) {
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||
@@ -297,7 +297,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
|
||||
}
|
||||
testServer := httptest.NewServer(&fakeHandler)
|
||||
defer testServer.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||
@@ -309,7 +309,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
|
||||
rs := newReplicaSet(activePods, labelMap)
|
||||
manager.rsLister.Indexer.Add(rs)
|
||||
rs.Status = extensions.ReplicaSetStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
|
||||
newPodList(manager.podLister.Indexer, activePods, api.PodRunning, labelMap, rs, "pod")
|
||||
newPodList(manager.podLister.Indexer, activePods, v1.PodRunning, labelMap, rs, "pod")
|
||||
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
@@ -343,7 +343,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
|
||||
testServer := httptest.NewServer(&fakeHandler)
|
||||
defer testServer.Close()
|
||||
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
stopCh := make(chan struct{})
|
||||
defer close(stopCh)
|
||||
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
|
||||
@@ -358,8 +358,8 @@ func TestControllerUpdateReplicas(t *testing.T) {
|
||||
manager.rsLister.Indexer.Add(rs)
|
||||
rs.Status = extensions.ReplicaSetStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
|
||||
rs.Generation = 1
|
||||
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
|
||||
newPodList(manager.podLister.Indexer, 2, api.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
|
||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
|
||||
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, extraLabelMap, rs, "podWithExtraLabel")
|
||||
|
||||
// This response body is just so we don't err out decoding the http response
|
||||
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
|
||||
@@ -391,7 +391,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
|
||||
}
|
||||
testServer := httptest.NewServer(&fakeHandler)
|
||||
defer testServer.Close()
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
stopCh := make(chan struct{})
|
||||
@@ -404,7 +404,7 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
|
||||
labelMap := map[string]string{"foo": "bar"}
|
||||
rsSpec := newReplicaSet(2, labelMap)
|
||||
manager.rsLister.Indexer.Add(rsSpec)
|
||||
newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap, rsSpec, "pod")
|
||||
newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rsSpec, "pod")
|
||||
|
||||
// Creates a replica and sets expectations
|
||||
rsSpec.Status.Replicas = 1
@@ -453,32 +453,32 @@ func TestSyncReplicaSetDormancy(t *testing.T) {
func TestPodControllerLookup(t *testing.T) {
stopCh := make(chan struct{})
defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0)
manager := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}), stopCh, BurstReplicas, 0)
manager.podListerSynced = alwaysReady
testCases := []struct {
inRSs []*extensions.ReplicaSet
pod *api.Pod
pod *v1.Pod
outRSName string
}{
// pods without labels don't match any ReplicaSets
{
inRSs: []*extensions.ReplicaSet{
{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
{ObjectMeta: v1.ObjectMeta{Name: "basic"}}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}},
outRSName: "",
},
// Matching labels, not namespace
{
inRSs: []*extensions.ReplicaSet{
{
ObjectMeta: api.ObjectMeta{Name: "foo"},
ObjectMeta: v1.ObjectMeta{Name: "foo"},
Spec: extensions.ReplicaSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRSName: "",
},
@@ -486,14 +486,14 @@ func TestPodControllerLookup(t *testing.T) {
{
inRSs: []*extensions.ReplicaSet{
{
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: extensions.ReplicaSetSpec{
Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}},
},
},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRSName: "bar",
},
@@ -523,7 +523,7 @@ func TestWatchControllers(t *testing.T) {
client.AddWatchReactor("replicasets", core.DefaultWatchReactor(fakeWatch, nil))
stopCh := make(chan struct{})
defer close(stopCh)
informers := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false)
informers.Start(stopCh)
manager.podListerSynced = alwaysReady
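// NewSharedInformerFactory gains a second client parameter in this commit;
// the tests pass nil for it. A sketch of the updated construction, assuming
// the fake clientset and stop channel set up earlier in this test:
//
//	informers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
//	manager := NewReplicaSetController(informers.ReplicaSets(), informers.Pods(), client, BurstReplicas, 0, false)
//	informers.Start(stopCh)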
@@ -540,7 +540,7 @@ func TestWatchControllers(t *testing.T) {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := *obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
if !v1.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("Expected %#v, but got %#v", testRSSpec, rsSpec)
}
close(received)
@@ -582,7 +582,7 @@ func TestWatchPods(t *testing.T) {
t.Errorf("Expected to find replica set under key %v", key)
}
rsSpec := obj.(*extensions.ReplicaSet)
if !api.Semantic.DeepDerivative(rsSpec, testRSSpec) {
if !v1.Semantic.DeepDerivative(rsSpec, testRSSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testRSSpec, rsSpec)
}
close(received)
@@ -592,9 +592,9 @@ func TestWatchPods(t *testing.T) {
// and make sure it hits the sync method for the right ReplicaSet.
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

pods := newPodList(nil, 1, api.PodRunning, labelMap, testRSSpec, "pod")
pods := newPodList(nil, 1, v1.PodRunning, labelMap, testRSSpec, "pod")
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod)

select {
@@ -636,7 +636,7 @@ func TestUpdatePods(t *testing.T) {
// case 1: We put in the podLister a pod with labels matching testRSSpec1,
// then update its labels to match testRSSpec2. We expect to receive a sync
// request for both replica sets.
pod1 := newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
pod1 := newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap1, testRSSpec1, "pod").Items[0]
pod1.ResourceVersion = "1"
pod2 := pod1
pod2.Labels = labelMap2
@@ -687,7 +687,7 @@ func TestControllerUpdateRequeue(t *testing.T) {

stopCh := make(chan struct{})
defer close(stopCh)
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
manager.podListerSynced = alwaysReady

@@ -695,7 +695,7 @@ func TestControllerUpdateRequeue(t *testing.T) {
rs := newReplicaSet(1, labelMap)
manager.rsLister.Indexer.Add(rs)
rs.Status = extensions.ReplicaSetStatus{Replicas: 2}
newPodList(manager.podLister.Indexer, 1, api.PodRunning, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 1, v1.PodRunning, labelMap, rs, "pod")

fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -756,7 +756,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {

// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@@ -769,7 +769,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager.rsLister.Indexer.Add(rsSpec)

expectedPods := int32(0)
pods := newPodList(nil, numReplicas, api.PodPending, labelMap, rsSpec, "pod")
pods := newPodList(nil, numReplicas, v1.PodPending, labelMap, rsSpec, "pod")

rsKey, err := controller.KeyFunc(rsSpec)
if err != nil {
@@ -779,7 +779,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Size up the controller, then size it down, and confirm the expected create/delete pattern
for _, replicas := range []int32{int32(numReplicas), 0} {

rsSpec.Spec.Replicas = replicas
*(rsSpec.Spec.Replicas) = replicas
manager.rsLister.Indexer.Add(rsSpec)

for i := 0; i < numReplicas; i += burstReplicas {
@@ -823,11 +823,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// To accurately simulate a watch we must delete the exact pods
// the rs is waiting for.
expectedDels := manager.expectations.GetUIDs(getKey(rsSpec, t))
podsToDelete := []*api.Pod{}
podsToDelete := []*v1.Pod{}
for _, key := range expectedDels.List() {
nsName := strings.Split(key, "/")
podsToDelete = append(podsToDelete, &api.Pod{
ObjectMeta: api.ObjectMeta{
podsToDelete = append(podsToDelete, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: rsSpec.Spec.Selector.MatchLabels,
@@ -867,8 +867,8 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
t.Fatalf("Waiting on unexpected number of deletes.")
}
nsName := strings.Split(expectedDel.List()[0], "/")
lastPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
lastPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: rsSpec.Spec.Selector.MatchLabels,
@@ -882,11 +882,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)

// Confirm that we've created the right number of replicas
activePods := int32(len(manager.podLister.Indexer.List()))
if activePods != rsSpec.Spec.Replicas {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", rsSpec.Spec.Replicas, activePods)
if activePods != *(rsSpec.Spec.Replicas) {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(rsSpec.Spec.Replicas), activePods)
}
// Replenish the pod list, since we cut it down sizing up
pods = newPodList(nil, int(replicas), api.PodRunning, labelMap, rsSpec, "pod")
pods = newPodList(nil, int(replicas), v1.PodRunning, labelMap, rsSpec, "pod")
}
}
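// extensions/v1beta1 makes ReplicaSetSpec.Replicas a *int32, so the test now
// writes through the pointer (*(rsSpec.Spec.Replicas) = replicas) and
// dereferences it when comparing counts. A small helper, hypothetical here
// but common for fixtures like these, keeps that readable:
//
//	func int32Ptr(i int32) *int32 { return &i }
//
//	rsSpec.Spec.Replicas = int32Ptr(2)        // set a fresh pointer
//	if activePods != *rsSpec.Spec.Replicas {  // read through the pointer
//		t.Fatalf("expected %d, got %d", *rsSpec.Spec.Replicas, activePods)
//	}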

@@ -910,7 +910,7 @@ func (fe FakeRSExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRSSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRSSyncExpectations(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{}
stopCh := make(chan struct{})
defer close(stopCh)
@@ -921,7 +921,7 @@ func TestRSSyncExpectations(t *testing.T) {
labelMap := map[string]string{"foo": "bar"}
rsSpec := newReplicaSet(2, labelMap)
manager.rsLister.Indexer.Add(rsSpec)
pods := newPodList(nil, 2, api.PodPending, labelMap, rsSpec, "pod")
pods := newPodList(nil, 2, v1.PodPending, labelMap, rsSpec, "pod")
manager.podLister.Indexer.Add(&pods.Items[0])
postExpectationsPod := pods.Items[1]

@@ -938,7 +938,7 @@ func TestRSSyncExpectations(t *testing.T) {
}

func TestDeleteControllerAndExpectations(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{})
defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, 10, 0)
@@ -993,7 +993,7 @@ func shuffle(controllers []*extensions.ReplicaSet) []*extensions.ReplicaSet {
}

func TestOverlappingRSs(t *testing.T) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
labelMap := map[string]string{"foo": "bar"}

for i := 0; i < 5; i++ {
@@ -1016,7 +1016,7 @@ func TestOverlappingRSs(t *testing.T) {
manager.rsLister.Indexer.Add(shuffledControllers[j])
}
// Add a pod and make sure only the oldest ReplicaSet is synced
pods := newPodList(nil, 1, api.PodPending, labelMap, controllers[0], "pod")
pods := newPodList(nil, 1, v1.PodPending, labelMap, controllers[0], "pod")
rsKey := getKey(controllers[0], t)

manager.addPod(&pods.Items[0])
@@ -1029,7 +1029,7 @@ func TestOverlappingRSs(t *testing.T) {
}

func TestDeletionTimestamp(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
labelMap := map[string]string{"foo": "bar"}
stopCh := make(chan struct{})
defer close(stopCh)
@@ -1042,7 +1042,7 @@ func TestDeletionTimestamp(t *testing.T) {
if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", rs, err)
}
pod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
pod := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod").Items[0]
pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
pod.ResourceVersion = "1"
manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
@@ -1063,7 +1063,7 @@ func TestDeletionTimestamp(t *testing.T) {

// An update from no deletion timestamp to having one should be treated
// as a deletion.
oldPod := newPodList(nil, 1, api.PodPending, labelMap, rs, "pod").Items[0]
oldPod := newPodList(nil, 1, v1.PodPending, labelMap, rs, "pod").Items[0]
oldPod.ResourceVersion = "2"
manager.expectations.ExpectDeletions(rsKey, []string{controller.PodKey(&pod)})
manager.updatePod(&oldPod, &pod)
@@ -1081,8 +1081,8 @@ func TestDeletionTimestamp(t *testing.T) {

// An update to the pod (including an update to the deletion timestamp)
// should not be counted as a second delete.
secondPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
secondPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: pod.Namespace,
Name: "secondPod",
Labels: pod.Labels,
@@ -1142,10 +1142,10 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
manager.rsLister.Indexer.Add(rs)
var trueVar = true
otherControllerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
otherControllerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1beta1", Kind: "ReplicaSet", Name: "AnotherRS", Controller: &trueVar}
// add to podLister a matching Pod controlled by another controller. Expect no patch.
pod := newPod("pod", rs, api.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{otherControllerReference}
pod := newPod("pod", rs, v1.PodRunning, nil)
pod.OwnerReferences = []v1.OwnerReference{otherControllerReference}
manager.podLister.Indexer.Add(pod)
err := manager.syncReplicaSet(getKey(rs, t))
if err != nil {
@@ -1165,9 +1165,9 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
// add to podLister one more matching pod that doesn't have a controller
// ref, but has an owner ref pointing to other object. Expect a patch to
// take control of it.
unrelatedOwnerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rs, api.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{unrelatedOwnerReference}
unrelatedOwnerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rs, v1.PodRunning, nil)
pod.OwnerReferences = []v1.OwnerReference{unrelatedOwnerReference}
manager.podLister.Indexer.Add(pod)

err := manager.syncReplicaSet(getKey(rs, t))
@@ -1187,9 +1187,9 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
manager.rsLister.Indexer.Add(rs)
// add to podLister a matching pod that has an ownerRef pointing to the rs,
// but ownerRef.Controller is false. Expect a patch to take control it.
rsOwnerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
pod := newPod("pod", rs, api.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{rsOwnerReference}
rsOwnerReference := v1.OwnerReference{UID: rs.UID, APIVersion: "v1", Kind: "ReplicaSet", Name: rs.Name}
pod := newPod("pod", rs, v1.PodRunning, nil)
pod.OwnerReferences = []v1.OwnerReference{rsOwnerReference}
manager.podLister.Indexer.Add(pod)

err := manager.syncReplicaSet(getKey(rs, t))
@@ -1209,8 +1209,8 @@ func TestPatchPodFails(t *testing.T) {
manager.rsLister.Indexer.Add(rs)
// add to podLister two matching pods. Expect two patches to take control
// them.
manager.podLister.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil))
// let both patches fail. The rs controller will assume it fails to take
// control of the pods and create new ones.
fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1231,9 +1231,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
manager.rsLister.Indexer.Add(rs)
// add to podLister three matching pods. Expect three patches to take control
// them, and later delete one of them.
manager.podLister.Indexer.Add(newPod("pod1", rs, api.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod2", rs, api.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod3", rs, api.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod1", rs, v1.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod2", rs, v1.PodRunning, nil))
manager.podLister.Indexer.Add(newPod("pod3", rs, v1.PodRunning, nil))
err := manager.syncReplicaSet(getKey(rs, t))
if err != nil {
t.Fatal(err)
@@ -1250,11 +1250,11 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
manager.rsLister.Indexer.Add(rs)
// put one pod in the podLister
pod := newPod("pod", rs, api.PodRunning, nil)
pod := newPod("pod", rs, v1.PodRunning, nil)
pod.ResourceVersion = "1"
var trueVar = true
rsOwnerReference := api.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
pod.OwnerReferences = []api.OwnerReference{rsOwnerReference}
rsOwnerReference := v1.OwnerReference{UID: rs.UID, APIVersion: "v1beta1", Kind: "ReplicaSet", Name: rs.Name, Controller: &trueVar}
pod.OwnerReferences = []v1.OwnerReference{rsOwnerReference}
updatedPod := *pod
// reset the labels
updatedPod.Labels = make(map[string]string)
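// OwnerReferences also move to the versioned type. The shape stays the same;
// only the package changes, and Controller remains a *bool. A sketch,
// assuming the trueVar pattern used in these tests:
//
//	trueVar := true
//	ref := v1.OwnerReference{
//		UID:        rs.UID,
//		APIVersion: "v1beta1",
//		Kind:       "ReplicaSet",
//		Name:       rs.Name,
//		Controller: &trueVar, // marks this owner as the managing controller
//	}
//	pod.OwnerReferences = []v1.OwnerReference{ref}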
@@ -1277,7 +1277,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
t.Fatal(err)
}
// expect 1 patch to be sent to remove the controllerRef for the pod.
// expect 2 creates because the rs.Spec.Replicas=2 and there exists no
// expect 2 creates because the *(rs.Spec.Replicas)=2 and there exists no
// matching pod.
validateSyncReplicaSet(t, fakePodControl, 2, 0, 1)
fakePodControl.Clear()
@@ -1290,7 +1290,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
defer close(stopCh)
manager, fakePodControl := setupManagerWithGCEnabled(stopCh, rs)
// put 2 pods in the podLister
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")
// update the RS so that its selector no longer matches the pods
updatedRS := *rs
updatedRS.Spec.Selector.MatchLabels = map[string]string{"foo": "baz"}
@@ -1311,7 +1311,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
t.Fatal(err)
}
// expect 2 patches to be sent to remove the controllerRef for the pods.
// expect 2 creates because the rc.Spec.Replicas=2 and there exists no
// expect 2 creates because the *(rc.Spec.Replicas)=2 and there exists no
// matching pod.
validateSyncReplicaSet(t, fakePodControl, 2, 0, 2)
fakePodControl.Clear()
@@ -1328,7 +1328,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
now := unversioned.Now()
rs.DeletionTimestamp = &now
manager.rsLister.Indexer.Add(rs)
pod1 := newPod("pod1", rs, api.PodRunning, nil)
pod1 := newPod("pod1", rs, v1.PodRunning, nil)
manager.podLister.Indexer.Add(pod1)

// no patch, no create
@@ -1349,7 +1349,7 @@ func TestReadyReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{})
defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -1362,8 +1362,8 @@ func TestReadyReplicas(t *testing.T) {
rs.Generation = 1
manager.rsLister.Indexer.Add(rs)

newPodList(manager.podLister.Indexer, 2, api.PodPending, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 2, api.PodRunning, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 2, v1.PodPending, labelMap, rs, "pod")
newPodList(manager.podLister.Indexer, 2, v1.PodRunning, labelMap, rs, "pod")

// This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Extensions.Codec(), &extensions.ReplicaSet{})
@@ -1392,7 +1392,7 @@ func TestAvailableReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()

client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
stopCh := make(chan struct{})
defer close(stopCh)
manager := testNewReplicaSetControllerFromClient(client, stopCh, BurstReplicas, 0)
@@ -1409,12 +1409,12 @@ func TestAvailableReplicas(t *testing.T) {

// First pod becomes ready 20s ago
moment := unversioned.Time{Time: time.Now().Add(-2e10)}
pod := newPod("pod", rs, api.PodRunning, &moment)
pod := newPod("pod", rs, v1.PodRunning, &moment)
manager.podLister.Indexer.Add(pod)

// Second pod becomes ready now
otherMoment := unversioned.Now()
otherPod := newPod("otherPod", rs, api.PodRunning, &otherMoment)
otherPod := newPod("otherPod", rs, v1.PodRunning, &otherMoment)
manager.podLister.Indexer.Add(otherPod)

// This response body is just so we don't err out decoding the http response
@@ -1440,7 +1440,7 @@ var (
condImagePullBackOff = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: imagePullBackOff,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
Reason: "NonExistentImage",
}
}
@@ -1448,7 +1448,7 @@ var (
condReplicaFailure = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
Reason: "OtherFailure",
}
}
@@ -1456,7 +1456,7 @@ var (
condReplicaFailure2 = func() extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: extensions.ReplicaSetReplicaFailure,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
Reason: "AnotherFailure",
}
}
@@ -1476,7 +1476,7 @@ func TestGetCondition(t *testing.T) {

status extensions.ReplicaSetStatus
condType extensions.ReplicaSetConditionType
condStatus api.ConditionStatus
condStatus v1.ConditionStatus
condReason string

expected bool

@@ -26,8 +26,9 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/extensions/internalversion"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/labels"
)

@@ -62,7 +63,7 @@ func updateReplicaSetStatus(c unversionedextensions.ReplicaSetInterface, rs exte
var getErr error
for i, rs := 0, &rs; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for ReplicaSet: %s/%s, ", rs.Namespace, rs.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, rs.Spec.Replicas) +
fmt.Sprintf("replicas %d->%d (need %d), ", rs.Status.Replicas, newStatus.Replicas, *(rs.Spec.Replicas)) +
fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rs.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
fmt.Sprintf("readyReplicas %d->%d, ", rs.Status.ReadyReplicas, newStatus.ReadyReplicas) +
fmt.Sprintf("availableReplicas %d->%d, ", rs.Status.AvailableReplicas, newStatus.AvailableReplicas) +
@@ -95,7 +96,7 @@ func (o overlappingReplicaSets) Less(i, j int) bool {
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
func calculateStatus(rs extensions.ReplicaSet, filteredPods []*v1.Pod, manageReplicasErr error) extensions.ReplicaSetStatus {
newStatus := rs.Status
// Count the number of pods that have labels matching the labels of the pod
// template of the replica set, the matching pods may have more
@@ -110,9 +111,9 @@ func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageRe
if templateLabel.Matches(labels.Set(pod.Labels)) {
fullyLabeledReplicasCount++
}
if api.IsPodReady(pod) {
if v1.IsPodReady(pod) {
readyReplicasCount++
if api.IsPodAvailable(pod, rs.Spec.MinReadySeconds, unversioned.Now()) {
if v1.IsPodAvailable(pod, rs.Spec.MinReadySeconds, unversioned.Now()) {
availableReplicasCount++
}
}
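// The ready/available tallies now come from the versioned helpers. A compact
// sketch of the counting loop above, assuming pods from the lister:
//
//	for _, pod := range filteredPods {
//		if v1.IsPodReady(pod) {
//			readyReplicasCount++
//			if v1.IsPodAvailable(pod, rs.Spec.MinReadySeconds, unversioned.Now()) {
//				availableReplicasCount++
//			}
//		}
//	}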
@@ -121,12 +122,12 @@ func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageRe
failureCond := GetCondition(rs.Status, extensions.ReplicaSetReplicaFailure)
if manageReplicasErr != nil && failureCond == nil {
var reason string
if diff := len(filteredPods) - int(rs.Spec.Replicas); diff < 0 {
if diff := len(filteredPods) - int(*(rs.Spec.Replicas)); diff < 0 {
reason = "FailedCreate"
} else if diff > 0 {
reason = "FailedDelete"
}
cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, api.ConditionTrue, reason, manageReplicasErr.Error())
cond := NewReplicaSetCondition(extensions.ReplicaSetReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
SetCondition(&newStatus, cond)
} else if manageReplicasErr == nil && failureCond != nil {
RemoveCondition(&newStatus, extensions.ReplicaSetReplicaFailure)
@@ -140,7 +141,7 @@ func calculateStatus(rs extensions.ReplicaSet, filteredPods []*api.Pod, manageRe
}

// NewReplicaSetCondition creates a new replica set condition.
func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status api.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition {
func NewReplicaSetCondition(condType extensions.ReplicaSetConditionType, status v1.ConditionStatus, reason, msg string) extensions.ReplicaSetCondition {
return extensions.ReplicaSetCondition{
Type: condType,
Status: status,

@@ -25,13 +25,12 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -122,9 +121,9 @@ type ReplicationManager struct {
func NewReplicationManager(podInformer cache.SharedIndexInformer, kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int, lookupCacheSize int, garbageCollectorEnabled bool) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
return newReplicationManager(
eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"}),
eventBroadcaster.NewRecorder(v1.EventSource{Component: "replication-controller"}),
podInformer, kubeClient, resyncPeriod, burstReplicas, lookupCacheSize, garbageCollectorEnabled)
}
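// Event plumbing follows the same move: the sink and the recorder source are
// now the v1 variants from the release_1_5 clientset. A sketch, assuming a
// clientset built as in the imports above:
//
//	eventBroadcaster := record.NewBroadcaster()
//	eventBroadcaster.StartLogging(glog.Infof)
//	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
//	recorder := eventBroadcaster.NewRecorder(v1.EventSource{Component: "replication-controller"})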

@@ -148,14 +147,14 @@ func newReplicationManager(eventRecorder record.EventRecorder, podInformer cache

rm.rcStore.Indexer, rm.rcController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rm.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return rm.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rm.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
},
},
&api.ReplicationController{},
&v1.ReplicationController{},
// TODO: Can we have much longer period here?
FullControllerResyncPeriod,
cache.ResourceEventHandlerFuncs{
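// The ListWatch closures switch to v1.ListOptions and feed the informer the
// versioned ReplicationController type. A sketch of the versioned pair,
// assuming the release_1_5 clientset from the imports above:
//
//	lw := &cache.ListWatch{
//		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
//			return kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
//		},
//		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
//			return kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
//		},
//	}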
@@ -235,10 +234,10 @@ func (rm *ReplicationManager) Run(workers int, stopCh <-chan struct{}) {
// getPodController returns the controller managing the given pod.
// TODO: Surface that we are ignoring multiple controllers for a single pod.
// TODO: use ownerReference.Controller to determine if the rc controls the pod.
func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationController {
func (rm *ReplicationManager) getPodController(pod *v1.Pod) *v1.ReplicationController {
// look up in the cache, if cached and the cache is valid, just return cached value
if obj, cached := rm.lookupCache.GetMatchingObject(pod); cached {
controller, ok := obj.(*api.ReplicationController)
controller, ok := obj.(*v1.ReplicationController)
if !ok {
// This should not happen
glog.Errorf("lookup cache does not return a ReplicationController object")
@@ -275,7 +274,7 @@ func (rm *ReplicationManager) getPodController(pod *api.Pod) *api.ReplicationCon
}

// isCacheValid check if the cache is valid
func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.ReplicationController) bool {
func (rm *ReplicationManager) isCacheValid(pod *v1.Pod, cachedRC *v1.ReplicationController) bool {
_, err := rm.rcStore.ReplicationControllers(cachedRC.Namespace).Get(cachedRC.Name)
// rc has been deleted or updated, cache is invalid
if err != nil || !isControllerMatch(pod, cachedRC) {
@@ -286,7 +285,7 @@ func (rm *ReplicationManager) isCacheValid(pod *api.Pod, cachedRC *api.Replicati

// isControllerMatch take a Pod and ReplicationController, return whether the Pod and ReplicationController are matching
// TODO(mqliang): This logic is a copy from GetPodControllers(), remove the duplication
func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool {
func isControllerMatch(pod *v1.Pod, rc *v1.ReplicationController) bool {
if rc.Namespace != pod.Namespace {
return false
}
@@ -301,8 +300,8 @@ func isControllerMatch(pod *api.Pod, rc *api.ReplicationController) bool {

// callback when RC is updated
func (rm *ReplicationManager) updateRC(old, cur interface{}) {
oldRC := old.(*api.ReplicationController)
curRC := cur.(*api.ReplicationController)
oldRC := old.(*v1.ReplicationController)
curRC := cur.(*v1.ReplicationController)

// We should invalidate the whole lookup cache if a RC's selector has been updated.
//
@@ -319,7 +318,7 @@ func (rm *ReplicationManager) updateRC(old, cur interface{}) {
rm.lookupCache.InvalidateAll()
}
// TODO: Remove when #31981 is resolved!
glog.Infof("Observed updated replication controller %v. Desired pod count change: %d->%d", curRC.Name, oldRC.Spec.Replicas, curRC.Spec.Replicas)
glog.Infof("Observed updated replication controller %v. Desired pod count change: %d->%d", curRC.Name, *(oldRC.Spec.Replicas), *(curRC.Spec.Replicas))

// You might imagine that we only really need to enqueue the
// controller when Spec changes, but it is safer to sync any
@@ -342,7 +341,7 @@ func (rm *ReplicationManager) updateRC(old, cur interface{}) {

// When a pod is created, enqueue the controller that manages it and update it's expectations.
func (rm *ReplicationManager) addPod(obj interface{}) {
pod := obj.(*api.Pod)
pod := obj.(*v1.Pod)

rc := rm.getPodController(pod)
if rc == nil {
@@ -366,10 +365,10 @@ func (rm *ReplicationManager) addPod(obj interface{}) {

// When a pod is updated, figure out what controller/s manage it and wake them
// up. If the labels of the pod have changed we need to awaken both the old
// and new controller. old and cur must be *api.Pod types.
// and new controller. old and cur must be *v1.Pod types.
func (rm *ReplicationManager) updatePod(old, cur interface{}) {
curPod := cur.(*api.Pod)
oldPod := old.(*api.Pod)
curPod := cur.(*v1.Pod)
oldPod := old.(*v1.Pod)
if curPod.ResourceVersion == oldPod.ResourceVersion {
// Periodic resync will send update events for all known pods.
// Two different versions of the same pod will always have different RVs.
@@ -407,9 +406,9 @@ func (rm *ReplicationManager) updatePod(old, cur interface{}) {
}

// When a pod is deleted, enqueue the controller that manages the pod and update its expectations.
// obj could be an *api.Pod, or a DeletionFinalStateUnknown marker item.
// obj could be an *v1.Pod, or a DeletionFinalStateUnknown marker item.
func (rm *ReplicationManager) deletePod(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)

// When a delete is dropped, the relist will notice a pod in the store not
// in the list, leading to the insertion of a tombstone object which contains
@@ -421,7 +420,7 @@ func (rm *ReplicationManager) deletePod(obj interface{}) {
glog.Errorf("Couldn't get object from tombstone %#v", obj)
return
}
pod, ok = tombstone.Obj.(*api.Pod)
pod, ok = tombstone.Obj.(*v1.Pod)
if !ok {
glog.Errorf("Tombstone contained object that is not a pod %#v", obj)
return
@@ -439,7 +438,7 @@ func (rm *ReplicationManager) deletePod(obj interface{}) {
}
}

// obj could be an *api.ReplicationController, or a DeletionFinalStateUnknown marker item.
// obj could be an *v1.ReplicationController, or a DeletionFinalStateUnknown marker item.
func (rm *ReplicationManager) enqueueController(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -486,8 +485,8 @@ func (rm *ReplicationManager) worker() {

// manageReplicas checks and updates replicas for the given replication controller.
// Does NOT modify <filteredPods>.
func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.ReplicationController) error {
diff := len(filteredPods) - int(rc.Spec.Replicas)
func (rm *ReplicationManager) manageReplicas(filteredPods []*v1.Pod, rc *v1.ReplicationController) error {
diff := len(filteredPods) - int(*(rc.Spec.Replicas))
rcKey, err := controller.KeyFunc(rc)
if err != nil {
return err
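// manageReplicas derives its create/delete decision from the pointer-valued
// spec: diff < 0 means too few pods (create -diff), diff > 0 means too many
// (delete diff). A sketch of that branch logic, assuming a defaulted,
// non-nil rc.Spec.Replicas:
//
//	diff := len(filteredPods) - int(*rc.Spec.Replicas)
//	switch {
//	case diff < 0:
//		// create -diff pods, capped at rm.burstReplicas
//	case diff > 0:
//		// delete diff pods, capped at rm.burstReplicas
//	}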
@@ -510,14 +509,14 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re
rm.expectations.ExpectCreations(rcKey, diff)
var wg sync.WaitGroup
wg.Add(diff)
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
glog.V(2).Infof("Too few %q/%q replicas, need %d, creating %d", rc.Namespace, rc.Name, *(rc.Spec.Replicas), diff)
for i := 0; i < diff; i++ {
go func() {
defer wg.Done()
var err error
if rm.garbageCollectorEnabled {
var trueVar = true
controllerRef := &api.OwnerReference{
controllerRef := &v1.OwnerReference{
APIVersion: getRCKind().GroupVersion().String(),
Kind: getRCKind().Kind,
Name: rc.Name,
@@ -554,9 +553,9 @@ func (rm *ReplicationManager) manageReplicas(filteredPods []*api.Pod, rc *api.Re
if diff > rm.burstReplicas {
diff = rm.burstReplicas
}
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, rc.Spec.Replicas, diff)
glog.V(2).Infof("Too many %q/%q replicas, need %d, deleting %d", rc.Namespace, rc.Name, *(rc.Spec.Replicas), diff)
// No need to sort pods if we are about to delete all of them
if rc.Spec.Replicas != 0 {
if *(rc.Spec.Replicas) != 0 {
// Sort the pods in the order such that not-ready < ready, unscheduled
// < scheduled, and pending < running. This ensures that we delete pods
// in the earlier stages whenever possible.
@@ -636,7 +635,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
if err != nil {
return err
}
rc := *obj.(*api.ReplicationController)
rc := *obj.(*v1.ReplicationController)

// Check the expectations of the rc before counting active pods, otherwise a new pod can sneak in
// and update the expectations after we've retrieved active pods from the store. If a new pod enters
@@ -653,7 +652,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
// NOTE: filteredPods are pointing to objects from cache - if you need to
// modify them, you need to copy it first.
// TODO: Do the List and Filter in a single pass, or use an index.
var filteredPods []*api.Pod
var filteredPods []*v1.Pod
if rm.garbageCollectorEnabled {
// list all pods to include the pods that don't match the rc's selector
// anymore but has the stale controller ref.

@@ -27,14 +27,14 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/apimachinery/registered"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakeclientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/restclient"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
@@ -49,7 +49,7 @@ import (

var alwaysReady = func() bool { return true }

func getKey(rc *api.ReplicationController, t *testing.T) string {
func getKey(rc *v1.ReplicationController, t *testing.T) string {
if key, err := controller.KeyFunc(rc); err != nil {
t.Errorf("Unexpected error getting key for rc %v: %v", rc.Name, err)
return ""
@@ -58,36 +58,36 @@ func getKey(rc *api.ReplicationController, t *testing.T) string {
}
}

func newReplicationController(replicas int) *api.ReplicationController {
rc := &api.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(api.GroupName).GroupVersion.String()},
ObjectMeta: api.ObjectMeta{
func newReplicationController(replicas int) *v1.ReplicationController {
rc := &v1.ReplicationController{
TypeMeta: unversioned.TypeMeta{APIVersion: registered.GroupOrDie(v1.GroupName).GroupVersion.String()},
ObjectMeta: v1.ObjectMeta{
UID: uuid.NewUUID(),
Name: "foobar",
Namespace: api.NamespaceDefault,
Namespace: v1.NamespaceDefault,
ResourceVersion: "18",
},
Spec: api.ReplicationControllerSpec{
Replicas: int32(replicas),
Spec: v1.ReplicationControllerSpec{
Replicas: func() *int32 { i := int32(replicas); return &i }(),
Selector: map[string]string{"foo": "bar"},
Template: &api.PodTemplateSpec{
ObjectMeta: api.ObjectMeta{
Template: &v1.PodTemplateSpec{
ObjectMeta: v1.ObjectMeta{
Labels: map[string]string{
"name": "foo",
"type": "production",
},
},
Spec: api.PodSpec{
Containers: []api.Container{
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Image: "foo/bar",
TerminationMessagePath: api.TerminationMessagePathDefault,
ImagePullPolicy: api.PullIfNotPresent,
TerminationMessagePath: v1.TerminationMessagePathDefault,
ImagePullPolicy: v1.PullIfNotPresent,
SecurityContext: securitycontext.ValidSecurityContextWithContainerDefaults(),
},
},
RestartPolicy: api.RestartPolicyAlways,
DNSPolicy: api.DNSDefault,
RestartPolicy: v1.RestartPolicyAlways,
DNSPolicy: v1.DNSDefault,
NodeSelector: map[string]string{
"baz": "blah",
},
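// Replicas is now *int32: the versioned API needs nil to mean "unset"
// (defaulted server-side) as distinct from an explicit 0. Go cannot take
// the address of an expression like &int32(replicas), hence the inline
// closure in the fixture above; a two-line local variant works the same way:
//
//	r := int32(replicas)
//	rc.Spec.Replicas = &r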
@@ -99,39 +99,39 @@ func newReplicationController(replicas int) *api.ReplicationController {
}

// create a pod with the given phase for the given rc (same selectors and namespace).
func newPod(name string, rc *api.ReplicationController, status api.PodPhase, lastTransitionTime *unversioned.Time) *api.Pod {
var conditions []api.PodCondition
if status == api.PodRunning {
condition := api.PodCondition{Type: api.PodReady, Status: api.ConditionTrue}
func newPod(name string, rc *v1.ReplicationController, status v1.PodPhase, lastTransitionTime *unversioned.Time) *v1.Pod {
var conditions []v1.PodCondition
if status == v1.PodRunning {
condition := v1.PodCondition{Type: v1.PodReady, Status: v1.ConditionTrue}
if lastTransitionTime != nil {
condition.LastTransitionTime = *lastTransitionTime
}
conditions = append(conditions, condition)
}
return &api.Pod{
ObjectMeta: api.ObjectMeta{
return &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: name,
Labels: rc.Spec.Selector,
Namespace: rc.Namespace,
},
Status: api.PodStatus{Phase: status, Conditions: conditions},
Status: v1.PodStatus{Phase: status, Conditions: conditions},
}
}

// create count pods with the given phase for the given rc (same selectors and namespace), and add them to the store.
func newPodList(store cache.Store, count int, status api.PodPhase, rc *api.ReplicationController, name string) *api.PodList {
pods := []api.Pod{}
func newPodList(store cache.Store, count int, status v1.PodPhase, rc *v1.ReplicationController, name string) *v1.PodList {
pods := []v1.Pod{}
var trueVar = true
controllerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
controllerReference := v1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
for i := 0; i < count; i++ {
pod := newPod(fmt.Sprintf("%s%d", name, i), rc, status, nil)
pod.OwnerReferences = []api.OwnerReference{controllerReference}
pod.OwnerReferences = []v1.OwnerReference{controllerReference}
if store != nil {
store.Add(pod)
}
pods = append(pods, *pod)
}
return &api.PodList{
return &v1.PodList{
Items: pods,
}
}
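// Typical use of these fixtures elsewhere in this file: seed the store with
// running pods for an rc, then hand one pod to an event handler. A usage
// sketch, assuming a manager from NewReplicationManagerFromClient:
//
//	rc := newReplicationController(2)
//	manager.rcStore.Indexer.Add(rc)
//	pods := newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
//	manager.addPod(&pods.Items[0])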
|
||||
@@ -158,7 +158,7 @@ type serverResponse struct {
|
||||
}
|
||||
|
||||
func TestSyncReplicationControllerDoesNothing(t *testing.T) {
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -166,7 +166,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
|
||||
// 2 running pods, a controller with 2 replicas, sync is a no-op
|
||||
controllerSpec := newReplicationController(2)
|
||||
manager.rcStore.Indexer.Add(controllerSpec)
|
||||
newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod")
|
||||
newPodList(manager.podStore.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
|
||||
|
||||
manager.podControl = &fakePodControl
|
||||
manager.syncReplicationController(getKey(controllerSpec, t))
|
||||
@@ -174,7 +174,7 @@ func TestSyncReplicationControllerDoesNothing(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncReplicationControllerDeletes(t *testing.T) {
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -183,14 +183,14 @@ func TestSyncReplicationControllerDeletes(t *testing.T) {
|
||||
// 2 running pods and a controller with 1 replica, one pod delete expected
|
||||
controllerSpec := newReplicationController(1)
|
||||
manager.rcStore.Indexer.Add(controllerSpec)
|
||||
newPodList(manager.podStore.Indexer, 2, api.PodRunning, controllerSpec, "pod")
|
||||
newPodList(manager.podStore.Indexer, 2, v1.PodRunning, controllerSpec, "pod")
|
||||
|
||||
manager.syncReplicationController(getKey(controllerSpec, t))
|
||||
validateSyncReplication(t, &fakePodControl, 0, 1, 0)
|
||||
}
|
||||
|
||||
func TestDeleteFinalStateUnknown(t *testing.T) {
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
@@ -206,7 +206,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
|
||||
// the controller matching the selectors of the deleted pod into the work queue.
|
||||
controllerSpec := newReplicationController(1)
|
||||
manager.rcStore.Indexer.Add(controllerSpec)
|
||||
pods := newPodList(nil, 1, api.PodRunning, controllerSpec, "pod")
|
||||
pods := newPodList(nil, 1, v1.PodRunning, controllerSpec, "pod")
|
||||
manager.deletePod(cache.DeletedFinalStateUnknown{Key: "foo", Obj: &pods.Items[0]})
|
||||
|
||||
go manager.worker()
|
||||
@@ -223,7 +223,7 @@ func TestDeleteFinalStateUnknown(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestSyncReplicationControllerCreates(t *testing.T) {
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
|
||||
@@ -245,7 +245,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
|
||||
}
|
||||
testServer := httptest.NewServer(&fakeHandler)
|
||||
defer testServer.Close()
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
|
||||
@@ -253,8 +253,8 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
|
||||
activePods := 5
|
||||
rc := newReplicationController(activePods)
|
||||
manager.rcStore.Indexer.Add(rc)
|
||||
rc.Status = api.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
|
||||
newPodList(manager.podStore.Indexer, activePods, api.PodRunning, rc, "pod")
|
||||
rc.Status = v1.ReplicationControllerStatus{Replicas: int32(activePods), ReadyReplicas: int32(activePods), AvailableReplicas: int32(activePods)}
|
||||
newPodList(manager.podStore.Indexer, activePods, v1.PodRunning, rc, "pod")
|
||||
|
||||
fakePodControl := controller.FakePodControl{}
|
||||
manager.podControl = &fakePodControl
|
||||
@@ -267,7 +267,7 @@ func TestStatusUpdatesWithoutReplicasChange(t *testing.T) {
|
||||
|
||||
// This response body is just so we don't err out decoding the http response, all
|
||||
// we care about is the request body sent below.
|
||||
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
|
||||
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
|
||||
fakeHandler.ResponseBody = response
|
||||
|
||||
rc.Generation = rc.Generation + 1
|
||||
@@ -286,7 +286,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
|
||||
}
|
||||
testServer := httptest.NewServer(&fakeHandler)
|
||||
defer testServer.Close()
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
||||
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
|
||||
manager.podStoreSynced = alwaysReady
|
||||
|
||||
@@ -294,16 +294,16 @@ func TestControllerUpdateReplicas(t *testing.T) {
|
||||
// Status.Replica should update to match number of pods in system, 1 new pod should be created.
|
||||
rc := newReplicationController(5)
|
||||
manager.rcStore.Indexer.Add(rc)
|
||||
rc.Status = api.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
|
||||
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, FullyLabeledReplicas: 6, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 0}
|
||||
rc.Generation = 1
|
||||
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod")
|
||||
newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
|
||||
rcCopy := *rc
|
||||
extraLabelMap := map[string]string{"foo": "bar", "extraKey": "extraValue"}
|
||||
rcCopy.Spec.Selector = extraLabelMap
|
||||
newPodList(manager.podStore.Indexer, 2, api.PodRunning, &rcCopy, "podWithExtraLabel")
|
||||
newPodList(manager.podStore.Indexer, 2, v1.PodRunning, &rcCopy, "podWithExtraLabel")
|
||||
|
||||
// This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response

fakePodControl := controller.FakePodControl{}
@@ -315,7 +315,7 @@ func TestControllerUpdateReplicas(t *testing.T) {
// 2. Status.FullyLabeledReplicas should equal to the number of pods that
// has the extra labels, i.e., 2.
// 3. Every update to the status should include the Generation of the spec.
rc.Status = api.ReplicationControllerStatus{Replicas: 4, ReadyReplicas: 4, AvailableReplicas: 4, ObservedGeneration: 1}
rc.Status = v1.ReplicationControllerStatus{Replicas: 4, ReadyReplicas: 4, AvailableReplicas: 4, ObservedGeneration: 1}

decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
@@ -331,7 +331,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
}
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady
@@ -339,7 +339,7 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {

controllerSpec := newReplicationController(2)
manager.rcStore.Indexer.Add(controllerSpec)
newPodList(manager.podStore.Indexer, 1, api.PodRunning, controllerSpec, "pod")
newPodList(manager.podStore.Indexer, 1, v1.PodRunning, controllerSpec, "pod")

// Creates a replica and sets expectations
controllerSpec.Status.Replicas = 1
@@ -386,47 +386,47 @@ func TestSyncReplicationControllerDormancy(t *testing.T) {
}

func TestPodControllerLookup(t *testing.T) {
manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager := NewReplicationManagerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}), controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady
testCases := []struct {
inRCs []*api.ReplicationController
pod *api.Pod
inRCs []*v1.ReplicationController
pod *v1.Pod
outRCName string
}{
// pods without labels don't match any rcs
{
inRCs: []*api.ReplicationController{
{ObjectMeta: api.ObjectMeta{Name: "basic"}}},
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo1", Namespace: api.NamespaceAll}},
inRCs: []*v1.ReplicationController{
{ObjectMeta: v1.ObjectMeta{Name: "basic"}}},
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo1", Namespace: v1.NamespaceAll}},
outRCName: "",
},
// Matching labels, not namespace
{
inRCs: []*api.ReplicationController{
inRCs: []*v1.ReplicationController{
{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Spec: api.ReplicationControllerSpec{
ObjectMeta: v1.ObjectMeta{Name: "foo"},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"},
},
},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "foo2", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRCName: "",
},
// Matching ns and labels returns the key to the rc, not the rc name
{
inRCs: []*api.ReplicationController{
inRCs: []*v1.ReplicationController{
{
ObjectMeta: api.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: api.ReplicationControllerSpec{
ObjectMeta: v1.ObjectMeta{Name: "bar", Namespace: "ns"},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"foo": "bar"},
},
},
},
pod: &api.Pod{
ObjectMeta: api.ObjectMeta{
pod: &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: "foo3", Namespace: "ns", Labels: map[string]string{"foo": "bar"}}},
outRCName: "bar",
},
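The table above drives pod-to-controller lookup: a controller claims a pod only when namespace and label selector both match. A rough, stdlib-only sketch of that matching rule (illustrative names, not the manager's actual helper):

// matchesSelector is an illustrative stand-in for the lookup rule exercised
// above: same namespace, and every selector key/value present in the pod's
// labels. An empty selector matches nothing, mirroring the "basic" case.
func matchesSelector(rcNamespace string, selector map[string]string, podNamespace string, podLabels map[string]string) bool {
    if rcNamespace != podNamespace || len(selector) == 0 {
        return false
    }
    for k, v := range selector {
        if podLabels[k] != v {
            return false
        }
    }
    return true
}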
@@ -452,7 +452,7 @@ func TestWatchControllers(t *testing.T) {
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady

var testControllerSpec api.ReplicationController
var testControllerSpec v1.ReplicationController
received := make(chan string)

// The update sent through the fakeWatcher should make its way into the workqueue,
@@ -464,8 +464,8 @@ func TestWatchControllers(t *testing.T) {
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
controllerSpec := *obj.(*api.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
controllerSpec := *obj.(*v1.ReplicationController)
if !v1.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("Expected %#v, but got %#v", testControllerSpec, controllerSpec)
}
close(received)
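Semantic.DeepDerivative differs from a plain deep equality in that fields left at their zero value in the expected object act as wildcards, so a sparse expectation can match a fully populated result. A rough stdlib approximation for flat string maps (the real helper walks arbitrary types via reflection):

// derivativeEqual is an illustrative, map-only analogue of DeepDerivative:
// empty values in the expectation are ignored; set values must match exactly.
func derivativeEqual(expected, actual map[string]string) bool {
    for k, want := range expected {
        if want == "" {
            continue // zero value in the expectation acts as "don't care"
        }
        if actual[k] != want {
            return false
        }
    }
    return true
}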
@@ -507,8 +507,8 @@ func TestWatchPods(t *testing.T) {
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
controllerSpec := obj.(*api.ReplicationController)
if !api.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
controllerSpec := obj.(*v1.ReplicationController)
if !v1.Semantic.DeepDerivative(controllerSpec, testControllerSpec) {
t.Errorf("\nExpected %#v,\nbut got %#v", testControllerSpec, controllerSpec)
}
close(received)
@@ -522,9 +522,9 @@ func TestWatchPods(t *testing.T) {
go manager.internalPodInformer.Run(stopCh)
go wait.Until(manager.worker, 10*time.Millisecond, stopCh)

pods := newPodList(nil, 1, api.PodRunning, testControllerSpec, "pod")
pods := newPodList(nil, 1, v1.PodRunning, testControllerSpec, "pod")
testPod := pods.Items[0]
testPod.Status.Phase = api.PodFailed
testPod.Status.Phase = v1.PodFailed
fakeWatch.Add(&testPod)

select {
@@ -545,7 +545,7 @@ func TestUpdatePods(t *testing.T) {
if !exists || err != nil {
t.Errorf("Expected to find controller under key %v", key)
}
received <- obj.(*api.ReplicationController).Name
received <- obj.(*v1.ReplicationController).Name
return nil
}

@@ -564,7 +564,7 @@ func TestUpdatePods(t *testing.T) {
// case 1: We put in the podStore a pod with labels matching
// testControllerSpec1, then update its labels to match testControllerSpec2.
// We expect to receive a sync request for both controllers.
pod1 := newPodList(manager.podStore.Indexer, 1, api.PodRunning, testControllerSpec1, "pod").Items[0]
pod1 := newPodList(manager.podStore.Indexer, 1, v1.PodRunning, testControllerSpec1, "pod").Items[0]
pod1.ResourceVersion = "1"
pod2 := pod1
pod2.Labels = testControllerSpec2.Spec.Selector
@@ -612,14 +612,14 @@ func TestControllerUpdateRequeue(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady

rc := newReplicationController(1)
manager.rcStore.Indexer.Add(rc)
rc.Status = api.ReplicationControllerStatus{Replicas: 2}
newPodList(manager.podStore.Indexer, 1, api.PodRunning, rc, "pod")
rc.Status = v1.ReplicationControllerStatus{Replicas: 2}
newPodList(manager.podStore.Indexer, 1, v1.PodRunning, rc, "pod")

fakePodControl := controller.FakePodControl{}
manager.podControl = &fakePodControl
@@ -640,11 +640,11 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
return true, rc, nil
})
c.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
return true, &api.ReplicationController{}, fmt.Errorf("Fake error")
return true, &v1.ReplicationController{}, fmt.Errorf("Fake error")
})
fakeRCClient := c.Core().ReplicationControllers("default")
numReplicas := int32(10)
status := api.ReplicationControllerStatus{Replicas: numReplicas}
status := v1.ReplicationControllerStatus{Replicas: numReplicas}
updateReplicationControllerStatus(fakeRCClient, *rc, status)
updates, gets := 0, 0
for _, a := range c.Actions() {
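The fake clientset used here replays every recorded action through a chain of reactors; registering one under the wildcard verb and resource, as above, makes all subsequent calls fail. A minimal sketch of that dispatch idea with invented types (not the fake client's real API):

// reaction is an invented stand-in for a fake-client reactor: the first
// entry whose verb/resource pattern matches the action handles it.
type reaction struct {
    verb, resource string
    react          func() error
}

func dispatch(chain []reaction, verb, resource string) error {
    for _, r := range chain {
        if (r.verb == "*" || r.verb == verb) && (r.resource == "*" || r.resource == resource) {
            return r.react()
        }
    }
    return nil // no reactor claimed the action
}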
@@ -664,7 +664,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {
updates++
// Confirm that the update has the right status.Replicas even though the Get
// returned an rc with replicas=1.
if c, ok := action.GetObject().(*api.ReplicationController); !ok {
if c, ok := action.GetObject().(*v1.ReplicationController); !ok {
t.Errorf("Expected an rc as the argument to update, got %T", c)
} else if c.Status.Replicas != numReplicas {
t.Errorf("Expected update for rc to contain replicas %v, got %v instead",
@@ -682,7 +682,7 @@ func TestControllerUpdateStatusWithFailure(t *testing.T) {

// TODO: This test is too hairy for a unittest. It should be moved to an E2E suite.
func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, burstReplicas, 0)
manager.podStoreSynced = alwaysReady
@@ -692,7 +692,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
manager.rcStore.Indexer.Add(controllerSpec)

expectedPods := 0
pods := newPodList(nil, numReplicas, api.PodPending, controllerSpec, "pod")
pods := newPodList(nil, numReplicas, v1.PodPending, controllerSpec, "pod")

rcKey, err := controller.KeyFunc(controllerSpec)
if err != nil {
@@ -702,7 +702,7 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// Size up the controller, then size it down, and confirm the expected create/delete pattern
for _, replicas := range []int{numReplicas, 0} {

controllerSpec.Spec.Replicas = int32(replicas)
*(controllerSpec.Spec.Replicas) = int32(replicas)
manager.rcStore.Indexer.Add(controllerSpec)

for i := 0; i < numReplicas; i += burstReplicas {
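Note the shape change hiding in this hunk: on the internal type Spec.Replicas is a plain int32, while on v1 it is *int32 so an omitted replica count can be defaulted rather than read as zero. That is why assignments and comparisons throughout the diff gain a dereference. A self-contained sketch of the pattern (the spec struct here is a stand-in, not the real type):

package main

import "fmt"

// spec stands in for v1.ReplicationControllerSpec: Replicas is a pointer so
// "unset" can be distinguished from an explicit zero.
type spec struct {
    Replicas *int32
}

func int32Ptr(i int32) *int32 { return &i }

func main() {
    s := spec{Replicas: int32Ptr(2)}
    *(s.Replicas) = 0                             // writes go through the pointer
    fmt.Println(s.Replicas != nil, *(s.Replicas)) // reads nil-check, then dereference
}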
@@ -745,11 +745,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
// To accurately simulate a watch we must delete the exact pods
// the rc is waiting for.
expectedDels := manager.expectations.GetUIDs(getKey(controllerSpec, t))
podsToDelete := []*api.Pod{}
podsToDelete := []*v1.Pod{}
for _, key := range expectedDels.List() {
nsName := strings.Split(key, "/")
podsToDelete = append(podsToDelete, &api.Pod{
ObjectMeta: api.ObjectMeta{
podsToDelete = append(podsToDelete, &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: controllerSpec.Spec.Selector,
@@ -789,8 +789,8 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)
t.Fatalf("Waiting on unexpected number of deletes.")
}
nsName := strings.Split(expectedDel.List()[0], "/")
lastPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
lastPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: nsName[1],
Namespace: nsName[0],
Labels: controllerSpec.Spec.Selector,
@@ -804,11 +804,11 @@ func doTestControllerBurstReplicas(t *testing.T, burstReplicas, numReplicas int)

// Confirm that we've created the right number of replicas
activePods := int32(len(manager.podStore.Indexer.List()))
if activePods != controllerSpec.Spec.Replicas {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", controllerSpec.Spec.Replicas, activePods)
if activePods != *(controllerSpec.Spec.Replicas) {
t.Fatalf("Unexpected number of active pods, expected %d, got %d", *(controllerSpec.Spec.Replicas), activePods)
}
// Replenish the pod list, since we cut it down sizing up
pods = newPodList(nil, replicas, api.PodRunning, controllerSpec, "pod")
pods = newPodList(nil, replicas, v1.PodRunning, controllerSpec, "pod")
}
}

@@ -832,7 +832,7 @@ func (fe FakeRCExpectations) SatisfiedExpectations(controllerKey string) bool {
// TestRCSyncExpectations tests that a pod cannot sneak in between counting active pods
// and checking expectations.
func TestRCSyncExpectations(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
manager.podStoreSynced = alwaysReady
@@ -840,7 +840,7 @@ func TestRCSyncExpectations(t *testing.T) {

controllerSpec := newReplicationController(2)
manager.rcStore.Indexer.Add(controllerSpec)
pods := newPodList(nil, 2, api.PodPending, controllerSpec, "pod")
pods := newPodList(nil, 2, v1.PodPending, controllerSpec, "pod")
manager.podStore.Indexer.Add(&pods.Items[0])
postExpectationsPod := pods.Items[1]

@@ -857,7 +857,7 @@ func TestRCSyncExpectations(t *testing.T) {
}

func TestDeleteControllerAndExpectations(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady

@@ -899,7 +899,7 @@ func TestDeleteControllerAndExpectations(t *testing.T) {
}

func TestRCManagerNotReady(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
fakePodControl := controller.FakePodControl{}
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 2, 0)
manager.podControl = &fakePodControl
@@ -925,10 +925,10 @@ func TestRCManagerNotReady(t *testing.T) {
}

// shuffle returns a new shuffled list of container controllers.
func shuffle(controllers []*api.ReplicationController) []*api.ReplicationController {
func shuffle(controllers []*v1.ReplicationController) []*v1.ReplicationController {
numControllers := len(controllers)
randIndexes := rand.Perm(numControllers)
shuffled := make([]*api.ReplicationController, numControllers)
shuffled := make([]*v1.ReplicationController, numControllers)
for i := 0; i < numControllers; i++ {
shuffled[i] = controllers[randIndexes[i]]
}
@@ -936,14 +936,14 @@ func shuffle(controllers []*api.ReplicationController) []*api.ReplicationControl
}

func TestOverlappingRCs(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

for i := 0; i < 5; i++ {
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady

// Create 10 rcs, shuffled them randomly and insert them into the rc manager's store
var controllers []*api.ReplicationController
var controllers []*v1.ReplicationController
for j := 1; j < 10; j++ {
controllerSpec := newReplicationController(1)
controllerSpec.CreationTimestamp = unversioned.Date(2014, time.December, j, 0, 0, 0, 0, time.Local)
@@ -955,7 +955,7 @@ func TestOverlappingRCs(t *testing.T) {
manager.rcStore.Indexer.Add(shuffledControllers[j])
}
// Add a pod and make sure only the oldest rc is synced
pods := newPodList(nil, 1, api.PodPending, controllers[0], "pod")
pods := newPodList(nil, 1, v1.PodPending, controllers[0], "pod")
rcKey := getKey(controllers[0], t)

manager.addPod(&pods.Items[0])
@@ -967,7 +967,7 @@ func TestOverlappingRCs(t *testing.T) {
}

func TestDeletionTimestamp(t *testing.T) {
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, 10, 0)
manager.podStoreSynced = alwaysReady

@@ -977,7 +977,7 @@ func TestDeletionTimestamp(t *testing.T) {
if err != nil {
t.Errorf("Couldn't get key for object %#v: %v", controllerSpec, err)
}
pod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
pod := newPodList(nil, 1, v1.PodPending, controllerSpec, "pod").Items[0]
pod.DeletionTimestamp = &unversioned.Time{Time: time.Now()}
pod.ResourceVersion = "1"
manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
@@ -998,7 +998,7 @@ func TestDeletionTimestamp(t *testing.T) {

// An update from no deletion timestamp to having one should be treated
// as a deletion.
oldPod := newPodList(nil, 1, api.PodPending, controllerSpec, "pod").Items[0]
oldPod := newPodList(nil, 1, v1.PodPending, controllerSpec, "pod").Items[0]
oldPod.ResourceVersion = "2"
manager.expectations.ExpectDeletions(rcKey, []string{controller.PodKey(&pod)})
manager.updatePod(&oldPod, &pod)
@@ -1016,8 +1016,8 @@ func TestDeletionTimestamp(t *testing.T) {

// An update to the pod (including an update to the deletion timestamp)
// should not be counted as a second delete.
secondPod := &api.Pod{
ObjectMeta: api.ObjectMeta{
secondPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{
Namespace: pod.Namespace,
Name: "secondPod",
Labels: pod.Labels,
@@ -1057,20 +1057,20 @@ func TestDeletionTimestamp(t *testing.T) {
}

func BenchmarkGetPodControllerMultiNS(b *testing.B) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)

const nsNum = 1000

pods := []api.Pod{}
pods := []v1.Pod{}
for i := 0; i < nsNum; i++ {
ns := fmt.Sprintf("ns-%d", i)
for j := 0; j < 10; j++ {
rcName := fmt.Sprintf("rc-%d", j)
for k := 0; k < 10; k++ {
podName := fmt.Sprintf("pod-%d-%d", j, k)
pods = append(pods, api.Pod{
ObjectMeta: api.ObjectMeta{
pods = append(pods, v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: ns,
Labels: map[string]string{"rcName": rcName},
@@ -1084,9 +1084,9 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
ns := fmt.Sprintf("ns-%d", i)
for j := 0; j < 10; j++ {
rcName := fmt.Sprintf("rc-%d", j)
manager.rcStore.Indexer.Add(&api.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: ns},
Spec: api.ReplicationControllerSpec{
manager.rcStore.Indexer.Add(&v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: ns},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName},
},
})
@@ -1103,19 +1103,19 @@ func BenchmarkGetPodControllerMultiNS(b *testing.B) {
}

func BenchmarkGetPodControllerSingleNS(b *testing.B) {
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
client := clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(client, controller.NoResyncPeriodFunc, BurstReplicas, 0)

const rcNum = 1000
const replicaNum = 3

pods := []api.Pod{}
pods := []v1.Pod{}
for i := 0; i < rcNum; i++ {
rcName := fmt.Sprintf("rc-%d", i)
for j := 0; j < replicaNum; j++ {
podName := fmt.Sprintf("pod-%d-%d", i, j)
pods = append(pods, api.Pod{
ObjectMeta: api.ObjectMeta{
pods = append(pods, v1.Pod{
ObjectMeta: v1.ObjectMeta{
Name: podName,
Namespace: "foo",
Labels: map[string]string{"rcName": rcName},
@@ -1126,9 +1126,9 @@ func BenchmarkGetPodControllerSingleNS(b *testing.B) {

for i := 0; i < rcNum; i++ {
rcName := fmt.Sprintf("rc-%d", i)
manager.rcStore.Indexer.Add(&api.ReplicationController{
ObjectMeta: api.ObjectMeta{Name: rcName, Namespace: "foo"},
Spec: api.ReplicationControllerSpec{
manager.rcStore.Indexer.Add(&v1.ReplicationController{
ObjectMeta: v1.ObjectMeta{Name: rcName, Namespace: "foo"},
Spec: v1.ReplicationControllerSpec{
Selector: map[string]string{"rcName": rcName},
},
})
@@ -1158,10 +1158,10 @@ func TestDoNotPatchPodWithOtherControlRef(t *testing.T) {
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
var trueVar = true
otherControllerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
otherControllerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "v1", Kind: "ReplicationController", Name: "AnotherRC", Controller: &trueVar}
// add to podStore a matching Pod controlled by another controller. Expect no patch.
pod := newPod("pod", rc, api.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{otherControllerReference}
pod := newPod("pod", rc, v1.PodRunning, nil)
pod.OwnerReferences = []v1.OwnerReference{otherControllerReference}
manager.podStore.Indexer.Add(pod)
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
@@ -1178,9 +1178,9 @@ func TestPatchPodWithOtherOwnerRef(t *testing.T) {
// add to podStore one more matching pod that doesn't have a controller
// ref, but has an owner ref pointing to other object. Expect a patch to
// take control of it.
unrelatedOwnerReference := api.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rc, api.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{unrelatedOwnerReference}
unrelatedOwnerReference := v1.OwnerReference{UID: uuid.NewUUID(), APIVersion: "batch/v1", Kind: "Job", Name: "Job"}
pod := newPod("pod", rc, v1.PodRunning, nil)
pod.OwnerReferences = []v1.OwnerReference{unrelatedOwnerReference}
manager.podStore.Indexer.Add(pod)

err := manager.syncReplicationController(getKey(rc, t))
@@ -1197,9 +1197,9 @@ func TestPatchPodWithCorrectOwnerRef(t *testing.T) {
manager.rcStore.Indexer.Add(rc)
// add to podStore a matching pod that has an ownerRef pointing to the rc,
// but ownerRef.Controller is false. Expect a patch to take control it.
rcOwnerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
pod := newPod("pod", rc, api.PodRunning, nil)
pod.OwnerReferences = []api.OwnerReference{rcOwnerReference}
rcOwnerReference := v1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name}
pod := newPod("pod", rc, v1.PodRunning, nil)
pod.OwnerReferences = []v1.OwnerReference{rcOwnerReference}
manager.podStore.Indexer.Add(pod)

err := manager.syncReplicationController(getKey(rc, t))
@@ -1216,8 +1216,8 @@ func TestPatchPodFails(t *testing.T) {
manager.rcStore.Indexer.Add(rc)
// add to podStore two matching pods. Expect two patches to take control
// them.
manager.podStore.Indexer.Add(newPod("pod1", rc, api.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod2", rc, api.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
// let both patches fail. The rc manager will assume it fails to take
// control of the pods and create new ones.
fakePodControl.Err = fmt.Errorf("Fake Error")
@@ -1235,9 +1235,9 @@ func TestPatchExtraPodsThenDelete(t *testing.T) {
manager.rcStore.Indexer.Add(rc)
// add to podStore three matching pods. Expect three patches to take control
// them, and later delete one of them.
manager.podStore.Indexer.Add(newPod("pod1", rc, api.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod2", rc, api.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod3", rc, api.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod1", rc, v1.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod2", rc, v1.PodRunning, nil))
manager.podStore.Indexer.Add(newPod("pod3", rc, v1.PodRunning, nil))
err := manager.syncReplicationController(getKey(rc, t))
if err != nil {
t.Fatal(err)
@@ -1251,11 +1251,11 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
rc := newReplicationController(2)
manager.rcStore.Indexer.Add(rc)
// put one pod in the podStore
pod := newPod("pod", rc, api.PodRunning, nil)
pod := newPod("pod", rc, v1.PodRunning, nil)
pod.ResourceVersion = "1"
var trueVar = true
rcOwnerReference := api.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
pod.OwnerReferences = []api.OwnerReference{rcOwnerReference}
rcOwnerReference := v1.OwnerReference{UID: rc.UID, APIVersion: "v1", Kind: "ReplicationController", Name: rc.Name, Controller: &trueVar}
pod.OwnerReferences = []v1.OwnerReference{rcOwnerReference}
updatedPod := *pod
// reset the labels
updatedPod.Labels = make(map[string]string)
@@ -1278,7 +1278,7 @@ func TestUpdateLabelsRemoveControllerRef(t *testing.T) {
t.Fatal(err)
}
// expect 1 patch to be sent to remove the controllerRef for the pod.
// expect 2 creates because the rc.Spec.Replicas=2 and there exists no
// expect 2 creates because the *(rc.Spec.Replicas)=2 and there exists no
// matching pod.
validateSyncReplication(t, fakePodControl, 2, 0, 1)
fakePodControl.Clear()
@@ -1288,7 +1288,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
manager, fakePodControl := setupManagerWithGCEnabled()
rc := newReplicationController(2)
// put 2 pods in the podStore
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod")
newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")
// update the RC so that its selector no longer matches the pods
updatedRC := *rc
updatedRC.Spec.Selector = map[string]string{"foo": "baz"}
@@ -1309,7 +1309,7 @@ func TestUpdateSelectorControllerRef(t *testing.T) {
t.Fatal(err)
}
// expect 2 patches to be sent to remove the controllerRef for the pods.
// expect 2 creates because the rc.Spec.Replicas=2 and there exists no
// expect 2 creates because the *(rc.Spec.Replicas)=2 and there exists no
// matching pod.
validateSyncReplication(t, fakePodControl, 2, 0, 2)
fakePodControl.Clear()
@@ -1323,7 +1323,7 @@ func TestDoNotAdoptOrCreateIfBeingDeleted(t *testing.T) {
now := unversioned.Now()
rc.DeletionTimestamp = &now
manager.rcStore.Indexer.Add(rc)
pod1 := newPod("pod1", rc, api.PodRunning, nil)
pod1 := newPod("pod1", rc, v1.PodRunning, nil)
manager.podStore.Indexer.Add(pod1)

// no patch, no create
@@ -1343,21 +1343,21 @@ func TestReadyReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady

// Status.Replica should update to match number of pods in system, 1 new pod should be created.
rc := newReplicationController(2)
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, AvailableReplicas: 0, ObservedGeneration: 1}
rc.Generation = 1
manager.rcStore.Indexer.Add(rc)

newPodList(manager.podStore.Indexer, 2, api.PodPending, rc, "pod")
newPodList(manager.podStore.Indexer, 2, api.PodRunning, rc, "pod")
newPodList(manager.podStore.Indexer, 2, v1.PodPending, rc, "pod")
newPodList(manager.podStore.Indexer, 2, v1.PodRunning, rc, "pod")

// This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response

fakePodControl := controller.FakePodControl{}
@@ -1366,7 +1366,7 @@ func TestReadyReplicas(t *testing.T) {
manager.syncReplicationController(getKey(rc, t))

// ReadyReplicas should go from 0 to 2.
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 1}
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 2, ObservedGeneration: 1}

decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
@@ -1382,13 +1382,13 @@ func TestAvailableReplicas(t *testing.T) {
testServer := httptest.NewServer(&fakeHandler)
defer testServer.Close()

c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
c := clientset.NewForConfigOrDie(&restclient.Config{Host: testServer.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
manager := NewReplicationManagerFromClient(c, controller.NoResyncPeriodFunc, BurstReplicas, 0)
manager.podStoreSynced = alwaysReady

// Status.Replica should update to match number of pods in system, 1 new pod should be created.
rc := newReplicationController(2)
rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, ObservedGeneration: 1}
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 0, ObservedGeneration: 1}
rc.Generation = 1
// minReadySeconds set to 15s
rc.Spec.MinReadySeconds = 15
@@ -1396,16 +1396,16 @@ func TestAvailableReplicas(t *testing.T) {

// First pod becomes ready 20s ago
moment := unversioned.Time{Time: time.Now().Add(-2e10)}
pod := newPod("pod", rc, api.PodRunning, &moment)
pod := newPod("pod", rc, v1.PodRunning, &moment)
manager.podStore.Indexer.Add(pod)

// Second pod becomes ready now
otherMoment := unversioned.Now()
otherPod := newPod("otherPod", rc, api.PodRunning, &otherMoment)
otherPod := newPod("otherPod", rc, v1.PodRunning, &otherMoment)
manager.podStore.Indexer.Add(otherPod)

// This response body is just so we don't err out decoding the http response
response := runtime.EncodeOrDie(testapi.Default.Codec(), &api.ReplicationController{})
response := runtime.EncodeOrDie(testapi.Default.Codec(), &v1.ReplicationController{})
fakeHandler.ResponseBody = response

fakePodControl := controller.FakePodControl{}
@@ -1414,7 +1414,7 @@ func TestAvailableReplicas(t *testing.T) {
// The controller should see only one available pod.
manager.syncReplicationController(getKey(rc, t))

rc.Status = api.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 1, ObservedGeneration: 1}
rc.Status = v1.ReplicationControllerStatus{Replicas: 2, ReadyReplicas: 2, AvailableReplicas: 1, ObservedGeneration: 1}

decRc := runtime.EncodeOrDie(testapi.Default.Codec(), rc)
fakeHandler.ValidateRequest(t, testapi.Default.ResourcePath(replicationControllerResourceName(), rc.Namespace, rc.Name)+"/status", "PUT", &decRc)
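The scenario above hinges on the availability rule: a ready pod only counts as available once it has been ready for at least MinReadySeconds, so the pod that turned ready 20s ago clears the 15s bar while the one that just turned ready does not. A hedged stdlib sketch of that check (names here are illustrative):

package main

import (
    "fmt"
    "time"
)

// isAvailable sketches the MinReadySeconds rule: ready, and ready for long enough.
func isAvailable(ready bool, readySince time.Time, minReadySeconds int, now time.Time) bool {
    if !ready {
        return false
    }
    return !readySince.Add(time.Duration(minReadySeconds) * time.Second).After(now)
}

func main() {
    now := time.Now()
    fmt.Println(isAvailable(true, now.Add(-20*time.Second), 15, now)) // true: ready for 20s >= 15s
    fmt.Println(isAvailable(true, now, 15, now))                      // false: only just became ready
}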
@@ -1422,35 +1422,35 @@ func TestAvailableReplicas(t *testing.T) {
}

var (
imagePullBackOff api.ReplicationControllerConditionType = "ImagePullBackOff"
imagePullBackOff v1.ReplicationControllerConditionType = "ImagePullBackOff"

condImagePullBackOff = func() api.ReplicationControllerCondition {
return api.ReplicationControllerCondition{
condImagePullBackOff = func() v1.ReplicationControllerCondition {
return v1.ReplicationControllerCondition{
Type: imagePullBackOff,
Status: api.ConditionTrue,
Status: v1.ConditionTrue,
Reason: "NonExistentImage",
}
}

condReplicaFailure = func() api.ReplicationControllerCondition {
return api.ReplicationControllerCondition{
Type: api.ReplicationControllerReplicaFailure,
Status: api.ConditionTrue,
condReplicaFailure = func() v1.ReplicationControllerCondition {
return v1.ReplicationControllerCondition{
Type: v1.ReplicationControllerReplicaFailure,
Status: v1.ConditionTrue,
Reason: "OtherFailure",
}
}

condReplicaFailure2 = func() api.ReplicationControllerCondition {
return api.ReplicationControllerCondition{
Type: api.ReplicationControllerReplicaFailure,
Status: api.ConditionTrue,
condReplicaFailure2 = func() v1.ReplicationControllerCondition {
return v1.ReplicationControllerCondition{
Type: v1.ReplicationControllerReplicaFailure,
Status: v1.ConditionTrue,
Reason: "AnotherFailure",
}
}

status = func() *api.ReplicationControllerStatus {
return &api.ReplicationControllerStatus{
Conditions: []api.ReplicationControllerCondition{condReplicaFailure()},
status = func() *v1.ReplicationControllerStatus {
return &v1.ReplicationControllerStatus{
Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()},
}
}
)
@@ -1461,9 +1461,9 @@ func TestGetCondition(t *testing.T) {
tests := []struct {
name string

status api.ReplicationControllerStatus
condType api.ReplicationControllerConditionType
condStatus api.ConditionStatus
status v1.ReplicationControllerStatus
condType v1.ReplicationControllerConditionType
condStatus v1.ConditionStatus
condReason string

expected bool
@@ -1472,7 +1472,7 @@ func TestGetCondition(t *testing.T) {
name: "condition exists",

status: *exampleStatus,
condType: api.ReplicationControllerReplicaFailure,
condType: v1.ReplicationControllerReplicaFailure,

expected: true,
},
@@ -1499,34 +1499,34 @@ func TestSetCondition(t *testing.T) {
tests := []struct {
name string

status *api.ReplicationControllerStatus
cond api.ReplicationControllerCondition
status *v1.ReplicationControllerStatus
cond v1.ReplicationControllerCondition

expectedStatus *api.ReplicationControllerStatus
expectedStatus *v1.ReplicationControllerStatus
}{
{
name: "set for the first time",

status: &api.ReplicationControllerStatus{},
status: &v1.ReplicationControllerStatus{},
cond: condReplicaFailure(),

expectedStatus: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}},
expectedStatus: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()}},
},
{
name: "simple set",

status: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condImagePullBackOff()}},
status: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condImagePullBackOff()}},
cond: condReplicaFailure(),

expectedStatus: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condImagePullBackOff(), condReplicaFailure()}},
expectedStatus: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condImagePullBackOff(), condReplicaFailure()}},
},
{
name: "overwrite",

status: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}},
status: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()}},
cond: condReplicaFailure2(),

expectedStatus: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure2()}},
expectedStatus: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure2()}},
},
}

@@ -1542,26 +1542,26 @@ func TestRemoveCondition(t *testing.T) {
tests := []struct {
name string

status *api.ReplicationControllerStatus
condType api.ReplicationControllerConditionType
status *v1.ReplicationControllerStatus
condType v1.ReplicationControllerConditionType

expectedStatus *api.ReplicationControllerStatus
expectedStatus *v1.ReplicationControllerStatus
}{
{
name: "remove from empty status",

status: &api.ReplicationControllerStatus{},
condType: api.ReplicationControllerReplicaFailure,
status: &v1.ReplicationControllerStatus{},
condType: v1.ReplicationControllerReplicaFailure,

expectedStatus: &api.ReplicationControllerStatus{},
expectedStatus: &v1.ReplicationControllerStatus{},
},
{
name: "simple remove",

status: &api.ReplicationControllerStatus{Conditions: []api.ReplicationControllerCondition{condReplicaFailure()}},
condType: api.ReplicationControllerReplicaFailure,
status: &v1.ReplicationControllerStatus{Conditions: []v1.ReplicationControllerCondition{condReplicaFailure()}},
condType: v1.ReplicationControllerReplicaFailure,

expectedStatus: &api.ReplicationControllerStatus{},
expectedStatus: &v1.ReplicationControllerStatus{},
},
{
name: "doesn't remove anything",

@@ -23,14 +23,14 @@ import (
"reflect"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
"k8s.io/kubernetes/pkg/api/v1"
v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/labels"
)

// updateReplicationControllerStatus attempts to update the Status.Replicas of the given controller, with a single GET/PUT retry.
func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerInterface, rc api.ReplicationController, newStatus api.ReplicationControllerStatus) (updateErr error) {
func updateReplicationControllerStatus(c v1core.ReplicationControllerInterface, rc v1.ReplicationController, newStatus v1.ReplicationControllerStatus) (updateErr error) {
// This is the steady state. It happens when the rc doesn't have any expectations, since
// we do a periodic relist every 30s. If the generations differ but the replicas are
// the same, a caller might've resized to the same replica count.
@@ -51,7 +51,7 @@ func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerIn
var getErr error
for i, rc := 0, &rc; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for rc: %s/%s, ", rc.Namespace, rc.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", rc.Status.Replicas, newStatus.Replicas, rc.Spec.Replicas) +
fmt.Sprintf("replicas %d->%d (need %d), ", rc.Status.Replicas, newStatus.Replicas, *(rc.Spec.Replicas)) +
fmt.Sprintf("fullyLabeledReplicas %d->%d, ", rc.Status.FullyLabeledReplicas, newStatus.FullyLabeledReplicas) +
fmt.Sprintf("readyReplicas %d->%d, ", rc.Status.ReadyReplicas, newStatus.ReadyReplicas) +
fmt.Sprintf("availableReplicas %d->%d, ", rc.Status.AvailableReplicas, newStatus.AvailableReplicas) +
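updateReplicationControllerStatus is a classic optimistic-concurrency loop: PUT the new status, and on failure GET a fresh copy and retry a bounded number of times (the real code copies the fresh object's resource version before reapplying the same desired status). A generic stdlib sketch of the shape, with an invented client interface standing in for the typed clientset:

// statusClient is an invented stand-in for the typed client used above.
type statusClient interface {
    UpdateStatus(name string, status int32) error
    Get(name string) (int32, error)
}

// updateWithRetry mirrors the GET/PUT retry pattern: try the update, and on
// failure refetch the object before trying again, up to the given budget.
func updateWithRetry(c statusClient, name string, status int32, retries int) error {
    var err error
    for i := 0; ; i++ {
        if err = c.UpdateStatus(name, status); err == nil || i >= retries {
            return err
        }
        if _, err = c.Get(name); err != nil { // refresh the stale copy before retrying
            return err
        }
    }
}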
@@ -72,7 +72,7 @@ func updateReplicationControllerStatus(c unversionedcore.ReplicationControllerIn
}

// OverlappingControllers sorts a list of controllers by creation timestamp, using their names as a tie breaker.
type OverlappingControllers []*api.ReplicationController
type OverlappingControllers []*v1.ReplicationController

func (o OverlappingControllers) Len() int { return len(o) }
func (o OverlappingControllers) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
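OverlappingControllers implements sort.Interface so the manager can deterministically pick the oldest controller when selectors overlap: order by creation time, tie-broken by name. The same three-method pattern on a plain struct (stdlib only, illustrative types):

package main

import (
    "fmt"
    "sort"
    "time"
)

type item struct {
    name    string
    created time.Time
}

// byAge sorts oldest first, falling back to name when timestamps collide,
// mirroring the Less implementation shown below.
type byAge []item

func (a byAge) Len() int      { return len(a) }
func (a byAge) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byAge) Less(i, j int) bool {
    if a[i].created.Equal(a[j].created) {
        return a[i].name < a[j].name
    }
    return a[i].created.Before(a[j].created)
}

func main() {
    t := time.Now()
    items := byAge{{"b", t}, {"a", t}, {"c", t.Add(-time.Hour)}}
    sort.Sort(items)
    fmt.Println(items[0].name) // "c", the oldest
}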
@@ -84,7 +84,7 @@ func (o OverlappingControllers) Less(i, j int) bool {
return o[i].CreationTimestamp.Before(o[j].CreationTimestamp)
}

func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, manageReplicasErr error) api.ReplicationControllerStatus {
func calculateStatus(rc v1.ReplicationController, filteredPods []*v1.Pod, manageReplicasErr error) v1.ReplicationControllerStatus {
newStatus := rc.Status
// Count the number of pods that have labels matching the labels of the pod
// template of the replication controller, the matching pods may have more
@@ -99,26 +99,26 @@ func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, mana
if templateLabel.Matches(labels.Set(pod.Labels)) {
fullyLabeledReplicasCount++
}
if api.IsPodReady(pod) {
if v1.IsPodReady(pod) {
readyReplicasCount++
if api.IsPodAvailable(pod, rc.Spec.MinReadySeconds, unversioned.Now()) {
if v1.IsPodAvailable(pod, rc.Spec.MinReadySeconds, unversioned.Now()) {
availableReplicasCount++
}
}
}

failureCond := GetCondition(rc.Status, api.ReplicationControllerReplicaFailure)
failureCond := GetCondition(rc.Status, v1.ReplicationControllerReplicaFailure)
if manageReplicasErr != nil && failureCond == nil {
var reason string
if diff := len(filteredPods) - int(rc.Spec.Replicas); diff < 0 {
if diff := len(filteredPods) - int(*(rc.Spec.Replicas)); diff < 0 {
reason = "FailedCreate"
} else if diff > 0 {
reason = "FailedDelete"
}
cond := NewReplicationControllerCondition(api.ReplicationControllerReplicaFailure, api.ConditionTrue, reason, manageReplicasErr.Error())
cond := NewReplicationControllerCondition(v1.ReplicationControllerReplicaFailure, v1.ConditionTrue, reason, manageReplicasErr.Error())
SetCondition(&newStatus, cond)
} else if manageReplicasErr == nil && failureCond != nil {
RemoveCondition(&newStatus, api.ReplicationControllerReplicaFailure)
RemoveCondition(&newStatus, v1.ReplicationControllerReplicaFailure)
}

newStatus.Replicas = int32(len(filteredPods))
@@ -129,8 +129,8 @@ func calculateStatus(rc api.ReplicationController, filteredPods []*api.Pod, mana
}

// NewReplicationControllerCondition creates a new replication controller condition.
func NewReplicationControllerCondition(condType api.ReplicationControllerConditionType, status api.ConditionStatus, reason, msg string) api.ReplicationControllerCondition {
return api.ReplicationControllerCondition{
func NewReplicationControllerCondition(condType v1.ReplicationControllerConditionType, status v1.ConditionStatus, reason, msg string) v1.ReplicationControllerCondition {
return v1.ReplicationControllerCondition{
Type: condType,
Status: status,
LastTransitionTime: unversioned.Now(),
@@ -140,7 +140,7 @@ func NewReplicationControllerCondition(condType api.ReplicationControllerConditi
}

// GetCondition returns a replication controller condition with the provided type if it exists.
func GetCondition(status api.ReplicationControllerStatus, condType api.ReplicationControllerConditionType) *api.ReplicationControllerCondition {
func GetCondition(status v1.ReplicationControllerStatus, condType v1.ReplicationControllerConditionType) *v1.ReplicationControllerCondition {
for i := range status.Conditions {
c := status.Conditions[i]
if c.Type == condType {
@@ -151,7 +151,7 @@ func GetCondition(status api.ReplicationControllerStatus, condType api.Replicati
}

// SetCondition adds/replaces the given condition in the replication controller status.
func SetCondition(status *api.ReplicationControllerStatus, condition api.ReplicationControllerCondition) {
func SetCondition(status *v1.ReplicationControllerStatus, condition v1.ReplicationControllerCondition) {
currentCond := GetCondition(*status, condition.Type)
if currentCond != nil && currentCond.Status == condition.Status && currentCond.Reason == condition.Reason {
return
@@ -161,13 +161,13 @@ func SetCondition(status *api.ReplicationControllerStatus, condition api.Replica
}

// RemoveCondition removes the condition with the provided type from the replication controller status.
func RemoveCondition(status *api.ReplicationControllerStatus, condType api.ReplicationControllerConditionType) {
func RemoveCondition(status *v1.ReplicationControllerStatus, condType v1.ReplicationControllerConditionType) {
status.Conditions = filterOutCondition(status.Conditions, condType)
}

// filterOutCondition returns a new slice of replication controller conditions without conditions with the provided type.
func filterOutCondition(conditions []api.ReplicationControllerCondition, condType api.ReplicationControllerConditionType) []api.ReplicationControllerCondition {
var newConditions []api.ReplicationControllerCondition
func filterOutCondition(conditions []v1.ReplicationControllerCondition, condType v1.ReplicationControllerConditionType) []v1.ReplicationControllerCondition {
var newConditions []v1.ReplicationControllerCondition
for _, c := range conditions {
if c.Type == condType {
continue
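GetCondition/SetCondition/RemoveCondition form a small reusable idiom: conditions live in a flat slice, lookups scan by type, a set is a no-op when status and reason are unchanged (preserving the transition time) and otherwise replaces in place, and removal filters into a fresh slice. The skeleton, reduced to stdlib types (illustrative, not the exported helpers):

type condition struct {
    Type, Status, Reason string
}

func get(conds []condition, condType string) *condition {
    for i := range conds {
        if conds[i].Type == condType {
            return &conds[i]
        }
    }
    return nil
}

// set drops any existing condition of the same type and appends the new one,
// unless status and reason are unchanged (keeping transition-time semantics).
func set(conds []condition, c condition) []condition {
    if cur := get(conds, c.Type); cur != nil && cur.Status == c.Status && cur.Reason == c.Reason {
        return conds
    }
    return append(filterOut(conds, c.Type), c)
}

func filterOut(conds []condition, condType string) []condition {
    var out []condition
    for _, c := range conds {
        if c.Type != condType {
            out = append(out, c)
        }
    }
    return out
}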
@@ -24,8 +24,9 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/quota/evaluator/core"
@@ -55,9 +56,9 @@ type ReplenishmentControllerOptions struct {
// PodReplenishmentUpdateFunc will replenish if the old pod was quota tracked but the new is not
func PodReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
return func(oldObj, newObj interface{}) {
oldPod := oldObj.(*api.Pod)
newPod := newObj.(*api.Pod)
if core.QuotaPod(oldPod) && !core.QuotaPod(newPod) {
oldPod := oldObj.(*v1.Pod)
newPod := newObj.(*v1.Pod)
if core.QuotaV1Pod(oldPod) && !core.QuotaV1Pod(newPod) {
options.ReplenishmentFunc(options.GroupKind, newPod.Namespace, oldPod)
}
}
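PodReplenishmentUpdateFunc is a closure factory: it captures the controller options once and returns an update handler whose old/new comparison decides whether quota should be replenished. The shape, reduced to stdlib types (names invented for illustration):

type pod struct {
    namespace string
    active    bool // stands in for "counts against quota"
}

type opts struct {
    replenish func(namespace string)
}

// makeUpdateHandler returns a handler that fires only on the interesting
// transition: the old pod counted against quota and the new one does not.
func makeUpdateHandler(o *opts) func(oldPod, newPod pod) {
    return func(oldPod, newPod pod) {
        if oldPod.active && !newPod.active {
            o.replenish(newPod.namespace)
        }
    }
}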
@@ -146,14 +147,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// TODO move to informer when defined
_, result = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Services(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return r.kubeClient.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().Services(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return r.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&api.Service{},
&v1.Service{},
options.ResyncPeriod(),
cache.ResourceEventHandlerFuncs{
UpdateFunc: ServiceReplenishmentUpdateFunc(options),
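The informer constructed above repeats for Services, ReplicationControllers, PersistentVolumeClaims, Secrets, and ConfigMaps: a ListWatch pair of closures, a prototype object for decoding, a resync period, and event handlers. A loose sketch of that recurring shape, with invented miniature types standing in for the cache package:

// listWatch is an invented miniature of cache.ListWatch: paired closures the
// informer uses to prime its store and then stream changes.
type listWatch struct {
    list  func() ([]string, error)
    watch func() (<-chan string, error)
}

// informerSketch captures the recurring construction above: list once into a
// local store, then hand each watch event to the registered handler.
type informerSketch struct {
    lw       listWatch
    onDelete func(obj string)
    store    []string
}

func (i *informerSketch) run() error {
    objs, err := i.lw.list()
    if err != nil {
        return err
    }
    i.store = objs // prime the cache
    events, err := i.lw.watch()
    if err != nil {
        return err
    }
    for obj := range events { // a real informer also relists every resync period
        i.onDelete(obj)
    }
    return nil
}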
@@ -164,14 +165,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
||||
// TODO move to informer when defined
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().ReplicationControllers(v1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.ReplicationController{},
|
||||
&v1.ReplicationController{},
|
||||
options.ResyncPeriod(),
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
@@ -187,14 +188,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
||||
// TODO (derekwaynecarr) remove me when we can require a sharedInformerFactory in all code paths...
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).List(options)
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(api.NamespaceAll).Watch(options)
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().PersistentVolumeClaims(v1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.PersistentVolumeClaim{},
|
||||
&v1.PersistentVolumeClaim{},
|
||||
options.ResyncPeriod(),
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
@@ -204,14 +205,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
||||
// TODO move to informer when defined
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Secrets(api.NamespaceAll).List(options)
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().Secrets(v1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().Secrets(api.NamespaceAll).Watch(options)
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().Secrets(v1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.Secret{},
|
||||
&v1.Secret{},
|
||||
options.ResyncPeriod(),
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
||||
@@ -221,14 +222,14 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
|
||||
// TODO move to informer when defined
|
||||
_, result = cache.NewInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).List(options)
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).List(options)
|
||||
},
|
||||
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().ConfigMaps(api.NamespaceAll).Watch(options)
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
return r.kubeClient.Core().ConfigMaps(v1.NamespaceAll).Watch(options)
|
||||
},
|
||||
},
|
||||
&api.ConfigMap{},
|
||||
&v1.ConfigMap{},
|
||||
options.ResyncPeriod(),
|
||||
cache.ResourceEventHandlerFuncs{
|
||||
DeleteFunc: ObjectReplenishmentDeleteFunc(options),
|
@@ -243,8 +244,8 @@ func (r *replenishmentControllerFactory) NewController(options *ReplenishmentCon
// ServiceReplenishmentUpdateFunc will replenish if the service was quota tracked has changed service type
func ServiceReplenishmentUpdateFunc(options *ReplenishmentControllerOptions) func(oldObj, newObj interface{}) {
return func(oldObj, newObj interface{}) {
oldService := oldObj.(*api.Service)
newService := newObj.(*api.Service)
oldService := oldObj.(*v1.Service)
newService := newObj.(*v1.Service)
if core.GetQuotaServiceType(oldService) != core.GetQuotaServiceType(newService) {
options.ReplenishmentFunc(options.GroupKind, newService.Namespace, nil)
}

@@ -21,6 +21,7 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/intstr"
@@ -45,13 +46,13 @@ func TestPodReplenishmentUpdateFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"},
Status: api.PodStatus{Phase: api.PodRunning},
oldPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodRunning},
}
newPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"},
Status: api.PodStatus{Phase: api.PodFailed},
newPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodFailed},
}
updateFunc := PodReplenishmentUpdateFunc(&options)
updateFunc(oldPod, newPod)
@@ -70,9 +71,9 @@ func TestObjectReplenishmentDeleteFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldPod := &api.Pod{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "pod"},
Status: api.PodStatus{Phase: api.PodRunning},
oldPod := &v1.Pod{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "pod"},
Status: v1.PodStatus{Phase: v1.PodRunning},
}
deleteFunc := ObjectReplenishmentDeleteFunc(&options)
deleteFunc(oldPod)
@@ -91,21 +92,21 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldService := &api.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{
Type: api.ServiceTypeNodePort,
Ports: []api.ServicePort{{
oldService := &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
newService := &api.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{
Type: api.ServiceTypeClusterIP,
Ports: []api.ServicePort{{
newService := &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}}},
@@ -125,21 +126,21 @@ func TestServiceReplenishmentUpdateFunc(t *testing.T) {
ReplenishmentFunc: mockReplenish.Replenish,
ResyncPeriod: controller.NoResyncPeriodFunc,
}
oldService = &api.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{
Type: api.ServiceTypeNodePort,
Ports: []api.ServicePort{{
oldService = &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
Port: 80,
TargetPort: intstr.FromInt(80),
}},
},
}
newService = &api.Service{
ObjectMeta: api.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: api.ServiceSpec{
Type: api.ServiceTypeNodePort,
Ports: []api.ServicePort{{
newService = &v1.Service{
ObjectMeta: v1.ObjectMeta{Namespace: "test", Name: "mysvc"},
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{{
Port: 81,
TargetPort: intstr.FromInt(81),
}}},

@@ -23,8 +23,9 @@ import (

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota"
"k8s.io/kubernetes/pkg/runtime"
@@ -93,14 +94,14 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
// build the controller that observes quota
rq.rqIndexer, rq.rqController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return rq.kubeClient.Core().ResourceQuotas(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rq.kubeClient.Core().ResourceQuotas(v1.NamespaceAll).Watch(options)
},
},
&api.ResourceQuota{},
&v1.ResourceQuota{},
rq.resyncPeriod(),
cache.ResourceEventHandlerFuncs{
AddFunc: rq.addQuota,
@@ -113,9 +114,9 @@ func NewResourceQuotaController(options *ResourceQuotaControllerOptions) *Resour
// that cannot be backed by a cache and result in a full query of a namespace's content, we do not
// want to pay the price on spurious status updates. As a result, we have a separate routine that is
// responsible for enqueue of all resource quotas when doing a full resync (enqueueAll)
oldResourceQuota := old.(*api.ResourceQuota)
curResourceQuota := cur.(*api.ResourceQuota)
if quota.Equals(curResourceQuota.Spec.Hard, oldResourceQuota.Spec.Hard) {
oldResourceQuota := old.(*v1.ResourceQuota)
curResourceQuota := cur.(*v1.ResourceQuota)
if quota.V1Equals(oldResourceQuota.Spec.Hard, curResourceQuota.Spec.Hard) {
return
}
rq.addQuota(curResourceQuota)
@@ -152,7 +153,7 @@ func (rq *ResourceQuotaController) enqueueAll() {
}
}

// obj could be an *api.ResourceQuota, or a DeletionFinalStateUnknown marker item.
// obj could be an *v1.ResourceQuota, or a DeletionFinalStateUnknown marker item.
func (rq *ResourceQuotaController) enqueueResourceQuota(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -169,10 +170,10 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
return
}

resourceQuota := obj.(*api.ResourceQuota)
resourceQuota := obj.(*v1.ResourceQuota)

// if we declared an intent that is not yet captured in status (prioritize it)
if !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) {
if !v1.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard) {
rq.missingUsageQueue.Add(key)
return
}
@@ -180,7 +181,7 @@ func (rq *ResourceQuotaController) addQuota(obj interface{}) {
// if we declared a constraint that has no usage (which this controller can calculate, prioritize it)
for constraint := range resourceQuota.Status.Hard {
if _, usageFound := resourceQuota.Status.Used[constraint]; !usageFound {
matchedResources := []api.ResourceName{constraint}
matchedResources := []api.ResourceName{api.ResourceName(constraint)}

for _, evaluator := range rq.registry.Evaluators() {
if intersection := quota.Intersection(evaluator.MatchesResources(), matchedResources); len(intersection) != 0 {
@@ -260,14 +261,19 @@ func (rq *ResourceQuotaController) syncResourceQuotaFromKey(key string) (err err
rq.queue.Add(key)
return err
}
quota := *obj.(*api.ResourceQuota)
quota := *obj.(*v1.ResourceQuota)
return rq.syncResourceQuota(quota)
}

// syncResourceQuota runs a complete sync of resource quota status across all known kinds
func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQuota) (err error) {
func (rq *ResourceQuotaController) syncResourceQuota(v1ResourceQuota v1.ResourceQuota) (err error) {
// quota is dirty if any part of spec hard limits differs from the status hard limits
dirty := !api.Semantic.DeepEqual(resourceQuota.Spec.Hard, resourceQuota.Status.Hard)
dirty := !v1.Semantic.DeepEqual(v1ResourceQuota.Spec.Hard, v1ResourceQuota.Status.Hard)

resourceQuota := api.ResourceQuota{}
if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(&v1ResourceQuota, &resourceQuota, nil); err != nil {
return err
}

// dirty tracks if the usage status differs from the previous sync,
// if so, we send a new usage with latest status
@@ -311,7 +317,11 @@ func (rq *ResourceQuotaController) syncResourceQuota(resourceQuota api.ResourceQ

// there was a change observed by this controller that requires we update quota
if dirty {
_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(&usage)
v1Usage := &v1.ResourceQuota{}
if err := v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
return err
}
_, err = rq.kubeClient.Core().ResourceQuotas(usage.Namespace).UpdateStatus(v1Usage)
return err
}
return nil
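The syncResourceQuota hunks above introduce the conversion pattern this commit leans on everywhere quota math is involved: the controller now watches v1 objects, converts them to the internal type for computation, and converts the computed usage back to v1 before writing status. A condensed sketch of that round trip, assuming the generated converters named in the diff; computeUsage is a hypothetical stand-in for the controller's actual usage calculation:

package sketch

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
)

// computeUsage is a hypothetical stand-in for the quota recalculation; here
// it only echoes spec hard limits into status to keep the sketch compilable.
func computeUsage(in api.ResourceQuota) api.ResourceQuota {
	out := in
	out.Status.Hard = in.Spec.Hard
	return out
}

// syncQuotaSketch shows the v1 -> internal -> v1 round trip used above.
func syncQuotaSketch(kubeClient clientset.Interface, v1Quota v1.ResourceQuota) error {
	// Convert the watched v1 object to the internal type for computation.
	internal := api.ResourceQuota{}
	if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(&v1Quota, &internal, nil); err != nil {
		return err
	}
	usage := computeUsage(internal)
	// Convert the computed usage back to v1 before writing status.
	v1Usage := &v1.ResourceQuota{}
	if err := v1.Convert_api_ResourceQuota_To_v1_ResourceQuota(&usage, v1Usage, nil); err != nil {
		return err
	}
	// Status is written through the versioned client, as in the hunk above.
	_, err := kubeClient.Core().ResourceQuotas(v1Usage.Namespace).UpdateStatus(v1Usage)
	return err
}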
@@ -327,7 +337,7 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind unversioned.GroupKin
}

// check if this namespace even has a quota...
indexKey := &api.ResourceQuota{}
indexKey := &v1.ResourceQuota{}
indexKey.Namespace = namespace
resourceQuotas, err := rq.rqIndexer.Index("namespace", indexKey)
if err != nil {
@@ -340,8 +350,13 @@ func (rq *ResourceQuotaController) replenishQuota(groupKind unversioned.GroupKin
// only queue those quotas that are tracking a resource associated with this kind.
matchedResources := evaluator.MatchesResources()
for i := range resourceQuotas {
resourceQuota := resourceQuotas[i].(*api.ResourceQuota)
resourceQuotaResources := quota.ResourceNames(resourceQuota.Status.Hard)
resourceQuota := resourceQuotas[i].(*v1.ResourceQuota)
internalResourceQuota := &api.ResourceQuota{}
if err := v1.Convert_v1_ResourceQuota_To_api_ResourceQuota(resourceQuota, internalResourceQuota, nil); err != nil {
glog.Error(err)
continue
}
resourceQuotaResources := quota.ResourceNames(internalResourceQuota.Status.Hard)
if len(quota.Intersection(matchedResources, resourceQuotaResources)) > 0 {
// TODO: make this support targeted replenishment to a specific kind, right now it does a full recalc on that quota.
rq.enqueueResourceQuota(resourceQuota)

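replenishQuota above also shows the cache-side half of the migration: the "namespace" index is queried with a bare v1 object as the key, and each hit is type-asserted to *v1.ResourceQuota before conversion. A small sketch of that lookup under the same assumptions; the helper name and error handling are illustrative:

package sketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
)

// quotasInNamespace queries the "namespace" index the way replenishQuota
// does: an otherwise-empty v1.ResourceQuota carries only the namespace key.
func quotasInNamespace(indexer cache.Indexer, namespace string) ([]*v1.ResourceQuota, error) {
	indexKey := &v1.ResourceQuota{}
	indexKey.Namespace = namespace
	objs, err := indexer.Index("namespace", indexKey)
	if err != nil {
		return nil, err
	}
	quotas := make([]*v1.ResourceQuota, 0, len(objs))
	for i := range objs {
		quotas = append(quotas, objs[i].(*v1.ResourceQuota))
	}
	return quotas, nil
}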
@@ -23,7 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/quota/generic"
@@ -31,74 +32,74 @@ import (
"k8s.io/kubernetes/pkg/util/sets"
)

func getResourceList(cpu, memory string) api.ResourceList {
res := api.ResourceList{}
func getResourceList(cpu, memory string) v1.ResourceList {
res := v1.ResourceList{}
if cpu != "" {
res[api.ResourceCPU] = resource.MustParse(cpu)
res[v1.ResourceCPU] = resource.MustParse(cpu)
}
if memory != "" {
res[api.ResourceMemory] = resource.MustParse(memory)
res[v1.ResourceMemory] = resource.MustParse(memory)
}
return res
}

func getResourceRequirements(requests, limits api.ResourceList) api.ResourceRequirements {
res := api.ResourceRequirements{}
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
res := v1.ResourceRequirements{}
res.Requests = requests
res.Limits = limits
return res
}

func TestSyncResourceQuota(t *testing.T) {
podList := api.PodList{
Items: []api.Pod{
podList := v1.PodList{
Items: []v1.Pod{
{
ObjectMeta: api.ObjectMeta{Name: "pod-running", Namespace: "testing"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
ObjectMeta: v1.ObjectMeta{Name: "pod-running", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},
Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
Status: api.PodStatus{Phase: api.PodRunning},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
ObjectMeta: v1.ObjectMeta{Name: "pod-running-2", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodRunning},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},
Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
},
},
{
ObjectMeta: api.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
Status: api.PodStatus{Phase: api.PodFailed},
Spec: api.PodSpec{
Volumes: []api.Volume{{Name: "vol"}},
Containers: []api.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
ObjectMeta: v1.ObjectMeta{Name: "pod-failed", Namespace: "testing"},
Status: v1.PodStatus{Phase: v1.PodFailed},
Spec: v1.PodSpec{
Volumes: []v1.Volume{{Name: "vol"}},
Containers: []v1.Container{{Name: "ctr", Image: "image", Resources: getResourceRequirements(getResourceList("100m", "1Gi"), getResourceList("", ""))}},
},
},
},
}
resourceQuota := api.ResourceQuota{
ObjectMeta: api.ObjectMeta{Name: "quota", Namespace: "testing"},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"),
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{Name: "quota", Namespace: "testing"},
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("100Gi"),
v1.ResourcePods: resource.MustParse("5"),
},
},
}
expectedUsage := api.ResourceQuota{
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("100Gi"),
api.ResourcePods: resource.MustParse("5"),
expectedUsage := v1.ResourceQuota{
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("100Gi"),
v1.ResourcePods: resource.MustParse("5"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("200m"),
api.ResourceMemory: resource.MustParse("2Gi"),
api.ResourcePods: resource.MustParse("2"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("200m"),
v1.ResourceMemory: resource.MustParse("2Gi"),
v1.ResourcePods: resource.MustParse("2"),
},
},
}
@@ -135,7 +136,7 @@ func TestSyncResourceQuota(t *testing.T) {
}

lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota)
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)

// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
@@ -157,33 +158,33 @@ func TestSyncResourceQuota(t *testing.T) {
}

func TestSyncResourceQuotaSpecChange(t *testing.T) {
resourceQuota := api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}

expectedUsage := api.ResourceQuota{
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
expectedUsage := v1.ResourceQuota{
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
@@ -221,7 +222,7 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {
}

lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota)
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)

// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
@@ -243,35 +244,35 @@ func TestSyncResourceQuotaSpecChange(t *testing.T) {

}
func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
resourceQuota := api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("3"),
api.ResourceMemory: resource.MustParse("1Gi"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("3"),
v1.ResourceMemory: resource.MustParse("1Gi"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
api.ResourceMemory: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
v1.ResourceMemory: resource.MustParse("0"),
},
},
}

expectedUsage := api.ResourceQuota{
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
expectedUsage := v1.ResourceQuota{
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}
@@ -309,7 +310,7 @@ func TestSyncResourceQuotaSpecHardChange(t *testing.T) {
}

lastActionIndex := len(kubeClient.Actions()) - 1
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*api.ResourceQuota)
usage := kubeClient.Actions()[lastActionIndex].(core.UpdateAction).GetObject().(*v1.ResourceQuota)

// ensure hard and used limits are what we expected
for k, v := range expectedUsage.Status.Hard {
@@ -331,40 +332,40 @@ func TestSyncResourceQuotaSpecHardChange(t *testing.T) {

// ensure usage hard and used are are synced with spec hard, not have dirty resource
for k, v := range usage.Status.Hard {
if k == api.ResourceMemory {
if k == v1.ResourceMemory {
t.Errorf("Unexpected Usage Hard: Key: %v, Value: %v", k, v.String())
}
}

for k, v := range usage.Status.Used {
if k == api.ResourceMemory {
if k == v1.ResourceMemory {
t.Errorf("Unexpected Usage Used: Key: %v, Value: %v", k, v.String())
}
}
}

func TestSyncResourceQuotaNoChange(t *testing.T) {
resourceQuota := api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
resourceQuota := v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
}

kubeClient := fake.NewSimpleClientset(&api.PodList{}, &resourceQuota)
kubeClient := fake.NewSimpleClientset(&v1.PodList{}, &resourceQuota)
resourceQuotaControllerOptions := &ResourceQuotaControllerOptions{
KubeClient: kubeClient,
ResyncPeriod: controller.NoResyncPeriodFunc,
@@ -416,20 +417,20 @@ func TestAddQuota(t *testing.T) {
testCases := []struct {
name string

quota *api.ResourceQuota
quota *v1.ResourceQuota
expectedPriority bool
}{
{
name: "no status",
expectedPriority: true,
quota: &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
},
@@ -437,19 +438,19 @@ func TestAddQuota(t *testing.T) {
{
name: "status, no usage",
expectedPriority: true,
quota: &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
},
@@ -457,22 +458,22 @@ func TestAddQuota(t *testing.T) {
{
name: "status, mismatch",
expectedPriority: true,
quota: &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("6"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("6"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
},
@@ -480,19 +481,19 @@ func TestAddQuota(t *testing.T) {
{
name: "status, missing usage, but don't care",
expectedPriority: false,
quota: &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceServices: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceServices: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceServices: resource.MustParse("4"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceServices: resource.MustParse("4"),
},
},
},
@@ -500,22 +501,22 @@ func TestAddQuota(t *testing.T) {
{
name: "ready",
expectedPriority: false,
quota: &api.ResourceQuota{
ObjectMeta: api.ObjectMeta{
quota: &v1.ResourceQuota{
ObjectMeta: v1.ObjectMeta{
Namespace: "default",
Name: "rq",
},
Spec: api.ResourceQuotaSpec{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Spec: v1.ResourceQuotaSpec{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
},
Status: api.ResourceQuotaStatus{
Hard: api.ResourceList{
api.ResourceCPU: resource.MustParse("4"),
Status: v1.ResourceQuotaStatus{
Hard: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("4"),
},
Used: api.ResourceList{
api.ResourceCPU: resource.MustParse("0"),
Used: v1.ResourceList{
v1.ResourceCPU: resource.MustParse("0"),
},
},
},

@@ -23,11 +23,11 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
@@ -71,14 +71,14 @@ func New(routes cloudprovider.Routes, kubeClient clientset.Interface, clusterNam

rc.nodeStore.Store, rc.nodeController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return rc.kubeClient.Core().Nodes().List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return rc.kubeClient.Core().Nodes().Watch(options)
},
},
&api.Node{},
&v1.Node{},
controller.NoResyncPeriodFunc(),
cache.ResourceEventHandlerFuncs{},
)
@@ -116,7 +116,7 @@ func (rc *RouteController) reconcileNodeRoutes() error {
return rc.reconcile(nodeList.Items, routeList)
}

func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.Route) error {
func (rc *RouteController) reconcile(nodes []v1.Node, routes []*cloudprovider.Route) error {
// nodeCIDRs maps nodeName->nodeCIDR
nodeCIDRs := make(map[types.NodeName]string)
// routeMap maps routeTargetNode->route
@@ -166,8 +166,8 @@ func (rc *RouteController) reconcile(nodes []api.Node, routes []*cloudprovider.R
}(nodeName, nameHint, route)
} else {
// Update condition only if it doesn't reflect the current state.
_, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable)
if condition == nil || condition.Status != api.ConditionFalse {
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
if condition == nil || condition.Status != v1.ConditionFalse {
rc.updateNetworkingCondition(types.NodeName(node.Name), true)
}
}
@@ -203,17 +203,17 @@ func (rc *RouteController) updateNetworkingCondition(nodeName types.NodeName, ro
// patch in the retry loop.
currentTime := unversioned.Now()
if routeCreated {
err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, api.NodeCondition{
Type: api.NodeNetworkUnavailable,
Status: api.ConditionFalse,
err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionFalse,
Reason: "RouteCreated",
Message: "RouteController created a route",
LastTransitionTime: currentTime,
})
} else {
err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, api.NodeCondition{
Type: api.NodeNetworkUnavailable,
Status: api.ConditionTrue,
err = nodeutil.SetNodeCondition(rc.kubeClient, nodeName, v1.NodeCondition{
Type: v1.NodeNetworkUnavailable,
Status: v1.ConditionTrue,
Reason: "NoRouteCreated",
Message: "RouteController failed to create a route",
LastTransitionTime: currentTime,

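The updateNetworkingCondition hunk above writes one of two mirror-image v1.NodeCondition values. Folded into a single helper for clarity, a sketch under the same imports the file already uses; the nodeutil import path is assumed, and the helper itself is not part of the commit:

package sketch

import (
	"k8s.io/kubernetes/pkg/api/unversioned"
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/types"
	nodeutil "k8s.io/kubernetes/pkg/util/node" // assumed import path for SetNodeCondition
)

// setRouteCondition marks NodeNetworkUnavailable false when a route exists
// and true when route creation failed, matching the two branches above.
func setRouteCondition(kubeClient clientset.Interface, nodeName types.NodeName, routeCreated bool) error {
	cond := v1.NodeCondition{
		Type:               v1.NodeNetworkUnavailable,
		Status:             v1.ConditionTrue,
		Reason:             "NoRouteCreated",
		Message:            "RouteController failed to create a route",
		LastTransitionTime: unversioned.Now(),
	}
	if routeCreated {
		cond.Status = v1.ConditionFalse
		cond.Reason = "RouteCreated"
		cond.Message = "RouteController created a route"
	}
	return nodeutil.SetNodeCondition(kubeClient, nodeName, cond)
}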
@@ -21,8 +21,8 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/cloudprovider"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
@@ -70,12 +70,12 @@ func TestIsResponsibleForRoute(t *testing.T) {

func TestReconcile(t *testing.T) {
cluster := "my-k8s"
node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-1", UID: "01"}, Spec: api.NodeSpec{PodCIDR: "10.120.0.0/24"}}
node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: "10.120.1.0/24"}}
nodeNoCidr := api.Node{ObjectMeta: api.ObjectMeta{Name: "node-2", UID: "02"}, Spec: api.NodeSpec{PodCIDR: ""}}
node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-1", UID: "01"}, Spec: v1.NodeSpec{PodCIDR: "10.120.0.0/24"}}
node2 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: "10.120.1.0/24"}}
nodeNoCidr := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-2", UID: "02"}, Spec: v1.NodeSpec{PodCIDR: ""}}

testCases := []struct {
nodes []api.Node
nodes []v1.Node
initialRoutes []*cloudprovider.Route
expectedRoutes []*cloudprovider.Route
expectedNetworkUnavailable []bool
@@ -83,7 +83,7 @@ func TestReconcile(t *testing.T) {
}{
// 2 nodes, routes already there
{
nodes: []api.Node{
nodes: []v1.Node{
node1,
node2,
},
@@ -96,11 +96,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"},
},
expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}),
clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
},
// 2 nodes, one route already there
{
nodes: []api.Node{
nodes: []v1.Node{
node1,
node2,
},
@@ -112,11 +112,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"},
},
expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}),
clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
},
// 2 nodes, no routes yet
{
nodes: []api.Node{
nodes: []v1.Node{
node1,
node2,
},
@@ -126,11 +126,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"},
},
expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}),
clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
},
// 2 nodes, a few too many routes
{
nodes: []api.Node{
nodes: []v1.Node{
node1,
node2,
},
@@ -145,11 +145,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"},
},
expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}),
clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
},
// 2 nodes, 2 routes, but only 1 is right
{
nodes: []api.Node{
nodes: []v1.Node{
node1,
node2,
},
@@ -162,11 +162,11 @@ func TestReconcile(t *testing.T) {
{cluster + "-02", "node-2", "10.120.1.0/24"},
},
expectedNetworkUnavailable: []bool{true, true},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, node2}}),
clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, node2}}),
},
// 2 nodes, one node without CIDR assigned.
{
nodes: []api.Node{
nodes: []v1.Node{
node1,
nodeNoCidr,
},
@@ -175,7 +175,7 @@ func TestReconcile(t *testing.T) {
{cluster + "-01", "node-1", "10.120.0.0/24"},
},
expectedNetworkUnavailable: []bool{true, false},
clientset: fake.NewSimpleClientset(&api.NodeList{Items: []api.Node{node1, nodeNoCidr}}),
clientset: fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{node1, nodeNoCidr}}),
},
}
for i, testCase := range testCases {
@@ -197,13 +197,13 @@ func TestReconcile(t *testing.T) {
}
for _, action := range testCase.clientset.Actions() {
if action.GetVerb() == "update" && action.GetResource().Resource == "nodes" {
node := action.(core.UpdateAction).GetObject().(*api.Node)
_, condition := api.GetNodeCondition(&node.Status, api.NodeNetworkUnavailable)
node := action.(core.UpdateAction).GetObject().(*v1.Node)
_, condition := v1.GetNodeCondition(&node.Status, v1.NodeNetworkUnavailable)
if condition == nil {
t.Errorf("%d. Missing NodeNetworkUnavailable condition for Node %v", i, node.Name)
} else {
check := func(index int) bool {
return (condition.Status == api.ConditionFalse) == testCase.expectedNetworkUnavailable[index]
return (condition.Status == v1.ConditionFalse) == testCase.expectedNetworkUnavailable[index]
}
index := -1
for j := range testCase.nodes {
@@ -217,7 +217,7 @@ func TestReconcile(t *testing.T) {
}
if !check(index) {
t.Errorf("%d. Invalid NodeNetworkUnavailable condition for Node %v, expected %v, got %v",
i, node.Name, testCase.expectedNetworkUnavailable[index], (condition.Status == api.ConditionFalse))
i, node.Name, testCase.expectedNetworkUnavailable[index], (condition.Status == v1.ConditionFalse))
}
}
}

@@ -25,11 +25,11 @@ import (
"reflect"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller"
@@ -65,7 +65,7 @@ const (

type cachedService struct {
// The cached state of the service
state *api.Service
state *v1.Service
// Controls error back-off
lastRetryDelay time.Duration
}
@@ -78,7 +78,7 @@ type serviceCache struct {
type ServiceController struct {
cloud cloudprovider.Interface
knownHosts []string
servicesToUpdate []*api.Service
servicesToUpdate []*v1.Service
kubeClient clientset.Interface
clusterName string
balancer cloudprovider.LoadBalancer
@@ -100,7 +100,7 @@ type ServiceController struct {
func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterName string) (*ServiceController, error) {
broadcaster := record.NewBroadcaster()
broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "service-controller"})
recorder := broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"})

if kubeClient != nil && kubeClient.Core().RESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("service_controller", kubeClient.Core().RESTClient().GetRateLimiter())
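The New hunk above keeps the event sink on the versioned core client while the recorder's source switches to v1.EventSource. Isolated as a sketch, using the same package aliases as this file; the helper itself is illustrative:

package sketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	unversioned_core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
	"k8s.io/kubernetes/pkg/client/record"
)

// newRecorder wires a broadcaster to the v1 events API and returns a
// recorder that stamps events with the controller's component name.
func newRecorder(kubeClient clientset.Interface) record.EventRecorder {
	broadcaster := record.NewBroadcaster()
	broadcaster.StartRecordingToSink(&unversioned_core.EventSinkImpl{Interface: kubeClient.Core().Events("")})
	return broadcaster.NewRecorder(v1.EventSource{Component: "service-controller"})
}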
@@ -121,20 +121,20 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
}
s.serviceStore.Indexer, s.serviceController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (pkg_runtime.Object, error) {
return s.kubeClient.Core().Services(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (pkg_runtime.Object, error) {
return s.kubeClient.Core().Services(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return s.kubeClient.Core().Services(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return s.kubeClient.Core().Services(v1.NamespaceAll).Watch(options)
},
},
&api.Service{},
&v1.Service{},
serviceSyncPeriod,
cache.ResourceEventHandlerFuncs{
AddFunc: s.enqueueService,
UpdateFunc: func(old, cur interface{}) {
oldSvc, ok1 := old.(*api.Service)
curSvc, ok2 := cur.(*api.Service)
oldSvc, ok1 := old.(*v1.Service)
curSvc, ok2 := cur.(*v1.Service)
if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
s.enqueueService(cur)
}
@@ -149,7 +149,7 @@ func New(cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterN
return s, nil
}

// obj could be an *api.Service, or a DeletionFinalStateUnknown marker item.
// obj could be an *v1.Service, or a DeletionFinalStateUnknown marker item.
func (s *ServiceController) enqueueService(obj interface{}) {
key, err := controller.KeyFunc(obj)
if err != nil {
@@ -175,8 +175,8 @@ func (s *ServiceController) Run(workers int) {
for i := 0; i < workers; i++ {
go wait.Until(s.worker, time.Second, wait.NeverStop)
}
nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", api.NamespaceAll, fields.Everything())
cache.NewReflector(nodeLW, &api.Node{}, s.nodeLister.Store, 0).Run()
nodeLW := cache.NewListWatchFromClient(s.kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
cache.NewReflector(nodeLW, &v1.Node{}, s.nodeLister.Store, 0).Run()
go wait.Until(s.nodeSyncLoop, nodeSyncPeriod, wait.NeverStop)
}

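Run above also migrates the node reflector. The shape of that wiring, as a sketch under the diff's package paths; the fields import path is assumed, and a zero period disables periodic relist:

package sketch

import (
	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/client/cache"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/fields" // assumed import path for fields.Everything
)

// mirrorNodes fills store with v1.Node objects via a reflector built from
// the core REST client, mirroring the Run hunk above.
func mirrorNodes(kubeClient clientset.Interface, store cache.Store) {
	nodeLW := cache.NewListWatchFromClient(kubeClient.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.Everything())
	cache.NewReflector(nodeLW, &v1.Node{}, store, 0).Run()
}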
||||
@@ -224,7 +224,7 @@ func (s *ServiceController) init() error {
|
||||
// Returns an error if processing the service update failed, along with a time.Duration
|
||||
// indicating whether processing should be retried; zero means no-retry; otherwise
|
||||
// we should retry in that Duration.
|
||||
func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *api.Service, key string) (error, time.Duration) {
|
||||
func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) (error, time.Duration) {
|
||||
|
||||
// cache the service, we need the info for service deletion
|
||||
cachedService.state = service
|
||||
@@ -237,7 +237,7 @@ func (s *ServiceController) processServiceUpdate(cachedService *cachedService, s
|
||||
message += " (will not retry): "
|
||||
}
|
||||
message += err.Error()
|
||||
s.eventRecorder.Event(service, api.EventTypeWarning, "CreatingLoadBalancerFailed", message)
|
||||
s.eventRecorder.Event(service, v1.EventTypeWarning, "CreatingLoadBalancerFailed", message)
|
||||
|
||||
return err, cachedService.nextRetryDelay()
|
||||
}
|
||||
@@ -253,13 +253,13 @@ func (s *ServiceController) processServiceUpdate(cachedService *cachedService, s
|
||||
|
||||
// Returns whatever error occurred along with a boolean indicator of whether it
|
||||
// should be retried.
|
||||
func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *api.Service) (error, bool) {
|
||||
func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *v1.Service) (error, bool) {
|
||||
|
||||
// Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create,
|
||||
// which may involve service interruption. Also, we would like user-friendly events.
|
||||
|
||||
// Save the state so we can avoid a write if it doesn't change
|
||||
previousState := api.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
|
||||
previousState := v1.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
|
||||
|
||||
if !wantsLoadBalancer(service) {
|
||||
needDelete := true
|
||||
@@ -273,31 +273,31 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *api.
|
||||
|
||||
if needDelete {
|
||||
glog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
|
||||
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
|
||||
s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
|
||||
if err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service); err != nil {
|
||||
return err, retryable
|
||||
}
|
||||
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
|
||||
s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
|
||||
}
|
||||
|
||||
service.Status.LoadBalancer = api.LoadBalancerStatus{}
|
||||
service.Status.LoadBalancer = v1.LoadBalancerStatus{}
|
||||
} else {
|
||||
glog.V(2).Infof("Ensuring LB for service %s", key)
|
||||
|
||||
// TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart
|
||||
|
||||
// The load balancer doesn't exist yet, so create it.
|
||||
s.eventRecorder.Event(service, api.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer")
|
||||
s.eventRecorder.Event(service, v1.EventTypeNormal, "CreatingLoadBalancer", "Creating load balancer")
|
||||
err := s.createLoadBalancer(service)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to create load balancer for service %s: %v", key, err), retryable
|
||||
}
|
||||
s.eventRecorder.Event(service, api.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer")
|
||||
s.eventRecorder.Event(service, v1.EventTypeNormal, "CreatedLoadBalancer", "Created load balancer")
|
||||
}
|
||||
|
||||
// Write the state if changed
|
||||
// TODO: Be careful here ... what if there were other changes to the service?
|
||||
if !api.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) {
|
||||
if !v1.LoadBalancerStatusEqual(previousState, &service.Status.LoadBalancer) {
|
||||
if err := s.persistUpdate(service); err != nil {
|
||||
return fmt.Errorf("Failed to persist updated status to apiserver, even after retries. Giving up: %v", err), notRetryable
|
||||
}
|
||||
@@ -308,7 +308,7 @@ func (s *ServiceController) createLoadBalancerIfNeeded(key string, service *api.
|
||||
return nil, notRetryable
|
||||
}
|
||||
|
||||
func (s *ServiceController) persistUpdate(service *api.Service) error {
|
||||
func (s *ServiceController) persistUpdate(service *v1.Service) error {
|
||||
var err error
|
||||
for i := 0; i < clientRetryCount; i++ {
|
||||
_, err = s.kubeClient.Core().Services(service.Namespace).UpdateStatus(service)
|
||||
@@ -338,7 +338,7 @@ func (s *ServiceController) persistUpdate(service *api.Service) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *ServiceController) createLoadBalancer(service *api.Service) error {
|
||||
func (s *ServiceController) createLoadBalancer(service *v1.Service) error {
|
||||
nodes, err := s.nodeLister.List()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -381,10 +381,10 @@ func (s *serviceCache) GetByKey(key string) (interface{}, bool, error) {
|
||||
|
||||
// ListKeys implements the interface required by DeltaFIFO to list the keys we
|
||||
// already know about.
|
||||
func (s *serviceCache) allServices() []*api.Service {
|
||||
func (s *serviceCache) allServices() []*v1.Service {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
services := make([]*api.Service, 0, len(s.serviceMap))
|
||||
services := make([]*v1.Service, 0, len(s.serviceMap))
|
||||
for _, v := range s.serviceMap {
|
||||
services = append(services, v.state)
|
||||
}
|
||||
@@ -421,12 +421,12 @@ func (s *serviceCache) delete(serviceName string) {
|
||||
delete(s.serviceMap, serviceName)
|
||||
}
|
||||
|
||||
func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api.Service) bool {
|
||||
func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
|
||||
if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) {
|
||||
return false
|
||||
}
|
||||
if wantsLoadBalancer(oldService) != wantsLoadBalancer(newService) {
|
||||
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "Type", "%v -> %v",
|
||||
s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "Type", "%v -> %v",
|
||||
oldService.Spec.Type, newService.Spec.Type)
|
||||
return true
|
||||
}
|
||||
@@ -434,18 +434,18 @@ func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api
|
||||
return true
|
||||
}
|
||||
if !loadBalancerIPsAreEqual(oldService, newService) {
|
||||
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "LoadbalancerIP", "%v -> %v",
|
||||
s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "LoadbalancerIP", "%v -> %v",
|
||||
oldService.Spec.LoadBalancerIP, newService.Spec.LoadBalancerIP)
|
||||
return true
|
||||
}
|
||||
if len(oldService.Spec.ExternalIPs) != len(newService.Spec.ExternalIPs) {
|
||||
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "ExternalIP", "Count: %v -> %v",
|
||||
s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Count: %v -> %v",
|
||||
len(oldService.Spec.ExternalIPs), len(newService.Spec.ExternalIPs))
|
||||
return true
|
||||
}
|
||||
for i := range oldService.Spec.ExternalIPs {
|
||||
if oldService.Spec.ExternalIPs[i] != newService.Spec.ExternalIPs[i] {
|
||||
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "ExternalIP", "Added: %v",
|
||||
s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "ExternalIP", "Added: %v",
|
||||
newService.Spec.ExternalIPs[i])
|
||||
return true
|
||||
}
|
||||
@@ -454,7 +454,7 @@ func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api
|
||||
return true
|
||||
}
|
||||
if oldService.UID != newService.UID {
|
||||
s.eventRecorder.Eventf(newService, api.EventTypeNormal, "UID", "%v -> %v",
|
||||
s.eventRecorder.Eventf(newService, v1.EventTypeNormal, "UID", "%v -> %v",
|
||||
oldService.UID, newService.UID)
|
||||
return true
|
||||
}
|
||||
@@ -462,14 +462,14 @@ func (s *ServiceController) needsUpdate(oldService *api.Service, newService *api
|
||||
return false
|
||||
}
|
||||
|
||||
func (s *ServiceController) loadBalancerName(service *api.Service) string {
|
||||
func (s *ServiceController) loadBalancerName(service *v1.Service) string {
|
||||
return cloudprovider.GetLoadBalancerName(service)
|
||||
}
|
||||
|
||||
func getPortsForLB(service *api.Service) ([]*api.ServicePort, error) {
|
||||
var protocol api.Protocol
|
||||
func getPortsForLB(service *v1.Service) ([]*v1.ServicePort, error) {
|
||||
var protocol v1.Protocol
|
||||
|
||||
ports := []*api.ServicePort{}
|
||||
ports := []*v1.ServicePort{}
|
||||
for i := range service.Spec.Ports {
|
||||
sp := &service.Spec.Ports[i]
|
||||
// The check on protocol was removed here. The cloud provider itself is now responsible for all protocol validation
|
||||
@@ -484,7 +484,7 @@ func getPortsForLB(service *api.Service) ([]*api.ServicePort, error) {
|
||||
return ports, nil
|
||||
}
|
||||
|
||||
func portsEqualForLB(x, y *api.Service) bool {
|
||||
func portsEqualForLB(x, y *v1.Service) bool {
|
||||
xPorts, err := getPortsForLB(x)
|
||||
if err != nil {
|
||||
return false
|
||||
@@ -496,7 +496,7 @@ func portsEqualForLB(x, y *api.Service) bool {
|
||||
return portSlicesEqualForLB(xPorts, yPorts)
|
||||
}
|
||||
|
||||
func portSlicesEqualForLB(x, y []*api.ServicePort) bool {
|
||||
func portSlicesEqualForLB(x, y []*v1.ServicePort) bool {
|
||||
if len(x) != len(y) {
|
||||
return false
|
||||
}
|
||||
@@ -509,7 +509,7 @@ func portSlicesEqualForLB(x, y []*api.ServicePort) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func portEqualForLB(x, y *api.ServicePort) bool {
|
||||
func portEqualForLB(x, y *v1.ServicePort) bool {
|
||||
// TODO: Should we check name? (In theory, an LB could expose it)
|
||||
if x.Name != y.Name {
|
||||
return false
|
||||
@@ -569,11 +569,11 @@ func stringSlicesEqual(x, y []string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func includeNodeFromNodeList(node *api.Node) bool {
|
||||
func includeNodeFromNodeList(node *v1.Node) bool {
|
||||
return !node.Spec.Unschedulable
|
||||
}
|
||||
|
||||
func hostsFromNodeList(list *api.NodeList) []string {
|
||||
func hostsFromNodeList(list *v1.NodeList) []string {
|
||||
result := []string{}
|
||||
for ix := range list.Items {
|
||||
if includeNodeFromNodeList(&list.Items[ix]) {
|
||||
@@ -583,7 +583,7 @@ func hostsFromNodeList(list *api.NodeList) []string {
|
||||
return result
|
||||
}
|
||||
|
||||
func hostsFromNodeSlice(nodes []*api.Node) []string {
|
||||
func hostsFromNodeSlice(nodes []*v1.Node) []string {
|
||||
result := []string{}
|
||||
for _, node := range nodes {
|
||||
if includeNodeFromNodeList(node) {
|
||||
@@ -594,7 +594,7 @@ func hostsFromNodeSlice(nodes []*api.Node) []string {
|
||||
}
|
||||
|
||||
func getNodeConditionPredicate() cache.NodeConditionPredicate {
|
||||
return func(node *api.Node) bool {
|
||||
return func(node *v1.Node) bool {
|
||||
// We add the master to the node list, but its unschedulable. So we use this to filter
|
||||
// the master.
|
||||
// TODO: Use a node annotation to indicate the master
|
||||
@@ -608,7 +608,7 @@ func getNodeConditionPredicate() cache.NodeConditionPredicate {
|
||||
for _, cond := range node.Status.Conditions {
|
||||
// We consider the node for load balancing only when its NodeReady condition status
|
||||
// is ConditionTrue
|
||||
if cond.Type == api.NodeReady && cond.Status != api.ConditionTrue {
|
||||
if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
|
||||
glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
|
||||
return false
|
||||
}
|
||||
@@ -648,7 +648,7 @@ func (s *ServiceController) nodeSyncLoop() {
|
||||
// updateLoadBalancerHosts updates all existing load balancers so that
|
||||
// they will match the list of hosts provided.
|
||||
// Returns the list of services that couldn't be updated.
|
||||
func (s *ServiceController) updateLoadBalancerHosts(services []*api.Service, hosts []string) (servicesToRetry []*api.Service) {
|
||||
func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, hosts []string) (servicesToRetry []*v1.Service) {
|
||||
for _, service := range services {
func() {
if service == nil {
@@ -665,7 +665,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*api.Service, hos

// Updates the load balancer of a service, assuming we hold the mutex
// associated with the service.
func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service, hosts []string) error {
func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, hosts []string) error {
if !wantsLoadBalancer(service) {
return nil
}
@@ -673,7 +673,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
// This operation doesn't normally take very long (and happens pretty often), so we only record the final event
err := s.balancer.UpdateLoadBalancer(s.clusterName, service, hosts)
if err == nil {
s.eventRecorder.Event(service, api.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts")
return nil
}

@@ -684,15 +684,15 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *api.Service,
return nil
}

s.eventRecorder.Eventf(service, api.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
s.eventRecorder.Eventf(service, v1.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", hosts, err)
return err
}

func wantsLoadBalancer(service *api.Service) bool {
return service.Spec.Type == api.ServiceTypeLoadBalancer
func wantsLoadBalancer(service *v1.Service) bool {
return service.Spec.Type == v1.ServiceTypeLoadBalancer
}

func loadBalancerIPsAreEqual(oldService, newService *api.Service) bool {
func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool {
return oldService.Spec.LoadBalancerIP == newService.Spec.LoadBalancerIP
}

@@ -736,7 +736,7 @@ func (s *ServiceController) syncService(key string) error {
glog.Infof("Service has been deleted %v", key)
err, retryDelay = s.processServiceDeletion(key)
} else {
service, ok := obj.(*api.Service)
service, ok := obj.(*v1.Service)
if ok {
cachedService = s.cache.getOrCreate(key)
err, retryDelay = s.processServiceUpdate(cachedService, service, key)
@@ -778,14 +778,14 @@ func (s *ServiceController) processServiceDeletion(key string) (error, time.Dura
if !wantsLoadBalancer(service) {
return nil, doNotRetry
}
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
err := s.balancer.EnsureLoadBalancerDeleted(s.clusterName, service)
if err != nil {
message := "Error deleting load balancer (will retry): " + err.Error()
s.eventRecorder.Event(service, api.EventTypeWarning, "DeletingLoadBalancerFailed", message)
s.eventRecorder.Event(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", message)
return err, cachedService.nextRetryDelay()
}
s.eventRecorder.Event(service, api.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
s.cache.delete(key)

cachedService.resetRetryDelay()

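The hunks above port the service controller's helpers from the internal api.Service type to the versioned v1.Service; the two are distinct Go structs, not aliases, so every signature changes together. Where a caller still holds an internal object it can bridge via the generated converters. A minimal sketch, assuming Convert_api_Service_To_v1_Service follows the same generated-converter naming scheme as the ServiceAccount and Secret converters invoked later in this commit:

internalSvc := &api.Service{}
versionedSvc := &v1.Service{}
// Converter name assumed by analogy with the generated converters used
// later in this commit; it is not shown in these hunks.
if err := v1.Convert_api_Service_To_v1_Service(internalSvc, versionedSvc, nil); err != nil {
	glog.Errorf("converting service: %v", err)
}
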
@@ -20,69 +20,69 @@ import (
"reflect"
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/testapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
"k8s.io/kubernetes/pkg/types"
)

const region = "us-central"

func newService(name string, uid types.UID, serviceType api.ServiceType) *api.Service {
return &api.Service{ObjectMeta: api.ObjectMeta{Name: name, Namespace: "namespace", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: api.ServiceSpec{Type: serviceType}}
func newService(name string, uid types.UID, serviceType v1.ServiceType) *v1.Service {
return &v1.Service{ObjectMeta: v1.ObjectMeta{Name: name, Namespace: "namespace", UID: uid, SelfLink: testapi.Default.SelfLink("services", name)}, Spec: v1.ServiceSpec{Type: serviceType}}
}

func TestCreateExternalLoadBalancer(t *testing.T) {
table := []struct {
service *api.Service
service *v1.Service
expectErr bool
expectCreateAttempt bool
}{
{
service: &api.Service{
ObjectMeta: api.ObjectMeta{
service: &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "no-external-balancer",
Namespace: "default",
},
Spec: api.ServiceSpec{
Type: api.ServiceTypeClusterIP,
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeClusterIP,
},
},
expectErr: false,
expectCreateAttempt: false,
},
{
service: &api.Service{
ObjectMeta: api.ObjectMeta{
service: &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "udp-service",
Namespace: "default",
SelfLink: testapi.Default.SelfLink("services", "udp-service"),
},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
Protocol: api.ProtocolUDP,
Protocol: v1.ProtocolUDP,
}},
Type: api.ServiceTypeLoadBalancer,
Type: v1.ServiceTypeLoadBalancer,
},
},
expectErr: false,
expectCreateAttempt: true,
},
{
service: &api.Service{
ObjectMeta: api.ObjectMeta{
service: &v1.Service{
ObjectMeta: v1.ObjectMeta{
Name: "basic-service1",
Namespace: "default",
SelfLink: testapi.Default.SelfLink("services", "basic-service1"),
},
Spec: api.ServiceSpec{
Ports: []api.ServicePort{{
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: 80,
Protocol: api.ProtocolTCP,
Protocol: v1.ProtocolTCP,
}},
Type: api.ServiceTypeLoadBalancer,
Type: v1.ServiceTypeLoadBalancer,
},
},
expectErr: false,
@@ -147,65 +147,65 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
hosts := []string{"node0", "node1", "node73"}
table := []struct {
services []*api.Service
services []*v1.Service
expectedUpdateCalls []fakecloud.FakeUpdateBalancerCall
}{
{
// No services present: no calls should be made.
services: []*api.Service{},
services: []*v1.Service{},
expectedUpdateCalls: nil,
},
{
// Services do not have external load balancers: no calls should be made.
services: []*api.Service{
newService("s0", "111", api.ServiceTypeClusterIP),
newService("s1", "222", api.ServiceTypeNodePort),
services: []*v1.Service{
newService("s0", "111", v1.ServiceTypeClusterIP),
newService("s1", "222", v1.ServiceTypeNodePort),
},
expectedUpdateCalls: nil,
},
{
// One service has an external load balancer: one call should be made.
services: []*api.Service{
newService("s0", "333", api.ServiceTypeLoadBalancer),
services: []*v1.Service{
newService("s0", "333", v1.ServiceTypeLoadBalancer),
},
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s0", "333", api.ServiceTypeLoadBalancer), hosts},
{newService("s0", "333", v1.ServiceTypeLoadBalancer), hosts},
},
},
{
// Three services have an external load balancer: three calls.
services: []*api.Service{
newService("s0", "444", api.ServiceTypeLoadBalancer),
newService("s1", "555", api.ServiceTypeLoadBalancer),
newService("s2", "666", api.ServiceTypeLoadBalancer),
services: []*v1.Service{
newService("s0", "444", v1.ServiceTypeLoadBalancer),
newService("s1", "555", v1.ServiceTypeLoadBalancer),
newService("s2", "666", v1.ServiceTypeLoadBalancer),
},
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s0", "444", api.ServiceTypeLoadBalancer), hosts},
{newService("s1", "555", api.ServiceTypeLoadBalancer), hosts},
{newService("s2", "666", api.ServiceTypeLoadBalancer), hosts},
{newService("s0", "444", v1.ServiceTypeLoadBalancer), hosts},
{newService("s1", "555", v1.ServiceTypeLoadBalancer), hosts},
{newService("s2", "666", v1.ServiceTypeLoadBalancer), hosts},
},
},
{
// Two services have an external load balancer and two don't: two calls.
services: []*api.Service{
newService("s0", "777", api.ServiceTypeNodePort),
newService("s1", "888", api.ServiceTypeLoadBalancer),
newService("s3", "999", api.ServiceTypeLoadBalancer),
newService("s4", "123", api.ServiceTypeClusterIP),
services: []*v1.Service{
newService("s0", "777", v1.ServiceTypeNodePort),
newService("s1", "888", v1.ServiceTypeLoadBalancer),
newService("s3", "999", v1.ServiceTypeLoadBalancer),
newService("s4", "123", v1.ServiceTypeClusterIP),
},
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s1", "888", api.ServiceTypeLoadBalancer), hosts},
{newService("s3", "999", api.ServiceTypeLoadBalancer), hosts},
{newService("s1", "888", v1.ServiceTypeLoadBalancer), hosts},
{newService("s3", "999", v1.ServiceTypeLoadBalancer), hosts},
},
},
{
// One service has an external load balancer and one is nil: one call.
services: []*api.Service{
newService("s0", "234", api.ServiceTypeLoadBalancer),
services: []*v1.Service{
newService("s0", "234", v1.ServiceTypeLoadBalancer),
nil,
},
expectedUpdateCalls: []fakecloud.FakeUpdateBalancerCall{
{newService("s0", "234", api.ServiceTypeLoadBalancer), hosts},
{newService("s0", "234", v1.ServiceTypeLoadBalancer), hosts},
},
},
}
@@ -218,7 +218,7 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {
controller.init()
cloud.Calls = nil // ignore any cloud calls made in init()

var services []*api.Service
var services []*v1.Service
for _, service := range item.services {
services = append(services, service)
}
@@ -233,43 +233,43 @@ func TestUpdateNodesInExternalLoadBalancer(t *testing.T) {

func TestHostsFromNodeList(t *testing.T) {
tests := []struct {
nodes *api.NodeList
nodes *v1.NodeList
expectedHosts []string
}{
{
nodes: &api.NodeList{},
nodes: &v1.NodeList{},
expectedHosts: []string{},
},
{
nodes: &api.NodeList{
Items: []api.Node{
nodes: &v1.NodeList{
Items: []v1.Node{
{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Status: api.NodeStatus{Phase: api.NodeRunning},
ObjectMeta: v1.ObjectMeta{Name: "foo"},
Status: v1.NodeStatus{Phase: v1.NodeRunning},
},
{
ObjectMeta: api.ObjectMeta{Name: "bar"},
Status: api.NodeStatus{Phase: api.NodeRunning},
ObjectMeta: v1.ObjectMeta{Name: "bar"},
Status: v1.NodeStatus{Phase: v1.NodeRunning},
},
},
},
expectedHosts: []string{"foo", "bar"},
},
{
nodes: &api.NodeList{
Items: []api.Node{
nodes: &v1.NodeList{
Items: []v1.Node{
{
ObjectMeta: api.ObjectMeta{Name: "foo"},
Status: api.NodeStatus{Phase: api.NodeRunning},
ObjectMeta: v1.ObjectMeta{Name: "foo"},
Status: v1.NodeStatus{Phase: v1.NodeRunning},
},
{
ObjectMeta: api.ObjectMeta{Name: "bar"},
Status: api.NodeStatus{Phase: api.NodeRunning},
ObjectMeta: v1.ObjectMeta{Name: "bar"},
Status: v1.NodeStatus{Phase: v1.NodeRunning},
},
{
ObjectMeta: api.ObjectMeta{Name: "unschedulable"},
Spec: api.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{Phase: api.NodeRunning},
ObjectMeta: v1.ObjectMeta{Name: "unschedulable"},
Spec: v1.NodeSpec{Unschedulable: true},
Status: v1.NodeStatus{Phase: v1.NodeRunning},
},
},
},
@@ -287,20 +287,20 @@ func TestHostsFromNodeList(t *testing.T) {

func TestGetNodeConditionPredicate(t *testing.T) {
tests := []struct {
node api.Node
node v1.Node
expectAccept bool
name string
}{
{
node: api.Node{},
node: v1.Node{},
expectAccept: false,
name: "empty",
},
{
node: api.Node{
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
node: v1.Node{
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
},
@@ -308,11 +308,11 @@ func TestGetNodeConditionPredicate(t *testing.T) {
name: "basic",
},
{
node: api.Node{
Spec: api.NodeSpec{Unschedulable: true},
Status: api.NodeStatus{
Conditions: []api.NodeCondition{
{Type: api.NodeReady, Status: api.ConditionTrue},
node: v1.Node{
Spec: v1.NodeSpec{Unschedulable: true},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{Type: v1.NodeReady, Status: v1.ConditionTrue},
},
},
},

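The fixtures above suggest the behavior under test: TestHostsFromNodeList includes an "unschedulable" node that should not contribute a host, and TestGetNodeConditionPredicate checks acceptance against a true NodeReady condition. A rough sketch of a predicate consistent with those fixtures, filling in the expectations the hunk boundaries cut off; the controller's real predicate is not reproduced in these hunks:

func acceptableNode(node *v1.Node) bool {
	// Per the "unschedulable" fixture: such nodes should not be picked.
	if node.Spec.Unschedulable {
		return false
	}
	// Per the "basic" fixture: accept on a true NodeReady condition.
	for _, cond := range node.Status.Conditions {
		if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
			return true
		}
	}
	return false
}
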
@@ -21,11 +21,11 @@ import (
"time"

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
apierrs "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/meta"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/controller/informers"
utilerrors "k8s.io/kubernetes/pkg/util/errors"
"k8s.io/kubernetes/pkg/util/metrics"
@@ -46,7 +46,7 @@ func nameIndexFunc(obj interface{}) ([]string, error) {
// ServiceAccountsControllerOptions contains options for running a ServiceAccountsController
type ServiceAccountsControllerOptions struct {
// ServiceAccounts is the list of service accounts to ensure exist in every namespace
ServiceAccounts []api.ServiceAccount
ServiceAccounts []v1.ServiceAccount

// ServiceAccountResync is the interval between full resyncs of ServiceAccounts.
// If non-zero, all service accounts will be re-listed this often.
@@ -61,8 +61,8 @@ type ServiceAccountsControllerOptions struct {

func DefaultServiceAccountsControllerOptions() ServiceAccountsControllerOptions {
return ServiceAccountsControllerOptions{
ServiceAccounts: []api.ServiceAccount{
{ObjectMeta: api.ObjectMeta{Name: "default"}},
ServiceAccounts: []v1.ServiceAccount{
{ObjectMeta: v1.ObjectMeta{Name: "default"}},
},
}
}
@@ -99,7 +99,7 @@ func NewServiceAccountsController(saInformer informers.ServiceAccountInformer, n
// ServiceAccountsController manages ServiceAccount objects inside Namespaces
type ServiceAccountsController struct {
client clientset.Interface
serviceAccountsToEnsure []api.ServiceAccount
serviceAccountsToEnsure []v1.ServiceAccount

// To allow injection for testing.
syncHandler func(key string) error
@@ -133,14 +133,14 @@ func (c *ServiceAccountsController) Run(workers int, stopCh <-chan struct{}) {

// serviceAccountDeleted reacts to a ServiceAccount deletion by recreating a default ServiceAccount in the namespace if needed
func (c *ServiceAccountsController) serviceAccountDeleted(obj interface{}) {
sa, ok := obj.(*api.ServiceAccount)
sa, ok := obj.(*v1.ServiceAccount)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
sa, ok = tombstone.Obj.(*api.ServiceAccount)
sa, ok = tombstone.Obj.(*v1.ServiceAccount)
if !ok {
utilruntime.HandleError(fmt.Errorf("Tombstone contained object that is not a ServiceAccount %#v", obj))
return
@@ -151,13 +151,13 @@ func (c *ServiceAccountsController) serviceAccountDeleted(obj interface{}) {

// namespaceAdded reacts to a Namespace creation by creating a default ServiceAccount object
func (c *ServiceAccountsController) namespaceAdded(obj interface{}) {
namespace := obj.(*api.Namespace)
namespace := obj.(*v1.Namespace)
c.queue.Add(namespace.Name)
}

// namespaceUpdated reacts to a Namespace update (or re-list) by creating a default ServiceAccount in the namespace if needed
func (c *ServiceAccountsController) namespaceUpdated(oldObj interface{}, newObj interface{}) {
newNamespace := newObj.(*api.Namespace)
newNamespace := newObj.(*v1.Namespace)
c.queue.Add(newNamespace.Name)
}

@@ -198,7 +198,7 @@ func (c *ServiceAccountsController) syncNamespace(key string) error {
if err != nil {
return err
}
if ns.Status.Phase != api.NamespaceActive {
if ns.Status.Phase != v1.NamespaceActive {
// If namespace is not active, we shouldn't try to create anything
return nil
}

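serviceAccountDeleted above follows the standard informer tombstone pattern: a delete notification can carry either the object itself or a cache.DeletedFinalStateUnknown wrapper when the watch missed the final state. The same logic as a standalone sketch, using the file's imports (the helper name is illustrative):

func serviceAccountFromDelete(obj interface{}) (*v1.ServiceAccount, bool) {
	// Common case: the notification carries the object itself.
	if sa, ok := obj.(*v1.ServiceAccount); ok {
		return sa, true
	}
	// Otherwise unwrap the tombstone left by a missed final state.
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false
	}
	sa, ok := tombstone.Obj.(*v1.ServiceAccount)
	return sa, ok
}
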
@@ -20,9 +20,9 @@ import (
"testing"
"time"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
"k8s.io/kubernetes/pkg/client/testing/core"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
@@ -35,39 +35,39 @@ type serverResponse struct {
}

func TestServiceAccountCreation(t *testing.T) {
ns := api.NamespaceDefault
ns := v1.NamespaceDefault

defaultName := "default"
managedName := "managed"

activeNS := &api.Namespace{
ObjectMeta: api.ObjectMeta{Name: ns},
Status: api.NamespaceStatus{
Phase: api.NamespaceActive,
activeNS := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{Name: ns},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceActive,
},
}
terminatingNS := &api.Namespace{
ObjectMeta: api.ObjectMeta{Name: ns},
Status: api.NamespaceStatus{
Phase: api.NamespaceTerminating,
terminatingNS := &v1.Namespace{
ObjectMeta: v1.ObjectMeta{Name: ns},
Status: v1.NamespaceStatus{
Phase: v1.NamespaceTerminating,
},
}
defaultServiceAccount := &api.ServiceAccount{
ObjectMeta: api.ObjectMeta{
defaultServiceAccount := &v1.ServiceAccount{
ObjectMeta: v1.ObjectMeta{
Name: defaultName,
Namespace: ns,
ResourceVersion: "1",
},
}
managedServiceAccount := &api.ServiceAccount{
ObjectMeta: api.ObjectMeta{
managedServiceAccount := &v1.ServiceAccount{
ObjectMeta: v1.ObjectMeta{
Name: managedName,
Namespace: ns,
ResourceVersion: "1",
},
}
unmanagedServiceAccount := &api.ServiceAccount{
ObjectMeta: api.ObjectMeta{
unmanagedServiceAccount := &v1.ServiceAccount{
ObjectMeta: v1.ObjectMeta{
Name: "other-unmanaged",
Namespace: ns,
ResourceVersion: "1",
@@ -75,54 +75,54 @@ func TestServiceAccountCreation(t *testing.T) {
}

testcases := map[string]struct {
ExistingNamespace *api.Namespace
ExistingServiceAccounts []*api.ServiceAccount
ExistingNamespace *v1.Namespace
ExistingServiceAccounts []*v1.ServiceAccount

AddedNamespace *api.Namespace
UpdatedNamespace *api.Namespace
DeletedServiceAccount *api.ServiceAccount
AddedNamespace *v1.Namespace
UpdatedNamespace *v1.Namespace
DeletedServiceAccount *v1.ServiceAccount

ExpectCreatedServiceAccounts []string
}{
"new active namespace missing serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{},
ExistingServiceAccounts: []*v1.ServiceAccount{},
AddedNamespace: activeNS,
ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(),
},
"new active namespace missing serviceaccount": {
ExistingServiceAccounts: []*api.ServiceAccount{managedServiceAccount},
ExistingServiceAccounts: []*v1.ServiceAccount{managedServiceAccount},
AddedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{defaultName},
},
"new active namespace with serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount},
ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount, managedServiceAccount},
AddedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{},
},

"new terminating namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{},
ExistingServiceAccounts: []*v1.ServiceAccount{},
AddedNamespace: terminatingNS,
ExpectCreatedServiceAccounts: []string{},
},

"updated active namespace missing serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{},
ExistingServiceAccounts: []*v1.ServiceAccount{},
UpdatedNamespace: activeNS,
ExpectCreatedServiceAccounts: sets.NewString(defaultName, managedName).List(),
},
"updated active namespace missing serviceaccount": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount},
ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount},
UpdatedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{managedName},
},
"updated active namespace with serviceaccounts": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount},
ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount, managedServiceAccount},
UpdatedNamespace: activeNS,
ExpectCreatedServiceAccounts: []string{},
},
"updated terminating namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{},
ExistingServiceAccounts: []*v1.ServiceAccount{},
UpdatedNamespace: terminatingNS,
ExpectCreatedServiceAccounts: []string{},
},
@@ -132,7 +132,7 @@ func TestServiceAccountCreation(t *testing.T) {
ExpectCreatedServiceAccounts: []string{},
},
"deleted serviceaccount with active namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{managedServiceAccount},
ExistingServiceAccounts: []*v1.ServiceAccount{managedServiceAccount},
ExistingNamespace: activeNS,
DeletedServiceAccount: defaultServiceAccount,
ExpectCreatedServiceAccounts: []string{defaultName},
@@ -143,7 +143,7 @@ func TestServiceAccountCreation(t *testing.T) {
ExpectCreatedServiceAccounts: []string{},
},
"deleted unmanaged serviceaccount with active namespace": {
ExistingServiceAccounts: []*api.ServiceAccount{defaultServiceAccount, managedServiceAccount},
ExistingServiceAccounts: []*v1.ServiceAccount{defaultServiceAccount, managedServiceAccount},
ExistingNamespace: activeNS,
DeletedServiceAccount: unmanagedServiceAccount,
ExpectCreatedServiceAccounts: []string{},
@@ -157,11 +157,11 @@ func TestServiceAccountCreation(t *testing.T) {

for k, tc := range testcases {
client := fake.NewSimpleClientset(defaultServiceAccount, managedServiceAccount)
informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), controller.NoResyncPeriodFunc())
informers := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), nil, controller.NoResyncPeriodFunc())
options := DefaultServiceAccountsControllerOptions()
options.ServiceAccounts = []api.ServiceAccount{
{ObjectMeta: api.ObjectMeta{Name: defaultName}},
{ObjectMeta: api.ObjectMeta{Name: managedName}},
options.ServiceAccounts = []v1.ServiceAccount{
{ObjectMeta: v1.ObjectMeta{Name: defaultName}},
{ObjectMeta: v1.ObjectMeta{Name: managedName}},
}
controller := NewServiceAccountsController(informers.ServiceAccounts(), informers.Namespaces(), client, options)
controller.saLister = &cache.StoreToServiceAccountLister{Indexer: cache.NewIndexer(cache.DeletionHandlingMetaNamespaceKeyFunc, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc})}
@@ -220,7 +220,7 @@ func TestServiceAccountCreation(t *testing.T) {
t.Errorf("%s: Unexpected action %s", k, action)
break
}
createdAccount := action.(core.CreateAction).GetObject().(*api.ServiceAccount)
createdAccount := action.(core.CreateAction).GetObject().(*v1.ServiceAccount)
if createdAccount.Name != expectedName {
t.Errorf("%s: Expected %s to be created, got %s", k, expectedName, createdAccount.Name)
}

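The test above also adopts the new NewSharedInformerFactory signature, which gains a second client argument ahead of the resync period; every call site in this commit passes nil for it. A sketch of the updated wiring, with the signature inferred from these call sites rather than from the factory's definition:

client := fake.NewSimpleClientset()
// nil matches this commit's call sites for the newly added second argument.
sharedInformers := informers.NewSharedInformerFactory(client, nil, controller.NoResyncPeriodFunc())
_ = NewServiceAccountsController(sharedInformers.ServiceAccounts(), sharedInformers.Namespaces(), client, DefaultServiceAccountsControllerOptions())
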
@@ -18,7 +18,8 @@ package serviceaccount

import (
"k8s.io/kubernetes/pkg/api"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/api/v1"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/registry/core/secret"
secretetcd "k8s.io/kubernetes/pkg/registry/core/secret/etcd"
serviceaccountregistry "k8s.io/kubernetes/pkg/registry/core/serviceaccount"
@@ -40,10 +41,10 @@ type clientGetter struct {
func NewGetterFromClient(c clientset.Interface) serviceaccount.ServiceAccountTokenGetter {
return clientGetter{c}
}
func (c clientGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) {
func (c clientGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) {
return c.client.Core().ServiceAccounts(namespace).Get(name)
}
func (c clientGetter) GetSecret(namespace, name string) (*api.Secret, error) {
func (c clientGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
return c.client.Core().Secrets(namespace).Get(name)
}

@@ -58,13 +59,27 @@ type registryGetter struct {
func NewGetterFromRegistries(serviceAccounts serviceaccountregistry.Registry, secrets secret.Registry) serviceaccount.ServiceAccountTokenGetter {
return &registryGetter{serviceAccounts, secrets}
}
func (r *registryGetter) GetServiceAccount(namespace, name string) (*api.ServiceAccount, error) {
func (r *registryGetter) GetServiceAccount(namespace, name string) (*v1.ServiceAccount, error) {
ctx := api.WithNamespace(api.NewContext(), namespace)
return r.serviceAccounts.GetServiceAccount(ctx, name)
internalServiceAccount, err := r.serviceAccounts.GetServiceAccount(ctx, name)
if err != nil {
return nil, err
}
func (r *registryGetter) GetSecret(namespace, name string) (*api.Secret, error) {
v1ServiceAccount := v1.ServiceAccount{}
err = v1.Convert_api_ServiceAccount_To_v1_ServiceAccount(internalServiceAccount, &v1ServiceAccount, nil)
return &v1ServiceAccount, err

}
func (r *registryGetter) GetSecret(namespace, name string) (*v1.Secret, error) {
ctx := api.WithNamespace(api.NewContext(), namespace)
return r.secrets.GetSecret(ctx, name)
internalSecret, err := r.secrets.GetSecret(ctx, name)
if err != nil {
return nil, err
}
v1Secret := v1.Secret{}
err = v1.Convert_api_Secret_To_v1_Secret(internalSecret, &v1Secret, nil)
return &v1Secret, err

}

// NewGetterFromStorageInterface returns a ServiceAccountTokenGetter that

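The registry-backed getter above now has to convert: the registries still return internal-version objects, while the ServiceAccountTokenGetter interface now speaks v1. The conversion step, factored into a helper sketch using the file's imports (the helper name is illustrative; the converter call is the one used in GetServiceAccount above):

func internalToV1ServiceAccount(in *api.ServiceAccount) (*v1.ServiceAccount, error) {
	out := v1.ServiceAccount{}
	// Generated conversion, as invoked in GetServiceAccount above.
	if err := v1.Convert_api_ServiceAccount_To_v1_ServiceAccount(in, &out, nil); err != nil {
		return nil, err
	}
	return &out, nil
}
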
@@ -24,8 +24,9 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
apierrors "k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
clientretry "k8s.io/kubernetes/pkg/client/retry"
"k8s.io/kubernetes/pkg/fields"
"k8s.io/kubernetes/pkg/registry/core/secret"
@@ -91,14 +92,14 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions

e.serviceAccounts, e.serviceAccountController = cache.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
return e.client.Core().ServiceAccounts(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
return e.client.Core().ServiceAccounts(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
return e.client.Core().ServiceAccounts(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
return e.client.Core().ServiceAccounts(v1.NamespaceAll).Watch(options)
},
},
&api.ServiceAccount{},
&v1.ServiceAccount{},
options.ServiceAccountResync,
cache.ResourceEventHandlerFuncs{
AddFunc: e.queueServiceAccountSync,
@@ -107,19 +108,19 @@ func NewTokensController(cl clientset.Interface, options TokensControllerOptions
},
)

tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(api.SecretTypeServiceAccountToken)})
tokenSelector := fields.SelectorFromSet(map[string]string{api.SecretTypeField: string(v1.SecretTypeServiceAccountToken)})
e.secrets, e.secretController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector
return e.client.Core().Secrets(api.NamespaceAll).List(options)
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
options.FieldSelector = tokenSelector.String()
return e.client.Core().Secrets(v1.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
options.FieldSelector = tokenSelector
return e.client.Core().Secrets(api.NamespaceAll).Watch(options)
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
options.FieldSelector = tokenSelector.String()
return e.client.Core().Secrets(v1.NamespaceAll).Watch(options)
},
},
&api.Secret{},
&v1.Secret{},
options.SecretResync,
cache.ResourceEventHandlerFuncs{
AddFunc: e.queueSecretSync,
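
Note the tokenSelector.String() calls above: the versioned v1.ListOptions carries FieldSelector as a plain string, where the internal api.ListOptions took a fields.Selector value directly, so the selector is stringified at each use. A minimal sketch of the new shape, reusing the file's imports; the client variable stands in for the controller's clientset:

tokenSelector := fields.SelectorFromSet(map[string]string{
	api.SecretTypeField: string(v1.SecretTypeServiceAccountToken),
})
// Versioned list options serialize the selector as a string.
opts := v1.ListOptions{FieldSelector: tokenSelector.String()}
if secretList, err := client.Core().Secrets(v1.NamespaceAll).List(opts); err == nil {
	glog.V(4).Infof("found %d token secrets", len(secretList.Items))
}
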
@@ -190,13 +191,13 @@ func (e *TokensController) Run(workers int, stopCh <-chan struct{}) {
}

func (e *TokensController) queueServiceAccountSync(obj interface{}) {
if serviceAccount, ok := obj.(*api.ServiceAccount); ok {
if serviceAccount, ok := obj.(*v1.ServiceAccount); ok {
e.syncServiceAccountQueue.Add(makeServiceAccountKey(serviceAccount))
}
}

func (e *TokensController) queueServiceAccountUpdateSync(oldObj interface{}, newObj interface{}) {
if serviceAccount, ok := newObj.(*api.ServiceAccount); ok {
if serviceAccount, ok := newObj.(*v1.ServiceAccount); ok {
e.syncServiceAccountQueue.Add(makeServiceAccountKey(serviceAccount))
}
}
@@ -219,13 +220,13 @@ func (e *TokensController) retryOrForget(queue workqueue.RateLimitingInterface,
}

func (e *TokensController) queueSecretSync(obj interface{}) {
if secret, ok := obj.(*api.Secret); ok {
if secret, ok := obj.(*v1.Secret); ok {
e.syncSecretQueue.Add(makeSecretQueueKey(secret))
}
}

func (e *TokensController) queueSecretUpdateSync(oldObj interface{}, newObj interface{}) {
if secret, ok := newObj.(*api.Secret); ok {
if secret, ok := newObj.(*v1.Secret); ok {
e.syncSecretQueue.Add(makeSecretQueueKey(secret))
}
}
@@ -256,7 +257,7 @@ func (e *TokensController) syncServiceAccount() {
case sa == nil:
// service account no longer exists, so delete related tokens
glog.V(4).Infof("syncServiceAccount(%s/%s), service account deleted, removing tokens", saInfo.namespace, saInfo.name)
sa = &api.ServiceAccount{ObjectMeta: api.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}}
sa = &v1.ServiceAccount{ObjectMeta: v1.ObjectMeta{Namespace: saInfo.namespace, Name: saInfo.name, UID: saInfo.uid}}
if retriable, err := e.deleteTokens(sa); err != nil {
glog.Errorf("error deleting serviceaccount tokens for %s/%s: %v", saInfo.namespace, saInfo.name, err)
retry = retriable
@@ -328,7 +329,7 @@ func (e *TokensController) syncSecret() {
}
}

func (e *TokensController) deleteTokens(serviceAccount *api.ServiceAccount) ( /*retry*/ bool, error) {
func (e *TokensController) deleteTokens(serviceAccount *v1.ServiceAccount) ( /*retry*/ bool, error) {
tokens, err := e.listTokenSecrets(serviceAccount)
if err != nil {
// don't retry on cache lookup errors
@@ -349,9 +350,9 @@ func (e *TokensController) deleteTokens(serviceAccount *api.ServiceAccount) ( /*
}

func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry*/ bool, error) {
var opts *api.DeleteOptions
var opts *v1.DeleteOptions
if len(uid) > 0 {
opts = &api.DeleteOptions{Preconditions: &api.Preconditions{UID: &uid}}
opts = &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
}
err := e.client.Core().Secrets(ns).Delete(name, opts)
// NotFound doesn't need a retry (it's already been deleted)
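
deleteToken above attaches a UID precondition whenever the caller knows one, so the delete succeeds only if the live secret is still the exact object that was observed; a secret deleted and recreated under the same name is left alone. A sketch of the call shape, with illustrative names and values:

uid := types.UID("23456") // UID captured when the token secret was first seen
opts := &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &uid}}
// The server rejects the delete if the current object's UID differs.
if err := client.Core().Secrets("default").Delete("token-secret-1", opts); err != nil {
	glog.Errorf("conditional delete failed: %v", err)
}
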
@@ -364,7 +365,7 @@ func (e *TokensController) deleteToken(ns, name string, uid types.UID) ( /*retry
}

// ensureReferencedToken makes sure at least one ServiceAccountToken secret exists, and is included in the serviceAccount's Secrets list
func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAccount) ( /* retry */ bool, error) {
func (e *TokensController) ensureReferencedToken(serviceAccount *v1.ServiceAccount) ( /* retry */ bool, error) {
if len(serviceAccount.Secrets) > 0 {
allSecrets, err := e.listTokenSecrets(serviceAccount)
if err != nil {
@@ -396,16 +397,16 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
}

// Build the secret
secret := &api.Secret{
ObjectMeta: api.ObjectMeta{
secret := &v1.Secret{
ObjectMeta: v1.ObjectMeta{
Name: secret.Strategy.GenerateName(fmt.Sprintf("%s-token-", serviceAccount.Name)),
Namespace: serviceAccount.Namespace,
Annotations: map[string]string{
api.ServiceAccountNameKey: serviceAccount.Name,
api.ServiceAccountUIDKey: string(serviceAccount.UID),
v1.ServiceAccountNameKey: serviceAccount.Name,
v1.ServiceAccountUIDKey: string(serviceAccount.UID),
},
},
Type: api.SecretTypeServiceAccountToken,
Type: v1.SecretTypeServiceAccountToken,
Data: map[string][]byte{},
}

@@ -415,10 +416,10 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
// retriable error
return true, err
}
secret.Data[api.ServiceAccountTokenKey] = []byte(token)
secret.Data[api.ServiceAccountNamespaceKey] = []byte(serviceAccount.Namespace)
secret.Data[v1.ServiceAccountTokenKey] = []byte(token)
secret.Data[v1.ServiceAccountNamespaceKey] = []byte(serviceAccount.Namespace)
if e.rootCA != nil && len(e.rootCA) > 0 {
secret.Data[api.ServiceAccountRootCAKey] = e.rootCA
secret.Data[v1.ServiceAccountRootCAKey] = e.rootCA
}

// Save the secret
@@ -431,12 +432,12 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
// This prevents the service account update (below) triggering another token creation, if the referenced token couldn't be found in the store
e.secrets.Add(createdToken)

liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, api.ObjectReference{Name: secret.Name})
liveServiceAccount.Secrets = append(liveServiceAccount.Secrets, v1.ObjectReference{Name: secret.Name})

if _, err = serviceAccounts.Update(liveServiceAccount); err != nil {
// we weren't able to use the token, try to clean it up.
glog.V(2).Infof("deleting secret %s/%s because reference couldn't be added (%v)", secret.Namespace, secret.Name, err)
deleteOpts := &api.DeleteOptions{Preconditions: &api.Preconditions{UID: &createdToken.UID}}
deleteOpts := &v1.DeleteOptions{Preconditions: &v1.Preconditions{UID: &createdToken.UID}}
if deleteErr := e.client.Core().Secrets(createdToken.Namespace).Delete(createdToken.Name, deleteOpts); deleteErr != nil {
glog.Error(deleteErr) // if we fail, just log it
}
@@ -454,20 +455,20 @@ func (e *TokensController) ensureReferencedToken(serviceAccount *api.ServiceAcco
return false, nil
}

func (e *TokensController) secretUpdateNeeded(secret *api.Secret) (bool, bool, bool) {
caData := secret.Data[api.ServiceAccountRootCAKey]
func (e *TokensController) secretUpdateNeeded(secret *v1.Secret) (bool, bool, bool) {
caData := secret.Data[v1.ServiceAccountRootCAKey]
needsCA := len(e.rootCA) > 0 && bytes.Compare(caData, e.rootCA) != 0

needsNamespace := len(secret.Data[api.ServiceAccountNamespaceKey]) == 0
needsNamespace := len(secret.Data[v1.ServiceAccountNamespaceKey]) == 0

tokenData := secret.Data[api.ServiceAccountTokenKey]
tokenData := secret.Data[v1.ServiceAccountTokenKey]
needsToken := len(tokenData) == 0

return needsCA, needsNamespace, needsToken
}

// generateTokenIfNeeded populates the token data for the given Secret if not already set
func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAccount, cachedSecret *api.Secret) ( /* retry */ bool, error) {
func (e *TokensController) generateTokenIfNeeded(serviceAccount *v1.ServiceAccount, cachedSecret *v1.Secret) ( /* retry */ bool, error) {
// Check the cached secret to see if changes are needed
if needsCA, needsNamespace, needsToken := e.secretUpdateNeeded(cachedSecret); !needsCA && !needsToken && !needsNamespace {
return false, nil
@@ -502,11 +503,11 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco

// Set the CA
if needsCA {
liveSecret.Data[api.ServiceAccountRootCAKey] = e.rootCA
liveSecret.Data[v1.ServiceAccountRootCAKey] = e.rootCA
}
// Set the namespace
if needsNamespace {
liveSecret.Data[api.ServiceAccountNamespaceKey] = []byte(liveSecret.Namespace)
liveSecret.Data[v1.ServiceAccountNamespaceKey] = []byte(liveSecret.Namespace)
}

// Generate the token
@@ -515,12 +516,12 @@ func (e *TokensController) generateTokenIfNeeded(serviceAccount *api.ServiceAcco
if err != nil {
return false, err
}
liveSecret.Data[api.ServiceAccountTokenKey] = []byte(token)
liveSecret.Data[v1.ServiceAccountTokenKey] = []byte(token)
}

// Set annotations
liveSecret.Annotations[api.ServiceAccountNameKey] = serviceAccount.Name
liveSecret.Annotations[api.ServiceAccountUIDKey] = string(serviceAccount.UID)
liveSecret.Annotations[v1.ServiceAccountNameKey] = serviceAccount.Name
liveSecret.Annotations[v1.ServiceAccountUIDKey] = string(serviceAccount.UID)

// Save the secret
_, err = secrets.Update(liveSecret)
@@ -560,7 +561,7 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
}

// Remove the secret
secrets := []api.ObjectReference{}
secrets := []v1.ObjectReference{}
for _, s := range serviceAccount.Secrets {
if s.Name != secretName {
secrets = append(secrets, s)
@@ -575,16 +576,16 @@ func (e *TokensController) removeSecretReference(saNamespace string, saName stri
return err
}

func (e *TokensController) getServiceAccount(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*api.ServiceAccount, error) {
func (e *TokensController) getServiceAccount(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*v1.ServiceAccount, error) {
// Look up in cache
obj, exists, err := e.serviceAccounts.GetByKey(makeCacheKey(ns, name))
if err != nil {
return nil, err
}
if exists {
sa, ok := obj.(*api.ServiceAccount)
sa, ok := obj.(*v1.ServiceAccount)
if !ok {
return nil, fmt.Errorf("expected *api.ServiceAccount, got %#v", sa)
return nil, fmt.Errorf("expected *v1.ServiceAccount, got %#v", sa)
}
// Ensure UID matches if given
if len(uid) == 0 || uid == sa.UID {
@@ -611,16 +612,16 @@ func (e *TokensController) getServiceAccount(ns string, name string, uid types.U
return nil, nil
}

func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*api.Secret, error) {
func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetchOnCacheMiss bool) (*v1.Secret, error) {
// Look up in cache
obj, exists, err := e.secrets.GetByKey(makeCacheKey(ns, name))
if err != nil {
return nil, err
}
if exists {
secret, ok := obj.(*api.Secret)
secret, ok := obj.(*v1.Secret)
if !ok {
return nil, fmt.Errorf("expected *api.Secret, got %#v", secret)
return nil, fmt.Errorf("expected *v1.Secret, got %#v", secret)
}
// Ensure UID matches if given
if len(uid) == 0 || uid == secret.UID {
@@ -649,15 +650,15 @@ func (e *TokensController) getSecret(ns string, name string, uid types.UID, fetc

// listTokenSecrets returns a list of all of the ServiceAccountToken secrets that
// reference the given service account's name and uid
func (e *TokensController) listTokenSecrets(serviceAccount *api.ServiceAccount) ([]*api.Secret, error) {
func (e *TokensController) listTokenSecrets(serviceAccount *v1.ServiceAccount) ([]*v1.Secret, error) {
namespaceSecrets, err := e.secrets.ByIndex("namespace", serviceAccount.Namespace)
if err != nil {
return nil, err
}

items := []*api.Secret{}
items := []*v1.Secret{}
for _, obj := range namespaceSecrets {
secret := obj.(*api.Secret)
secret := obj.(*v1.Secret)

if serviceaccount.IsServiceAccountToken(secret, serviceAccount) {
items = append(items, secret)
@@ -669,14 +670,14 @@ func (e *TokensController) listTokenSecrets(serviceAccount *api.ServiceAccount)
// serviceAccountNameAndUID is a helper method to get the ServiceAccount Name and UID from the given secret
// Returns "","" if the secret is not a ServiceAccountToken secret
// If the name or uid annotation is missing, "" is returned instead
func serviceAccountNameAndUID(secret *api.Secret) (string, string) {
if secret.Type != api.SecretTypeServiceAccountToken {
func serviceAccountNameAndUID(secret *v1.Secret) (string, string) {
if secret.Type != v1.SecretTypeServiceAccountToken {
return "", ""
}
return secret.Annotations[api.ServiceAccountNameKey], secret.Annotations[api.ServiceAccountUIDKey]
return secret.Annotations[v1.ServiceAccountNameKey], secret.Annotations[v1.ServiceAccountUIDKey]
}

func getSecretReferences(serviceAccount *api.ServiceAccount) sets.String {
func getSecretReferences(serviceAccount *v1.ServiceAccount) sets.String {
references := sets.NewString()
for _, secret := range serviceAccount.Secrets {
references.Insert(secret.Name)
@@ -693,7 +694,7 @@ type serviceAccountQueueKey struct {
uid types.UID
}

func makeServiceAccountKey(sa *api.ServiceAccount) interface{} {
func makeServiceAccountKey(sa *v1.ServiceAccount) interface{} {
return serviceAccountQueueKey{
namespace: sa.Namespace,
name: sa.Name,
@@ -721,13 +722,13 @@ type secretQueueKey struct {
saUID types.UID
}

func makeSecretQueueKey(secret *api.Secret) interface{} {
func makeSecretQueueKey(secret *v1.Secret) interface{} {
return secretQueueKey{
namespace: secret.Namespace,
name: secret.Name,
uid: secret.UID,
saName: secret.Annotations[api.ServiceAccountNameKey],
saUID: types.UID(secret.Annotations[api.ServiceAccountUIDKey]),
saName: secret.Annotations[v1.ServiceAccountNameKey],
saUID: types.UID(secret.Annotations[v1.ServiceAccountUIDKey]),
}
}

@@ -27,59 +27,60 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
apierrors "k8s.io/kubernetes/pkg/api/errors"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
|
||||
"k8s.io/kubernetes/pkg/client/testing/core"
|
||||
"k8s.io/kubernetes/pkg/runtime"
|
||||
utilrand "k8s.io/kubernetes/pkg/util/rand"
|
||||
)
|
||||
|
||||
type testGenerator struct {
|
||||
GeneratedServiceAccounts []api.ServiceAccount
|
||||
GeneratedSecrets []api.Secret
|
||||
GeneratedServiceAccounts []v1.ServiceAccount
|
||||
GeneratedSecrets []v1.Secret
|
||||
Token string
|
||||
Err error
|
||||
}
|
||||
|
||||
func (t *testGenerator) GenerateToken(serviceAccount api.ServiceAccount, secret api.Secret) (string, error) {
|
||||
func (t *testGenerator) GenerateToken(serviceAccount v1.ServiceAccount, secret v1.Secret) (string, error) {
|
||||
t.GeneratedSecrets = append(t.GeneratedSecrets, secret)
|
||||
t.GeneratedServiceAccounts = append(t.GeneratedServiceAccounts, serviceAccount)
|
||||
return t.Token, t.Err
|
||||
}
|
||||
|
||||
// emptySecretReferences is used by a service account without any secrets
|
||||
func emptySecretReferences() []api.ObjectReference {
|
||||
return []api.ObjectReference{}
|
||||
func emptySecretReferences() []v1.ObjectReference {
|
||||
return []v1.ObjectReference{}
|
||||
}
|
||||
|
||||
// missingSecretReferences is used by a service account that references secrets which do no exist
|
||||
func missingSecretReferences() []api.ObjectReference {
|
||||
return []api.ObjectReference{{Name: "missing-secret-1"}}
|
||||
func missingSecretReferences() []v1.ObjectReference {
|
||||
return []v1.ObjectReference{{Name: "missing-secret-1"}}
|
||||
}
|
||||
|
||||
// regularSecretReferences is used by a service account that references secrets which are not ServiceAccountTokens
|
||||
func regularSecretReferences() []api.ObjectReference {
|
||||
return []api.ObjectReference{{Name: "regular-secret-1"}}
|
||||
func regularSecretReferences() []v1.ObjectReference {
|
||||
return []v1.ObjectReference{{Name: "regular-secret-1"}}
|
||||
}
|
||||
|
||||
// tokenSecretReferences is used by a service account that references a ServiceAccountToken secret
|
||||
func tokenSecretReferences() []api.ObjectReference {
|
||||
return []api.ObjectReference{{Name: "token-secret-1"}}
|
||||
func tokenSecretReferences() []v1.ObjectReference {
|
||||
return []v1.ObjectReference{{Name: "token-secret-1"}}
|
||||
}
|
||||
|
||||
// addTokenSecretReference adds a reference to the ServiceAccountToken that will be created
|
||||
func addTokenSecretReference(refs []api.ObjectReference) []api.ObjectReference {
|
||||
func addTokenSecretReference(refs []v1.ObjectReference) []v1.ObjectReference {
|
||||
return addNamedTokenSecretReference(refs, "default-token-fplln")
|
||||
}
|
||||
|
||||
// addNamedTokenSecretReference adds a reference to the named ServiceAccountToken
|
||||
func addNamedTokenSecretReference(refs []api.ObjectReference, name string) []api.ObjectReference {
|
||||
return append(refs, api.ObjectReference{Name: name})
|
||||
func addNamedTokenSecretReference(refs []v1.ObjectReference, name string) []v1.ObjectReference {
|
||||
return append(refs, v1.ObjectReference{Name: name})
|
||||
}
|
||||
|
||||
// serviceAccount returns a service account with the given secret refs
|
||||
func serviceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
|
||||
return &api.ServiceAccount{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
func serviceAccount(secretRefs []v1.ObjectReference) *v1.ServiceAccount {
|
||||
return &v1.ServiceAccount{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "default",
|
||||
UID: "12345",
|
||||
Namespace: "default",
|
||||
@@ -90,16 +91,16 @@ func serviceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
|
||||
}
|
||||
|
||||
// updatedServiceAccount returns a service account with the resource version modified
|
||||
func updatedServiceAccount(secretRefs []api.ObjectReference) *api.ServiceAccount {
|
||||
func updatedServiceAccount(secretRefs []v1.ObjectReference) *v1.ServiceAccount {
|
||||
sa := serviceAccount(secretRefs)
|
||||
sa.ResourceVersion = "2"
|
||||
return sa
|
||||
}
|
||||
|
||||
// opaqueSecret returns a persisted non-ServiceAccountToken secret named "regular-secret-1"
|
||||
func opaqueSecret() *api.Secret {
|
||||
return &api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
func opaqueSecret() *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "regular-secret-1",
|
||||
Namespace: "default",
|
||||
UID: "23456",
|
||||
@@ -114,22 +115,22 @@ func opaqueSecret() *api.Secret {
|
||||
|
||||
// createdTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret.
|
||||
// Named "default-token-fplln", since that is the first generated name after rand.Seed(1)
|
||||
func createdTokenSecret(overrideName ...string) *api.Secret {
|
||||
func createdTokenSecret(overrideName ...string) *v1.Secret {
|
||||
return namedCreatedTokenSecret("default-token-fplln")
|
||||
}
|
||||
|
||||
// namedTokenSecret returns the ServiceAccountToken secret posted when creating a new token secret with the given name.
|
||||
func namedCreatedTokenSecret(name string) *api.Secret {
|
||||
return &api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
func namedCreatedTokenSecret(name string) *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: name,
|
||||
Namespace: "default",
|
||||
Annotations: map[string]string{
|
||||
api.ServiceAccountNameKey: "default",
|
||||
api.ServiceAccountUIDKey: "12345",
|
||||
v1.ServiceAccountNameKey: "default",
|
||||
v1.ServiceAccountUIDKey: "12345",
|
||||
},
|
||||
},
|
||||
Type: api.SecretTypeServiceAccountToken,
|
||||
Type: v1.SecretTypeServiceAccountToken,
|
||||
Data: map[string][]byte{
|
||||
"token": []byte("ABC"),
|
||||
"ca.crt": []byte("CA Data"),
|
||||
@@ -139,19 +140,19 @@ func namedCreatedTokenSecret(name string) *api.Secret {
|
||||
}
|
||||
|
||||
// serviceAccountTokenSecret returns an existing ServiceAccountToken secret named "token-secret-1"
|
||||
func serviceAccountTokenSecret() *api.Secret {
|
||||
return &api.Secret{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
func serviceAccountTokenSecret() *v1.Secret {
|
||||
return &v1.Secret{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: "token-secret-1",
|
||||
Namespace: "default",
|
||||
UID: "23456",
|
||||
ResourceVersion: "1",
|
||||
Annotations: map[string]string{
|
||||
api.ServiceAccountNameKey: "default",
|
||||
api.ServiceAccountUIDKey: "12345",
|
||||
v1.ServiceAccountNameKey: "default",
|
||||
v1.ServiceAccountUIDKey: "12345",
|
||||
},
|
||||
},
|
||||
Type: api.SecretTypeServiceAccountToken,
|
||||
Type: v1.SecretTypeServiceAccountToken,
|
||||
Data: map[string][]byte{
|
||||
"token": []byte("ABC"),
|
||||
"ca.crt": []byte("CA Data"),
|
||||
@@ -161,37 +162,37 @@ func serviceAccountTokenSecret() *api.Secret {
|
||||
}
|
||||
|
||||
// serviceAccountTokenSecretWithoutTokenData returns an existing ServiceAccountToken secret that lacks token data
|
||||
func serviceAccountTokenSecretWithoutTokenData() *api.Secret {
|
||||
func serviceAccountTokenSecretWithoutTokenData() *v1.Secret {
|
||||
secret := serviceAccountTokenSecret()
|
||||
delete(secret.Data, api.ServiceAccountTokenKey)
|
||||
delete(secret.Data, v1.ServiceAccountTokenKey)
|
||||
return secret
|
||||
}
|
||||
|
||||
// serviceAccountTokenSecretWithoutCAData returns an existing ServiceAccountToken secret that lacks ca data
|
||||
func serviceAccountTokenSecretWithoutCAData() *api.Secret {
|
||||
func serviceAccountTokenSecretWithoutCAData() *v1.Secret {
|
||||
secret := serviceAccountTokenSecret()
|
||||
delete(secret.Data, api.ServiceAccountRootCAKey)
|
||||
delete(secret.Data, v1.ServiceAccountRootCAKey)
|
||||
return secret
|
||||
}
|
||||
|
||||
// serviceAccountTokenSecretWithCAData returns an existing ServiceAccountToken secret with the specified ca data
|
||||
func serviceAccountTokenSecretWithCAData(data []byte) *api.Secret {
|
||||
func serviceAccountTokenSecretWithCAData(data []byte) *v1.Secret {
|
||||
secret := serviceAccountTokenSecret()
|
||||
secret.Data[api.ServiceAccountRootCAKey] = data
|
||||
secret.Data[v1.ServiceAccountRootCAKey] = data
|
||||
return secret
|
||||
}
|
||||
|
||||
// serviceAccountTokenSecretWithoutNamespaceData returns an existing ServiceAccountToken secret that lacks namespace data
|
||||
func serviceAccountTokenSecretWithoutNamespaceData() *api.Secret {
|
||||
func serviceAccountTokenSecretWithoutNamespaceData() *v1.Secret {
|
||||
secret := serviceAccountTokenSecret()
|
||||
delete(secret.Data, api.ServiceAccountNamespaceKey)
|
||||
delete(secret.Data, v1.ServiceAccountNamespaceKey)
|
||||
return secret
|
||||
}
|
||||
|
||||
// serviceAccountTokenSecretWithNamespaceData returns an existing ServiceAccountToken secret with the specified namespace data
|
||||
func serviceAccountTokenSecretWithNamespaceData(data []byte) *api.Secret {
|
||||
func serviceAccountTokenSecretWithNamespaceData(data []byte) *v1.Secret {
|
||||
secret := serviceAccountTokenSecret()
|
||||
secret.Data[api.ServiceAccountNamespaceKey] = data
|
||||
secret.Data[v1.ServiceAccountNamespaceKey] = data
|
||||
return secret
|
||||
}
|
||||
|
||||
@@ -210,15 +211,15 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
Reactors []reaction
|
||||
|
||||
ExistingServiceAccount *api.ServiceAccount
|
||||
ExistingSecrets []*api.Secret
|
||||
ExistingServiceAccount *v1.ServiceAccount
|
||||
ExistingSecrets []*v1.Secret
|
||||
|
||||
AddedServiceAccount *api.ServiceAccount
|
||||
UpdatedServiceAccount *api.ServiceAccount
|
||||
DeletedServiceAccount *api.ServiceAccount
|
||||
AddedSecret *api.Secret
|
||||
UpdatedSecret *api.Secret
|
||||
DeletedSecret *api.Secret
|
||||
AddedServiceAccount *v1.ServiceAccount
|
||||
UpdatedServiceAccount *v1.ServiceAccount
|
||||
DeletedServiceAccount *v1.ServiceAccount
|
||||
AddedSecret *v1.Secret
|
||||
UpdatedSecret *v1.Secret
|
||||
DeletedSecret *v1.Secret
|
||||
|
||||
ExpectedActions []core.Action
|
||||
}{
|
||||
@@ -227,9 +228,9 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
|
||||
},
|
||||
},
|
||||
"new serviceaccount with no secrets encountering create error": {
|
||||
@@ -253,17 +254,17 @@ func TestTokenCreation(t *testing.T) {
|
||||
AddedServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
// Attempt 1
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
|
||||
// Attempt 2
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")),
|
||||
|
||||
// Attempt 3
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addNamedTokenSecretReference(emptySecretReferences(), "default-token-oh43e"))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addNamedTokenSecretReference(emptySecretReferences(), "default-token-oh43e"))),
|
||||
},
|
||||
},
|
||||
"new serviceaccount with no secrets encountering unending create error": {
|
||||
@@ -283,14 +284,14 @@ func TestTokenCreation(t *testing.T) {
|
||||
AddedServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
// Attempt
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
// Retry 1
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-gziey")),
|
||||
// Retry 2
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, namedCreatedTokenSecret("default-token-oh43e")),
|
||||
},
|
||||
},
|
||||
"new serviceaccount with missing secrets": {
|
||||
@@ -298,9 +299,9 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedServiceAccount: serviceAccount(missingSecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
|
||||
},
|
||||
},
|
||||
"new serviceaccount with non-token secrets": {
|
||||
@@ -308,14 +309,14 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedServiceAccount: serviceAccount(regularSecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
|
||||
},
|
||||
},
|
||||
"new serviceaccount with token secrets": {
|
||||
ClientObjects: []runtime.Object{serviceAccount(tokenSecretReferences()), serviceAccountTokenSecret()},
|
||||
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},
|
||||
ExistingSecrets: []*v1.Secret{serviceAccountTokenSecret()},
|
||||
|
||||
AddedServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
ExpectedActions: []core.Action{},
|
||||
@@ -325,7 +326,7 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
},
|
||||
},
|
||||
"updated serviceaccount with no secrets": {
|
||||
@@ -333,9 +334,9 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(emptySecretReferences()))),
|
||||
},
|
||||
},
|
||||
"updated serviceaccount with missing secrets": {
|
||||
@@ -343,9 +344,9 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedServiceAccount: serviceAccount(missingSecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(missingSecretReferences()))),
|
||||
},
|
||||
},
|
||||
"updated serviceaccount with non-token secrets": {
|
||||
@@ -353,13 +354,13 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedServiceAccount: serviceAccount(regularSecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewCreateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, createdTokenSecret()),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(addTokenSecretReference(regularSecretReferences()))),
|
||||
},
|
||||
},
|
||||
"updated serviceaccount with token secrets": {
|
||||
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},
|
||||
ExistingSecrets: []*v1.Secret{serviceAccountTokenSecret()},
|
||||
|
||||
UpdatedServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
ExpectedActions: []core.Action{},
|
||||
@@ -369,7 +370,7 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedServiceAccount: serviceAccount(emptySecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
},
|
||||
},
|
||||
|
||||
@@ -389,11 +390,11 @@ func TestTokenCreation(t *testing.T) {
|
||||
},
|
||||
"deleted serviceaccount with token secrets": {
|
||||
ClientObjects: []runtime.Object{serviceAccountTokenSecret()},
|
||||
ExistingSecrets: []*api.Secret{serviceAccountTokenSecret()},
|
||||
ExistingSecrets: []*v1.Secret{serviceAccountTokenSecret()},
|
||||
|
||||
DeletedServiceAccount: serviceAccount(tokenSecretReferences()),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewDeleteAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
},
|
||||
},
|
||||
|
||||
@@ -402,8 +403,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewDeleteAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
},
|
||||
},
|
||||
"added secret with serviceaccount": {
|
||||
@@ -418,8 +419,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedSecret: serviceAccountTokenSecretWithoutTokenData(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"added token secret without ca data": {
|
||||
@@ -428,8 +429,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedSecret: serviceAccountTokenSecretWithoutCAData(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"added token secret with mismatched ca data": {
|
||||
@@ -438,8 +439,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"added token secret without namespace data": {
|
||||
@@ -448,8 +449,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
AddedSecret: serviceAccountTokenSecretWithoutNamespaceData(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"added token secret with custom namespace data": {
|
||||
@@ -467,8 +468,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewDeleteAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewDeleteAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
},
|
||||
},
|
||||
"updated secret with serviceaccount": {
|
||||
@@ -483,8 +484,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithoutTokenData(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"updated token secret without ca data": {
|
||||
@@ -493,8 +494,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithoutCAData(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"updated token secret with mismatched ca data": {
|
||||
@@ -503,8 +504,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithCAData([]byte("mismatched")),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"updated token secret without namespace data": {
|
||||
@@ -513,8 +514,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
UpdatedSecret: serviceAccountTokenSecretWithoutNamespaceData(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "secrets"}, api.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, "token-secret-1"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "secrets"}, v1.NamespaceDefault, serviceAccountTokenSecret()),
|
||||
},
|
||||
},
|
||||
"updated token secret with custom namespace data": {
|
||||
@@ -537,8 +538,8 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
DeletedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, serviceAccount(emptySecretReferences())),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
core.NewUpdateAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, serviceAccount(emptySecretReferences())),
|
||||
},
|
||||
},
|
||||
"deleted secret with serviceaccount without reference": {
|
||||
@@ -546,7 +547,7 @@ func TestTokenCreation(t *testing.T) {
|
||||
|
||||
DeletedSecret: serviceAccountTokenSecret(),
|
||||
ExpectedActions: []core.Action{
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Resource: "serviceaccounts"}, api.NamespaceDefault, "default"),
|
||||
core.NewGetAction(unversioned.GroupVersionResource{Version: "v1", Resource: "serviceaccounts"}, v1.NamespaceDefault, "default"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
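The "gziey"/"oh43e" fixtures above encode one get/create pair per attempt: when a create fails, the controller regenerates the token secret name and tries again. The sketch below is illustrative only, not the tokens controller's actual code; the callbacks and the function itself are simplified assumptions:

package tokenstest

import "fmt"

// createTokenSecretWithRetries shows why the fixtures expect repeated
// get/create pairs: each failed create is retried with a freshly
// generated secret name until maxRetries is exhausted.
func createTokenSecretWithRetries(create func(name string) error, newName func() string, maxRetries int) error {
	var lastErr error
	for i := 0; i < maxRetries; i++ {
		if lastErr = create(newName()); lastErr == nil {
			return nil
		}
	}
	return fmt.Errorf("failed to create token secret after %d attempts: %v", maxRetries, lastErr)
}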
@@ -25,8 +25,9 @@ import (

"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
kcache "k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
"k8s.io/kubernetes/pkg/client/record"
"k8s.io/kubernetes/pkg/cloudprovider"
"k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -69,7 +70,7 @@ type AttachDetachController interface {

// NewAttachDetachController returns a new instance of AttachDetachController.
func NewAttachDetachController(
kubeClient internalclientset.Interface,
kubeClient clientset.Interface,
podInformer kcache.SharedInformer,
nodeInformer kcache.SharedInformer,
pvcInformer kcache.SharedInformer,
@@ -144,7 +145,7 @@ func NewAttachDetachController(
type attachDetachController struct {
// kubeClient is the kube API client used by volumehost to communicate with
// the API server.
kubeClient internalclientset.Interface
kubeClient clientset.Interface

// pvcInformer is the shared PVC informer used to fetch and store PVC
// objects from the API server. It is shared with other controllers and
@@ -210,7 +211,7 @@ func (adc *attachDetachController) Run(stopCh <-chan struct{}) {
}

func (adc *attachDetachController) podAdd(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)
if pod == nil || !ok {
return
}
@@ -229,7 +230,7 @@ func (adc *attachDetachController) podUpdate(oldObj, newObj interface{}) {
}

func (adc *attachDetachController) podDelete(obj interface{}) {
pod, ok := obj.(*api.Pod)
pod, ok := obj.(*v1.Pod)
if pod == nil || !ok {
return
}
@@ -238,7 +239,7 @@ func (adc *attachDetachController) podDelete(obj interface{}) {
}

func (adc *attachDetachController) nodeAdd(obj interface{}) {
node, ok := obj.(*api.Node)
node, ok := obj.(*v1.Node)
if node == nil || !ok {
return
}
@@ -259,7 +260,7 @@ func (adc *attachDetachController) nodeUpdate(oldObj, newObj interface{}) {
}

func (adc *attachDetachController) nodeDelete(obj interface{}) {
node, ok := obj.(*api.Node)
node, ok := obj.(*v1.Node)
if node == nil || !ok {
return
}
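The handler hunks above all follow the same shape after the migration: informer callbacks receive interface{} and must assert the versioned type, returning quietly when the object is not what they expect (for example a deletion tombstone). A compact, self-contained sketch of that pattern:

package attachdetachsketch

import "k8s.io/kubernetes/pkg/api/v1"

// onPodAdd mirrors the podAdd handler above: assert the versioned type
// and bail out on anything else. The handler body is a placeholder.
func onPodAdd(obj interface{}) {
	pod, ok := obj.(*v1.Pod)
	if pod == nil || !ok {
		// Not a *v1.Pod; nothing to do for this event.
		return
	}
	_ = pod // process the pod's volumes here
}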
@@ -275,7 +276,7 @@ func (adc *attachDetachController) nodeDelete(obj interface{}) {
|
||||
// processPodVolumes processes the volumes in the given pod and adds them to the
|
||||
// desired state of the world if addVolumes is true, otherwise it removes them.
|
||||
func (adc *attachDetachController) processPodVolumes(
|
||||
pod *api.Pod, addVolumes bool) {
|
||||
pod *v1.Pod, addVolumes bool) {
|
||||
if pod == nil {
|
||||
return
|
||||
}
|
||||
@@ -363,7 +364,7 @@ func (adc *attachDetachController) processPodVolumes(
|
||||
// createVolumeSpec creates and returns a mutatable volume.Spec object for the
|
||||
// specified volume. It dereference any PVC to get PV objects, if needed.
|
||||
func (adc *attachDetachController) createVolumeSpec(
|
||||
podVolume api.Volume, podNamespace string) (*volume.Spec, error) {
|
||||
podVolume v1.Volume, podNamespace string) (*volume.Spec, error) {
|
||||
if pvcSource := podVolume.VolumeSource.PersistentVolumeClaim; pvcSource != nil {
|
||||
glog.V(10).Infof(
|
||||
"Found PVC, ClaimName: %q/%q",
|
||||
@@ -418,9 +419,9 @@ func (adc *attachDetachController) createVolumeSpec(
|
||||
"failed to deep copy %q volume object. err=%v", podVolume.Name, err)
|
||||
}
|
||||
|
||||
clonedPodVolume, ok := clonedPodVolumeObj.(api.Volume)
|
||||
clonedPodVolume, ok := clonedPodVolumeObj.(v1.Volume)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to api.Volume", clonedPodVolumeObj)
|
||||
return nil, fmt.Errorf("failed to cast clonedPodVolume %#v to v1.Volume", clonedPodVolumeObj)
|
||||
}
|
||||
|
||||
return volume.NewSpecFromVolume(&clonedPodVolume), nil
|
||||
@@ -447,7 +448,7 @@ func (adc *attachDetachController) getPVCFromCacheExtractPV(
|
||||
err)
|
||||
}
|
||||
|
||||
pvc, ok := pvcObj.(*api.PersistentVolumeClaim)
|
||||
pvc, ok := pvcObj.(*v1.PersistentVolumeClaim)
|
||||
if !ok || pvc == nil {
|
||||
return "", "", fmt.Errorf(
|
||||
"failed to cast %q object %#v to PersistentVolumeClaim",
|
||||
@@ -455,7 +456,7 @@ func (adc *attachDetachController) getPVCFromCacheExtractPV(
|
||||
pvcObj)
|
||||
}
|
||||
|
||||
if pvc.Status.Phase != api.ClaimBound || pvc.Spec.VolumeName == "" {
|
||||
if pvc.Status.Phase != v1.ClaimBound || pvc.Spec.VolumeName == "" {
|
||||
return "", "", fmt.Errorf(
|
||||
"PVC %q has non-bound phase (%q) or empty pvc.Spec.VolumeName (%q)",
|
||||
key,
|
||||
@@ -482,7 +483,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
|
||||
"failed to find PV %q in PVInformer cache. %v", name, err)
|
||||
}
|
||||
|
||||
pv, ok := pvObj.(*api.PersistentVolume)
|
||||
pv, ok := pvObj.(*v1.PersistentVolume)
|
||||
if !ok || pv == nil {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to cast %q object %#v to PersistentVolume", name, pvObj)
|
||||
@@ -510,7 +511,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
|
||||
"failed to deep copy %q PV object. err=%v", name, err)
|
||||
}
|
||||
|
||||
clonedPV, ok := clonedPVObj.(api.PersistentVolume)
|
||||
clonedPV, ok := clonedPVObj.(v1.PersistentVolume)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf(
|
||||
"failed to cast %q clonedPV %#v to PersistentVolume", name, pvObj)
|
||||
@@ -524,7 +525,7 @@ func (adc *attachDetachController) getPVSpecFromCache(
|
||||
// corresponding volume in the actual state of the world to indicate that it is
|
||||
// mounted.
|
||||
func (adc *attachDetachController) processVolumesInUse(
|
||||
nodeName types.NodeName, volumesInUse []api.UniqueVolumeName) {
|
||||
nodeName types.NodeName, volumesInUse []v1.UniqueVolumeName) {
|
||||
glog.V(4).Infof("processVolumesInUse for node %q", nodeName)
|
||||
for _, attachedVolume := range adc.actualStateOfWorld.GetAttachedVolumesForNode(nodeName) {
|
||||
mounted := false
|
||||
@@ -562,11 +563,11 @@ func (adc *attachDetachController) GetPodPluginDir(podUID types.UID, pluginName
|
||||
return ""
|
||||
}
|
||||
|
||||
func (adc *attachDetachController) GetKubeClient() internalclientset.Interface {
|
||||
func (adc *attachDetachController) GetKubeClient() clientset.Interface {
|
||||
return adc.kubeClient
|
||||
}
|
||||
|
||||
func (adc *attachDetachController) NewWrapperMounter(volName string, spec volume.Spec, pod *api.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
||||
func (adc *attachDetachController) NewWrapperMounter(volName string, spec volume.Spec, pod *v1.Pod, opts volume.VolumeOptions) (volume.Mounter, error) {
|
||||
return nil, fmt.Errorf("NewWrapperMounter not supported by Attach/Detach controller's VolumeHost implementation")
|
||||
}
|
||||
|
||||
@@ -594,6 +595,6 @@ func (adc *attachDetachController) GetHostIP() (net.IP, error) {
|
||||
return nil, fmt.Errorf("GetHostIP() not supported by Attach/Detach controller's VolumeHost implementation")
|
||||
}
|
||||
|
||||
func (adc *attachDetachController) GetNodeAllocatable() (api.ResourceList, error) {
|
||||
return api.ResourceList{}, nil
|
||||
func (adc *attachDetachController) GetNodeAllocatable() (v1.ResourceList, error) {
|
||||
return v1.ResourceList{}, nil
|
||||
}
|
||||
|
||||
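With the signature changes above, callers construct the controller against the versioned clientset instead of the internal one. A hedged sketch of the wiring, assuming the generated clientset exposes the usual NewForConfig constructor; the surrounding config plumbing is not part of this diff:

package attachdetachsketch

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
	"k8s.io/kubernetes/pkg/client/restclient"
)

// newVersionedClient builds the clientset.Interface the controller now
// takes; call sites only swap the import and type, not the call shape.
func newVersionedClient(config *restclient.Config) (clientset.Interface, error) {
	c, err := clientset.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return c, nil
}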
@@ -28,7 +28,7 @@ import (

"github.com/golang/glog"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/types"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -56,7 +56,7 @@ type ActualStateOfWorld interface {
// added.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, the node is added.
AddVolumeNode(volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error)
AddVolumeNode(volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error)

// SetVolumeMountedByNode sets the MountedByNode value for the given volume
// and node. When set to true this value indicates the volume is mounted by
@@ -65,7 +65,7 @@ type ActualStateOfWorld interface {
// returned.
// If no node with the name nodeName exists in list of attached nodes for
// the specified volume, an error is returned.
SetVolumeMountedByNode(volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error
SetVolumeMountedByNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error

// SetNodeStatusUpdateNeeded sets statusUpdateNeeded for the specified
// node to true indicating the AttachedVolume field in the Node's Status
@@ -76,12 +76,12 @@ type ActualStateOfWorld interface {

// ResetDetachRequestTime resets the detachRequestTime to 0 which indicates there is no detach
// request any more for the volume
ResetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName)
ResetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName)

// SetDetachRequestTime sets the detachRequestedTime to current time if this is no
// previous request (the previous detachRequestedTime is zero) and return the time elapsed
// since last request
SetDetachRequestTime(volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)
SetDetachRequestTime(volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error)

// DeleteVolumeNode removes the given volume and node from the underlying
// store indicating the specified volume is no longer attached to the
@@ -89,12 +89,12 @@ type ActualStateOfWorld interface {
// If the volume/node combo does not exist, this is a no-op.
// If after deleting the node, the specified volume contains no other child
// nodes, the volume is also deleted.
DeleteVolumeNode(volumeName api.UniqueVolumeName, nodeName types.NodeName)
DeleteVolumeNode(volumeName v1.UniqueVolumeName, nodeName types.NodeName)

// VolumeNodeExists returns true if the specified volume/node combo exists
// in the underlying store indicating the specified volume is attached to
// the specified node.
VolumeNodeExists(volumeName api.UniqueVolumeName, nodeName types.NodeName) bool
VolumeNodeExists(volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool

// GetAttachedVolumes generates and returns a list of volumes/node pairs
// reflecting which volumes are attached to which nodes based on the
@@ -115,7 +115,7 @@ type ActualStateOfWorld interface {
// this may differ from the actual list of attached volumes for the node
// since volumes should be removed from this list as soon a detach operation
// is considered, before the detach operation is triggered).
GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume
GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume
}

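Everything in the interface above now keys on v1.UniqueVolumeName, which is a typed string: it works directly as a map key and converts explicitly to and from string. A minimal sketch, with an assumed plugin-style name:

package attachdetachsketch

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// uniqueNameDemo shows the typed-string behavior; the name used here is
// only an example of the plugin/volume format, not taken from this diff.
func uniqueNameDemo() {
	attached := map[v1.UniqueVolumeName]string{}
	name := v1.UniqueVolumeName("kubernetes.io/gce-pd/my-disk")
	attached[name] = "/dev/sdb" // device path keyed by unique volume name
	fmt.Println(string(name), attached[name])
}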
// AttachedVolume represents a volume that is attached to a node.
@@ -138,7 +138,7 @@ type AttachedVolume struct {
// NewActualStateOfWorld returns a new instance of ActualStateOfWorld.
func NewActualStateOfWorld(volumePluginMgr *volume.VolumePluginMgr) ActualStateOfWorld {
return &actualStateOfWorld{
attachedVolumes: make(map[api.UniqueVolumeName]attachedVolume),
attachedVolumes: make(map[v1.UniqueVolumeName]attachedVolume),
nodesToUpdateStatusFor: make(map[types.NodeName]nodeToUpdateStatusFor),
volumePluginMgr: volumePluginMgr,
}
@@ -149,7 +149,7 @@ type actualStateOfWorld struct {
// controller believes to be successfully attached to the nodes it is
// managing. The key in this map is the name of the volume and the value is
// an object containing more information about the attached volume.
attachedVolumes map[api.UniqueVolumeName]attachedVolume
attachedVolumes map[v1.UniqueVolumeName]attachedVolume

// nodesToUpdateStatusFor is a map containing the set of nodes for which to
// update the VolumesAttached Status field. The key in this map is the name
@@ -168,7 +168,7 @@ type actualStateOfWorld struct {
// believes to be successfully attached to a node it is managing.
type attachedVolume struct {
// volumeName contains the unique identifier for this volume.
volumeName api.UniqueVolumeName
volumeName v1.UniqueVolumeName

// spec is the volume spec containing the specification for this volume.
// Used to generate the volume plugin object, and passed to attach/detach
@@ -223,36 +223,36 @@ type nodeToUpdateStatusFor struct {
// actual list of attached volumes since volumes should be removed from this
// list as soon a detach operation is considered, before the detach
// operation is triggered).
volumesToReportAsAttached map[api.UniqueVolumeName]api.UniqueVolumeName
volumesToReportAsAttached map[v1.UniqueVolumeName]v1.UniqueVolumeName
}

func (asw *actualStateOfWorld) MarkVolumeAsAttached(
_ api.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_ v1.UniqueVolumeName, volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) error {
_, err := asw.AddVolumeNode(volumeSpec, nodeName, devicePath)
return err
}

func (asw *actualStateOfWorld) MarkVolumeAsDetached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.DeleteVolumeNode(volumeName, nodeName)
}

func (asw *actualStateOfWorld) RemoveVolumeFromReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {
asw.Lock()
defer asw.Unlock()
return asw.removeVolumeFromReportAsAttached(volumeName, nodeName)
}

func (asw *actualStateOfWorld) AddVolumeToReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()
asw.addVolumeToReportAsAttached(volumeName, nodeName)
}

func (asw *actualStateOfWorld) AddVolumeNode(
volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (api.UniqueVolumeName, error) {
volumeSpec *volume.Spec, nodeName types.NodeName, devicePath string) (v1.UniqueVolumeName, error) {
asw.Lock()
defer asw.Unlock()

@@ -313,7 +313,7 @@ func (asw *actualStateOfWorld) AddVolumeNode(
}

func (asw *actualStateOfWorld) SetVolumeMountedByNode(
volumeName api.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
volumeName v1.UniqueVolumeName, nodeName types.NodeName, mounted bool) error {
asw.Lock()
defer asw.Unlock()

@@ -342,7 +342,7 @@ func (asw *actualStateOfWorld) SetVolumeMountedByNode(
}

func (asw *actualStateOfWorld) ResetDetachRequestTime(
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()

@@ -356,7 +356,7 @@ func (asw *actualStateOfWorld) ResetDetachRequestTime(
}

func (asw *actualStateOfWorld) SetDetachRequestTime(
volumeName api.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (time.Duration, error) {
asw.Lock()
defer asw.Unlock()

@@ -378,7 +378,7 @@ func (asw *actualStateOfWorld) SetDetachRequestTime(
// Get the volume and node object from actual state of world
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) getNodeAndVolume(
volumeName api.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) (attachedVolume, nodeAttachedTo, error) {

volumeObj, volumeExists := asw.attachedVolumes[volumeName]
if volumeExists {
@@ -396,7 +396,7 @@ func (asw *actualStateOfWorld) getNodeAndVolume(
// Remove the volumeName from the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) error {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) error {

nodeToUpdate, nodeToUpdateExists := asw.nodesToUpdateStatusFor[nodeName]
if nodeToUpdateExists {
@@ -418,7 +418,7 @@ func (asw *actualStateOfWorld) removeVolumeFromReportAsAttached(
// Add the volumeName to the node's volumesToReportAsAttached list
// This is an internal function and caller should acquire and release the lock
func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
// In case the volume/node entry is no longer in attachedVolume list, skip the rest
if _, _, err := asw.getNodeAndVolume(volumeName, nodeName); err != nil {
glog.V(4).Infof("Volume %q is no longer attached to node %q", volumeName, nodeName)
@@ -430,7 +430,7 @@ func (asw *actualStateOfWorld) addVolumeToReportAsAttached(
nodeToUpdate = nodeToUpdateStatusFor{
nodeName: nodeName,
statusUpdateNeeded: true,
volumesToReportAsAttached: make(map[api.UniqueVolumeName]api.UniqueVolumeName),
volumesToReportAsAttached: make(map[v1.UniqueVolumeName]v1.UniqueVolumeName),
}
asw.nodesToUpdateStatusFor[nodeName] = nodeToUpdate
glog.V(4).Infof("Add new node %q to nodesToUpdateStatusFor", nodeName)
@@ -470,7 +470,7 @@ func (asw *actualStateOfWorld) SetNodeStatusUpdateNeeded(nodeName types.NodeName
}

func (asw *actualStateOfWorld) DeleteVolumeNode(
volumeName api.UniqueVolumeName, nodeName types.NodeName) {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) {
asw.Lock()
defer asw.Unlock()

@@ -493,7 +493,7 @@ func (asw *actualStateOfWorld) DeleteVolumeNode(
}

func (asw *actualStateOfWorld) VolumeNodeExists(
volumeName api.UniqueVolumeName, nodeName types.NodeName) bool {
volumeName v1.UniqueVolumeName, nodeName types.NodeName) bool {
asw.RLock()
defer asw.RUnlock()

@@ -562,19 +562,19 @@ func (asw *actualStateOfWorld) GetAttachedVolumesPerNode() map[types.NodeName][]
return attachedVolumesPerNode
}

func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]api.AttachedVolume {
func (asw *actualStateOfWorld) GetVolumesToReportAttached() map[types.NodeName][]v1.AttachedVolume {
asw.RLock()
defer asw.RUnlock()

volumesToReportAttached := make(map[types.NodeName][]api.AttachedVolume)
volumesToReportAttached := make(map[types.NodeName][]v1.AttachedVolume)
for nodeName, nodeToUpdateObj := range asw.nodesToUpdateStatusFor {
if nodeToUpdateObj.statusUpdateNeeded {
attachedVolumes := make(
[]api.AttachedVolume,
[]v1.AttachedVolume,
len(nodeToUpdateObj.volumesToReportAsAttached) /* len */)
i := 0
for _, volume := range nodeToUpdateObj.volumesToReportAsAttached {
attachedVolumes[i] = api.AttachedVolume{
attachedVolumes[i] = v1.AttachedVolume{
Name: volume,
DevicePath: asw.attachedVolumes[volume].devicePath,
}

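A small aside on the loop at the end of GetVolumesToReportAttached above: the slice is sized up front with make and filled by index, so no reallocation happens while converting the map. Sketch with stand-in data, not the method itself:

package attachdetachsketch

import "k8s.io/kubernetes/pkg/api/v1"

// toAttachedVolumes converts a device-path map into the v1.AttachedVolume
// slice shape the node status update expects, using the same
// preallocate-and-index-fill pattern as the method above.
func toAttachedVolumes(devicePaths map[v1.UniqueVolumeName]string) []v1.AttachedVolume {
	out := make([]v1.AttachedVolume, len(devicePaths))
	i := 0
	for name, path := range devicePaths {
		out[i] = v1.AttachedVolume{Name: name, DevicePath: path}
		i++
	}
	return out
}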
@@ -20,7 +20,7 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
|
||||
"k8s.io/kubernetes/pkg/types"
|
||||
volumetesting "k8s.io/kubernetes/pkg/volume/testing"
|
||||
@@ -32,7 +32,7 @@ func Test_AddVolumeNode_Positive_NewVolumeNewNode(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
|
||||
nodeName := types.NodeName("node-name")
|
||||
@@ -65,7 +65,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeNewNode(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := types.NodeName("node1-name")
|
||||
node2Name := types.NodeName("node2-name")
|
||||
@@ -115,7 +115,7 @@ func Test_AddVolumeNode_Positive_ExistingVolumeExistingNode(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -159,7 +159,7 @@ func Test_DeleteVolumeNode_Positive_VolumeExistsNodeExists(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -189,7 +189,7 @@ func Test_DeleteVolumeNode_Positive_VolumeDoesntExistNodeDoesntExist(t *testing.
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
nodeName := types.NodeName("node-name")
|
||||
|
||||
// Act
|
||||
@@ -215,7 +215,7 @@ func Test_DeleteVolumeNode_Positive_TwoNodesOneDeleted(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := types.NodeName("node1-name")
|
||||
node2Name := types.NodeName("node2-name")
|
||||
@@ -264,7 +264,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -296,7 +296,7 @@ func Test_VolumeNodeExists_Positive_VolumeExistsNodeDoesntExist(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := types.NodeName("node1-name")
|
||||
node2Name := types.NodeName("node2-name")
|
||||
@@ -328,7 +328,7 @@ func Test_VolumeNodeExists_Positive_VolumeAndNodeDontExist(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
nodeName := types.NodeName("node-name")
|
||||
|
||||
// Act
|
||||
@@ -368,7 +368,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeOneNode(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -395,7 +395,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volume1Name := api.UniqueVolumeName("volume1-name")
|
||||
volume1Name := v1.UniqueVolumeName("volume1-name")
|
||||
volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
|
||||
node1Name := types.NodeName("node1-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -403,7 +403,7 @@ func Test_GetAttachedVolumes_Positive_TwoVolumeTwoNodes(t *testing.T) {
|
||||
if add1Err != nil {
|
||||
t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
|
||||
}
|
||||
volume2Name := api.UniqueVolumeName("volume2-name")
|
||||
volume2Name := v1.UniqueVolumeName("volume2-name")
|
||||
volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
|
||||
node2Name := types.NodeName("node2-name")
|
||||
generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
|
||||
@@ -430,7 +430,7 @@ func Test_GetAttachedVolumes_Positive_OneVolumeTwoNodes(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
node1Name := types.NodeName("node1-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -469,7 +469,7 @@ func Test_SetVolumeMountedByNode_Positive_Set(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -496,7 +496,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSet(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -532,7 +532,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithoutInitialSet(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -565,7 +565,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetAddVolumeNodeNotRes
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -606,7 +606,7 @@ func Test_SetVolumeMountedByNode_Positive_UnsetWithInitialSetVerifyDetachRequest
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -653,7 +653,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Set(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
devicePath := "fake/device/path"
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
@@ -680,7 +680,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_Marked(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -716,7 +716,7 @@ func Test_MarkDesireToDetach_Positive_MarkedAddVolumeNodeReset(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -759,7 +759,7 @@ func Test_RemoveVolumeFromReportAsAttached_Positive_UnsetWithInitialSetVolumeMou
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -802,7 +802,7 @@ func Test_RemoveVolumeFromReportAsAttached(t *testing.T) {
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
||||
volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
|
||||
nodeName := types.NodeName("node-name")
|
||||
devicePath := "fake/device/path"
|
||||
@@ -835,7 +835,7 @@ func Test_RemoveVolumeFromReportAsAttached_AddVolumeToReportAsAttached_Positive(
|
||||
// Arrange
|
||||
volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
|
||||
asw := NewActualStateOfWorld(volumePluginMgr)
|
||||
volumeName := api.UniqueVolumeName("volume-name")
|
||||
volumeName := v1.UniqueVolumeName("volume-name")
|
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := types.NodeName("node-name")
    devicePath := "fake/device/path"
@@ -878,7 +878,7 @@ func Test_RemoveVolumeFromReportAsAttached_Delete_AddVolumeNode(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld(volumePluginMgr)
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := types.NodeName("node-name")
    devicePath := "fake/device/path"
@@ -923,7 +923,7 @@ func Test_SetDetachRequestTime_Positive(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld(volumePluginMgr)
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := types.NodeName("node-name")
    devicePath := "fake/device/path"
@@ -970,7 +970,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeOneNode(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld(volumePluginMgr)
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := types.NodeName("node-name")
    devicePath := "fake/device/path"
@@ -994,7 +994,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld(volumePluginMgr)
-   volume1Name := api.UniqueVolumeName("volume1-name")
+   volume1Name := v1.UniqueVolumeName("volume1-name")
    volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
    node1Name := types.NodeName("node1-name")
    devicePath := "fake/device/path"
@@ -1002,7 +1002,7 @@ func Test_GetAttachedVolumesForNode_Positive_TwoVolumeTwoNodes(t *testing.T) {
    if add1Err != nil {
        t.Fatalf("AddVolumeNode failed. Expected: <no error> Actual: <%v>", add1Err)
    }
-   volume2Name := api.UniqueVolumeName("volume2-name")
+   volume2Name := v1.UniqueVolumeName("volume2-name")
    volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
    node2Name := types.NodeName("node2-name")
    generatedVolumeName2, add2Err := asw.AddVolumeNode(volume2Spec, node2Name, devicePath)
@@ -1025,7 +1025,7 @@ func Test_GetAttachedVolumesForNode_Positive_OneVolumeTwoNodes(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld(volumePluginMgr)
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    node1Name := types.NodeName("node1-name")
    devicePath := "fake/device/path"
@@ -1061,7 +1061,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    asw := NewActualStateOfWorld(volumePluginMgr)
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    node1Name := types.NodeName("node1-name")
    devicePath1 := "fake/device/path1"
@@ -1097,7 +1097,7 @@ func Test_OneVolumeTwoNodes_TwoDevicePaths(t *testing.T) {
func verifyAttachedVolume(
    t *testing.T,
    attachedVolumes []AttachedVolume,
-   expectedVolumeName api.UniqueVolumeName,
+   expectedVolumeName v1.UniqueVolumeName,
    expectedVolumeSpecName string,
    expectedNodeName types.NodeName,
    expectedDevicePath string,
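The hunk above ends at the signature of verifyAttachedVolume; its body lies outside the lines shown. As a rough sketch of what a verifier with this shape typically does (not the repository's implementation; the AttachedVolume field names used here are assumed), it scans the slice for a matching entry and fails the test when none is found:

// Illustrative sketch only. The real body is not part of this diff, and the
// AttachedVolume fields (VolumeName, VolumeSpec, NodeName, DevicePath) are assumed.
func verifyAttachedVolumeSketch(
    t *testing.T,
    attachedVolumes []AttachedVolume,
    expectedVolumeName v1.UniqueVolumeName,
    expectedVolumeSpecName string,
    expectedNodeName types.NodeName,
    expectedDevicePath string) {
    for _, av := range attachedVolumes {
        if av.VolumeName == expectedVolumeName &&
            av.VolumeSpec.Name() == expectedVolumeSpecName &&
            av.NodeName == expectedNodeName &&
            av.DevicePath == expectedDevicePath {
            return // found the expected attachment
        }
    }
    t.Fatalf(
        "attachedVolumes (%v) do not contain the expected volume %q on node %q",
        attachedVolumes, expectedVolumeName, expectedNodeName)
}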
@@ -25,7 +25,7 @@ import (
    "fmt"
    "sync"

-   "k8s.io/kubernetes/pkg/api"
+   "k8s.io/kubernetes/pkg/api/v1"
    k8stypes "k8s.io/kubernetes/pkg/types"
    "k8s.io/kubernetes/pkg/volume"
    "k8s.io/kubernetes/pkg/volume/util/operationexecutor"
@@ -58,7 +58,7 @@ type DesiredStateOfWorld interface {
    // should be attached to the specified node, the volume is implicitly added.
    // If no node with the name nodeName exists in list of nodes managed by the
    // attach/detach attached controller, an error is returned.
-   AddPod(podName types.UniquePodName, pod *api.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (api.UniqueVolumeName, error)
+   AddPod(podName types.UniquePodName, pod *v1.Pod, volumeSpec *volume.Spec, nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error)

    // DeleteNode removes the given node from the list of nodes managed by the
    // attach/detach controller.
@@ -76,7 +76,7 @@ type DesiredStateOfWorld interface {
    // volumes under the specified node, this is a no-op.
    // If after deleting the pod, the specified volume contains no other child
    // pods, the volume is also deleted.
-   DeletePod(podName types.UniquePodName, volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName)
+   DeletePod(podName types.UniquePodName, volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName)

    // NodeExists returns true if the node with the specified name exists in
    // the list of nodes managed by the attach/detach controller.
@@ -85,7 +85,7 @@ type DesiredStateOfWorld interface {
    // VolumeExists returns true if the volume with the specified name exists
    // in the list of volumes that should be attached to the specified node by
    // the attach detach controller.
-   VolumeExists(volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool
+   VolumeExists(volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool

    // GetVolumesToAttach generates and returns a list of volumes to attach
    // and the nodes they should be attached to based on the current desired
@@ -106,10 +106,10 @@ type VolumeToAttach struct {
// scheduled to the underlying node.
type PodToAdd struct {
    // pod contains the api object of pod
-   Pod *api.Pod
+   Pod *v1.Pod

    // volumeName contains the unique identifier for this volume.
-   VolumeName api.UniqueVolumeName
+   VolumeName v1.UniqueVolumeName

    // nodeName contains the name of this node.
    NodeName k8stypes.NodeName
@@ -143,13 +143,13 @@ type nodeManaged struct {
    // volumesToAttach is a map containing the set of volumes that should be
    // attached to this node. The key in the map is the name of the volume and
    // the value is a pod object containing more information about the volume.
-   volumesToAttach map[api.UniqueVolumeName]volumeToAttach
+   volumesToAttach map[v1.UniqueVolumeName]volumeToAttach
}

// The volume object represents a volume that should be attached to a node.
type volumeToAttach struct {
    // volumeName contains the unique identifier for this volume.
-   volumeName api.UniqueVolumeName
+   volumeName v1.UniqueVolumeName

    // spec is the volume spec containing the specification for this volume.
    // Used to generate the volume plugin object, and passed to attach/detach
@@ -170,7 +170,7 @@ type pod struct {
    podName types.UniquePodName

    // pod object contains the api object of pod
-   podObj *api.Pod
+   podObj *v1.Pod
}

func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
@@ -180,16 +180,16 @@ func (dsw *desiredStateOfWorld) AddNode(nodeName k8stypes.NodeName) {
    if _, nodeExists := dsw.nodesManaged[nodeName]; !nodeExists {
        dsw.nodesManaged[nodeName] = nodeManaged{
            nodeName: nodeName,
-           volumesToAttach: make(map[api.UniqueVolumeName]volumeToAttach),
+           volumesToAttach: make(map[v1.UniqueVolumeName]volumeToAttach),
        }
    }
}

func (dsw *desiredStateOfWorld) AddPod(
    podName types.UniquePodName,
-   podToAdd *api.Pod,
+   podToAdd *v1.Pod,
    volumeSpec *volume.Spec,
-   nodeName k8stypes.NodeName) (api.UniqueVolumeName, error) {
+   nodeName k8stypes.NodeName) (v1.UniqueVolumeName, error) {
    dsw.Lock()
    defer dsw.Unlock()

@@ -261,7 +261,7 @@ func (dsw *desiredStateOfWorld) DeleteNode(nodeName k8stypes.NodeName) error {

func (dsw *desiredStateOfWorld) DeletePod(
    podName types.UniquePodName,
-   volumeName api.UniqueVolumeName,
+   volumeName v1.UniqueVolumeName,
    nodeName k8stypes.NodeName) {
    dsw.Lock()
    defer dsw.Unlock()
@@ -299,7 +299,7 @@ func (dsw *desiredStateOfWorld) NodeExists(nodeName k8stypes.NodeName) bool {
}

func (dsw *desiredStateOfWorld) VolumeExists(
-   volumeName api.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
+   volumeName v1.UniqueVolumeName, nodeName k8stypes.NodeName) bool {
    dsw.RLock()
    defer dsw.RUnlock()

@@ -334,9 +334,9 @@ func (dsw *desiredStateOfWorld) GetVolumesToAttach() []VolumeToAttach {
    return volumesToAttach
}

-// Construct a list of api.Pod objects from the given pod map
-func getPodsFromMap(podMap map[types.UniquePodName]pod) []*api.Pod {
-   pods := make([]*api.Pod, 0, len(podMap))
+// Construct a list of v1.Pod objects from the given pod map
+func getPodsFromMap(podMap map[types.UniquePodName]pod) []*v1.Pod {
+   pods := make([]*v1.Pod, 0, len(podMap))
    for _, pod := range podMap {
        pods = append(pods, pod.podObj)
    }
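The interface above defines the desired-state cache that the attach/detach controller drives. A minimal usage sketch, built only from the signatures visible in this diff (not code from the commit; podToAdd and volumeSpec are placeholders):

// Minimal sketch of the calling pattern, assuming the signatures shown above.
func exampleDesiredStateUsage(dsw DesiredStateOfWorld, podToAdd *v1.Pod, volumeSpec *volume.Spec) error {
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)

    // AddPod implicitly registers the volume under the node and returns the
    // generated v1.UniqueVolumeName for it.
    generatedName, err := dsw.AddPod(types.UniquePodName("pod-uid"), podToAdd, volumeSpec, nodeName)
    if err != nil {
        return err
    }

    // The volume now shows up in the desired state...
    if !dsw.VolumeExists(generatedName, nodeName) {
        return fmt.Errorf("volume %q unexpectedly missing", generatedName)
    }

    // ...and is dropped again once the last pod referencing it is deleted.
    dsw.DeletePod(types.UniquePodName("pod-uid"), generatedName, nodeName)
    return nil
}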
@@ -19,7 +19,7 @@ package cache
import (
    "testing"

-   "k8s.io/kubernetes/pkg/api"
+   "k8s.io/kubernetes/pkg/api/v1"
    controllervolumetesting "k8s.io/kubernetes/pkg/controller/volume/attachdetach/testing"
    k8stypes "k8s.io/kubernetes/pkg/types"
    volumetesting "k8s.io/kubernetes/pkg/volume/testing"
@@ -91,7 +91,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeDoesntExist(t *testing.T) {
    podName := "pod-uid"
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -139,7 +139,7 @@ func Test_AddPod_Positive_NewPodNodeExistsVolumeExists(t *testing.T) {
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod1Name := "pod1-uid"
    pod2Name := "pod2-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -212,7 +212,7 @@ func Test_AddPod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -278,7 +278,7 @@ func Test_AddPod_Negative_NewPodNodeDoesntExistVolumeDoesntExist(t *testing.T) {
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    volumeExists := dsw.VolumeExists(volumeName, nodeName)
@@ -377,7 +377,7 @@ func Test_DeleteNode_Negative_NodeExistsHasChildVolumes(t *testing.T) {
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    generatedVolumeName, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)
    if podAddErr != nil {
@@ -416,7 +416,7 @@ func Test_DeletePod_Positive_PodExistsNodeExistsVolumeExists(t *testing.T) {
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -464,7 +464,7 @@ func Test_DeletePod_Positive_2PodsExistNodeExistsVolumesExist(t *testing.T) {
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod1Name := "pod1-uid"
    pod2Name := "pod2-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -525,7 +525,7 @@ func Test_DeletePod_Positive_PodDoesNotExist(t *testing.T) {
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    pod1Name := "pod1-uid"
    pod2Name := "pod2-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -573,7 +573,7 @@ func Test_DeletePod_Positive_NodeDoesNotExist(t *testing.T) {
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    node1Name := k8stypes.NodeName("node1-name")
    dsw.AddNode(node1Name)
@@ -628,7 +628,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    podName := "pod-uid"
-   volume1Name := api.UniqueVolumeName("volume1-name")
+   volume1Name := v1.UniqueVolumeName("volume1-name")
    volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -647,7 +647,7 @@ func Test_DeletePod_Positive_VolumeDoesNotExist(t *testing.T) {
            generatedVolume1Name,
            nodeName)
    }
-   volume2Name := api.UniqueVolumeName("volume2-name")
+   volume2Name := v1.UniqueVolumeName("volume2-name")

    // Act
    dsw.DeletePod(types.UniquePodName(podName), volume2Name, nodeName)
@@ -731,7 +731,7 @@ func Test_VolumeExists_Positive_VolumeExistsNodeExists(t *testing.T) {
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    generatedVolumeName, _ := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volumeSpec, nodeName)

@@ -761,7 +761,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
    podName := "pod-uid"
-   volume1Name := api.UniqueVolumeName("volume1-name")
+   volume1Name := v1.UniqueVolumeName("volume1-name")
    volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
    generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(podName), controllervolumetesting.NewPod(podName, podName), volume1Spec, nodeName)
    if podAddErr != nil {
@@ -770,7 +770,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeExists(t *testing.T) {
            podName,
            podAddErr)
    }
-   volume2Name := api.UniqueVolumeName("volume2-name")
+   volume2Name := v1.UniqueVolumeName("volume2-name")

    // Act
    volumeExists := dsw.VolumeExists(volume2Name, nodeName)
@@ -795,7 +795,7 @@ func Test_VolumeExists_Positive_VolumeDoesntExistNodeDoesntExists(t *testing.T)
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    nodeName := k8stypes.NodeName("node-name")
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")

    // Act
    volumeExists := dsw.VolumeExists(volumeName, nodeName)
@@ -857,7 +857,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    node1Name := k8stypes.NodeName("node1-name")
    pod1Name := "pod1-uid"
-   volume1Name := api.UniqueVolumeName("volume1-name")
+   volume1Name := v1.UniqueVolumeName("volume1-name")
    volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
    dsw.AddNode(node1Name)
    generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
@@ -869,7 +869,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEach(t *testing.T) {
    }
    node2Name := k8stypes.NodeName("node2-name")
    pod2Name := "pod2-uid"
-   volume2Name := api.UniqueVolumeName("volume2-name")
+   volume2Name := v1.UniqueVolumeName("volume2-name")
    volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
    dsw.AddNode(node2Name)
    generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
@@ -902,7 +902,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    node1Name := k8stypes.NodeName("node1-name")
    pod1Name := "pod1-uid"
-   volume1Name := api.UniqueVolumeName("volume1-name")
+   volume1Name := v1.UniqueVolumeName("volume1-name")
    volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
    dsw.AddNode(node1Name)
    generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
@@ -914,7 +914,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesOneVolumeEachExtraPod(t *testing.T
    }
    node2Name := k8stypes.NodeName("node2-name")
    pod2Name := "pod2-uid"
-   volume2Name := api.UniqueVolumeName("volume2-name")
+   volume2Name := v1.UniqueVolumeName("volume2-name")
    volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
    dsw.AddNode(node2Name)
    generatedVolume2Name, podAddErr := dsw.AddPod(types.UniquePodName(pod2Name), controllervolumetesting.NewPod(pod2Name, pod2Name), volume2Spec, node2Name)
@@ -956,7 +956,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    node1Name := k8stypes.NodeName("node1-name")
    pod1Name := "pod1-uid"
-   volume1Name := api.UniqueVolumeName("volume1-name")
+   volume1Name := v1.UniqueVolumeName("volume1-name")
    volume1Spec := controllervolumetesting.GetTestVolumeSpec(string(volume1Name), volume1Name)
    dsw.AddNode(node1Name)
    generatedVolume1Name, podAddErr := dsw.AddPod(types.UniquePodName(pod1Name), controllervolumetesting.NewPod(pod1Name, pod1Name), volume1Spec, node1Name)
@@ -968,7 +968,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
    }
    node2Name := k8stypes.NodeName("node2-name")
    pod2aName := "pod2a-name"
-   volume2Name := api.UniqueVolumeName("volume2-name")
+   volume2Name := v1.UniqueVolumeName("volume2-name")
    volume2Spec := controllervolumetesting.GetTestVolumeSpec(string(volume2Name), volume2Name)
    dsw.AddNode(node2Name)
    generatedVolume2Name1, podAddErr := dsw.AddPod(types.UniquePodName(pod2aName), controllervolumetesting.NewPod(pod2aName, pod2aName), volume2Spec, node2Name)
@@ -993,7 +993,7 @@ func Test_GetVolumesToAttach_Positive_TwoNodesThreeVolumes(t *testing.T) {
            generatedVolume2Name2)
    }
    pod3Name := "pod3-uid"
-   volume3Name := api.UniqueVolumeName("volume3-name")
+   volume3Name := v1.UniqueVolumeName("volume3-name")
    volume3Spec := controllervolumetesting.GetTestVolumeSpec(string(volume3Name), volume3Name)
    generatedVolume3Name, podAddErr := dsw.AddPod(types.UniquePodName(pod3Name), controllervolumetesting.NewPod(pod3Name, pod3Name), volume3Spec, node1Name)
    if podAddErr != nil {
@@ -1020,7 +1020,7 @@ func verifyVolumeToAttach(
    t *testing.T,
    volumesToAttach []VolumeToAttach,
    expectedNodeName k8stypes.NodeName,
-   expectedVolumeName api.UniqueVolumeName,
+   expectedVolumeName v1.UniqueVolumeName,
    expectedVolumeSpecName string) {
    for _, volumeToAttach := range volumesToAttach {
        if volumeToAttach.NodeName == expectedNodeName &&
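All of the tests above share one Arrange/Act/Assert skeleton. Condensed into a single hypothetical case (not a test from this commit, but built only from calls that appear in the hunks above), the pattern is:

// Condensed illustration of the shared test skeleton; hypothetical case.
func Test_Example_AddThenVerify(t *testing.T) {
    // Arrange
    volumePluginMgr, _ := volumetesting.GetTestVolumePluginMgr(t)
    dsw := NewDesiredStateOfWorld(volumePluginMgr)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
    volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)

    // Act
    generatedName, podAddErr := dsw.AddPod(types.UniquePodName("pod-uid"), controllervolumetesting.NewPod("pod-uid", "pod-uid"), volumeSpec, nodeName)

    // Assert
    if podAddErr != nil {
        t.Fatalf("AddPod failed. Expected: <no error> Actual: <%v>", podAddErr)
    }
    if !dsw.VolumeExists(generatedName, nodeName) {
        t.Fatalf("volume %q not found in desired state of world", generatedName)
    }
}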
@@ -23,7 +23,7 @@ import (

    "github.com/golang/glog"

-   "k8s.io/kubernetes/pkg/api"
+   "k8s.io/kubernetes/pkg/api/v1"
    kcache "k8s.io/kubernetes/pkg/client/cache"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
    "k8s.io/kubernetes/pkg/util/wait"
@@ -102,7 +102,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() {
    }

    if exists {
-       informerPod, ok := informerPodObj.(*api.Pod)
+       informerPod, ok := informerPodObj.(*v1.Pod)
        if !ok {
            glog.Errorf("Failed to cast obj %#v to pod object for pod %q (UID %q)", informerPod, dswPodKey, dswPodUID)
            continue
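The populator hunk above shows the safe type-assertion idiom for objects read back from an informer cache, now targeting *v1.Pod. Informer stores hold plain interface{} values, so every read must be asserted before use. In isolation the idiom looks like this (a stand-alone sketch, not repository code):

// Stand-alone illustration of the assertion idiom shown above.
func castInformerPod(obj interface{}) (*v1.Pod, bool) {
    // Informer caches store interface{} values; assert before use so a stray
    // object type fails loudly instead of panicking.
    pod, ok := obj.(*v1.Pod)
    if !ok {
        glog.Errorf("Failed to cast obj %#v to *v1.Pod", obj)
        return nil, false
    }
    return pod, true
}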
@@ -20,7 +20,7 @@ import (
    "testing"
    "time"

-   "k8s.io/kubernetes/pkg/api"
+   "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/client/record"
    "k8s.io/kubernetes/pkg/controller/informers"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
@@ -86,7 +86,7 @@ func Test_Run_Positive_OneDesiredVolumeAttach(t *testing.T) {
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -132,7 +132,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithUnmountedVolume(t *te
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -199,7 +199,7 @@ func Test_Run_Positive_OneDesiredVolumeAttachThenDetachWithMountedVolume(t *test
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -266,7 +266,7 @@ func Test_Run_Negative_OneDesiredVolumeAttachThenDetachWithUnmountedVolumeUpdate
    reconciler := NewReconciler(
        reconcilerLoopPeriod, maxWaitForUnmountDuration, syncLoopPeriod, dsw, asw, ad, nsu)
    podName := "pod-uid"
-   volumeName := api.UniqueVolumeName("volume-name")
+   volumeName := v1.UniqueVolumeName("volume-name")
    volumeSpec := controllervolumetesting.GetTestVolumeSpec(string(volumeName), volumeName)
    nodeName := k8stypes.NodeName("node-name")
    dsw.AddNode(nodeName)
@@ -24,9 +24,9 @@ import (

    "github.com/golang/glog"

-   "k8s.io/kubernetes/pkg/api"
+   "k8s.io/kubernetes/pkg/api/v1"
    kcache "k8s.io/kubernetes/pkg/client/cache"
-   "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+   clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    "k8s.io/kubernetes/pkg/controller/volume/attachdetach/cache"
    "k8s.io/kubernetes/pkg/conversion"
    "k8s.io/kubernetes/pkg/util/strategicpatch"
@@ -42,7 +42,7 @@ type NodeStatusUpdater interface {

// NewNodeStatusUpdater returns a new instance of NodeStatusUpdater.
func NewNodeStatusUpdater(
-   kubeClient internalclientset.Interface,
+   kubeClient clientset.Interface,
    nodeInformer kcache.SharedInformer,
    actualStateOfWorld cache.ActualStateOfWorld) NodeStatusUpdater {
    return &nodeStatusUpdater{
@@ -53,7 +53,7 @@ func NewNodeStatusUpdater(
}

type nodeStatusUpdater struct {
-   kubeClient internalclientset.Interface
+   kubeClient clientset.Interface
    nodeInformer kcache.SharedInformer
    actualStateOfWorld cache.ActualStateOfWorld
}
@@ -81,7 +81,7 @@ func (nsu *nodeStatusUpdater) UpdateNodeStatuses() error {
            err)
    }

-   node, ok := clonedNode.(*api.Node)
+   node, ok := clonedNode.(*v1.Node)
    if !ok || node == nil {
        return fmt.Errorf(
            "failed to cast %q object %#v to Node",
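Inside UpdateNodeStatuses, the informer's node object is deep-copied before it is mutated; only the assertion back to the concrete type is visible in the hunk above. A rough sketch of that step, assuming the pkg/conversion cloner this file imports (the actual call sites sit outside the lines shown in this diff):

// Rough sketch of the clone-then-assert step; the cloning call is an
// assumption based on the pkg/conversion import above, not shown in the diff.
func cloneNode(nodeObj interface{}) (*v1.Node, error) {
    clonedNode, err := conversion.NewCloner().DeepCopy(nodeObj)
    if err != nil {
        return nil, fmt.Errorf("error cloning node %#v: %v", nodeObj, err)
    }
    node, ok := clonedNode.(*v1.Node)
    if !ok || node == nil {
        return nil, fmt.Errorf("failed to cast cloned object %#v to *v1.Node", clonedNode)
    }
    // The copy can now be mutated safely, without racing against the shared
    // informer cache, and the delta written back as a strategic merge patch.
    return node, nil
}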
@@ -19,8 +19,8 @@ package testing
import (
    "fmt"

-   "k8s.io/kubernetes/pkg/api"
-   "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
+   "k8s.io/kubernetes/pkg/api/v1"
+   "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/fake"
    "k8s.io/kubernetes/pkg/client/testing/core"
    "k8s.io/kubernetes/pkg/runtime"
    "k8s.io/kubernetes/pkg/types"
@@ -29,12 +29,12 @@ import (
)

// GetTestVolumeSpec returns a test volume spec
-func GetTestVolumeSpec(volumeName string, diskName api.UniqueVolumeName) *volume.Spec {
+func GetTestVolumeSpec(volumeName string, diskName v1.UniqueVolumeName) *volume.Spec {
    return &volume.Spec{
-       Volume: &api.Volume{
+       Volume: &v1.Volume{
            Name: volumeName,
-           VolumeSource: api.VolumeSource{
-               GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+           VolumeSource: v1.VolumeSource{
+               GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                    PDName: string(diskName),
                    FSType: "fake",
                    ReadOnly: false,
@@ -48,28 +48,28 @@ func CreateTestClient() *fake.Clientset {
    fakeClient := &fake.Clientset{}

    fakeClient.AddReactor("list", "pods", func(action core.Action) (handled bool, ret runtime.Object, err error) {
-       obj := &api.PodList{}
+       obj := &v1.PodList{}
        podNamePrefix := "mypod"
        namespace := "mynamespace"
        for i := 0; i < 5; i++ {
            podName := fmt.Sprintf("%s-%d", podNamePrefix, i)
-           pod := api.Pod{
-               Status: api.PodStatus{
-                   Phase: api.PodRunning,
+           pod := v1.Pod{
+               Status: v1.PodStatus{
+                   Phase: v1.PodRunning,
                },
-               ObjectMeta: api.ObjectMeta{
+               ObjectMeta: v1.ObjectMeta{
                    Name: podName,
                    Namespace: namespace,
                    Labels: map[string]string{
                        "name": podName,
                    },
                },
-               Spec: api.PodSpec{
-                   Containers: []api.Container{
+               Spec: v1.PodSpec{
+                   Containers: []v1.Container{
                        {
                            Name: "containerName",
                            Image: "containerImage",
-                           VolumeMounts: []api.VolumeMount{
+                           VolumeMounts: []v1.VolumeMount{
                                {
                                    Name: "volumeMountName",
                                    ReadOnly: false,
@@ -78,11 +78,11 @@ func CreateTestClient() *fake.Clientset {
                        },
                    },
                },
-               Volumes: []api.Volume{
+               Volumes: []v1.Volume{
                    {
                        Name: "volumeName",
-                       VolumeSource: api.VolumeSource{
-                           GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{
+                       VolumeSource: v1.VolumeSource{
+                           GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
                                PDName: "pdName",
                                FSType: "ext4",
                                ReadOnly: false,
@@ -104,9 +104,9 @@ func CreateTestClient() *fake.Clientset {
}

// NewPod returns a test pod object
-func NewPod(uid, name string) *api.Pod {
-   return &api.Pod{
-       ObjectMeta: api.ObjectMeta{
+func NewPod(uid, name string) *v1.Pod {
+   return &v1.Pod{
+       ObjectMeta: v1.ObjectMeta{
            UID: types.UID(uid),
            Name: name,
            Namespace: name,
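These are the helpers the attach/detach tests in this commit lean on: GetTestVolumeSpec fabricates a GCE-PD-backed volume.Spec and NewPod a minimal pod. A quick hypothetical usage, following directly from the bodies shown above:

// Hypothetical usage of the helpers above, derived from their bodies.
spec := GetTestVolumeSpec("volume-name", v1.UniqueVolumeName("volume-name"))
// spec.Volume.VolumeSource.GCEPersistentDisk.PDName is now "volume-name".

pod := NewPod("pod-uid", "pod-name")
// pod.UID is "pod-uid"; pod.Name and pod.Namespace are both "pod-name".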
@@ -19,9 +19,9 @@ package persistentvolume
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/apis/storage"
|
||||
storageutil "k8s.io/kubernetes/pkg/apis/storage/util"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
|
||||
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
|
||||
)
|
||||
|
||||
// Test single call to syncClaim and syncVolume methods.
|
||||
@@ -42,134 +42,134 @@ func TestSync(t *testing.T) {
|
||||
{
|
||||
// syncClaim binds to a matching unbound volume.
|
||||
"1-1 - successful bind",
|
||||
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending),
|
||||
newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim does not do anything when there is no matching volume.
|
||||
"1-2 - noop",
|
||||
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim1-2", "uid1-2", "10Gi", "", api.ClaimPending),
|
||||
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending),
|
||||
newClaimArray("claim1-2", "uid1-2", "10Gi", "", v1.ClaimPending),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim resets claim.Status to Pending when there is no
|
||||
// matching volume.
|
||||
"1-3 - reset to Pending",
|
||||
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimBound),
|
||||
newClaimArray("claim1-3", "uid1-3", "10Gi", "", api.ClaimPending),
|
||||
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimBound),
|
||||
newClaimArray("claim1-3", "uid1-3", "10Gi", "", v1.ClaimPending),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim binds claims to the smallest matching volume
|
||||
"1-4 - smallest volume",
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-4_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-4_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
},
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume1-4_1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume1-4_1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-4_2", "1Gi", "uid1-4", "claim1-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
},
|
||||
newClaimArray("claim1-4", "uid1-4", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim1-4", "uid1-4", "1Gi", "", v1.ClaimPending),
|
||||
newClaimArray("claim1-4", "uid1-4", "1Gi", "volume1-4_2", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim binds a claim only to volume that points to it (by
|
||||
// name), even though a smaller one is available.
|
||||
"1-5 - prebound volume by name - success",
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume1-5_1", "10Gi", "", "claim1-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume1-5_1", "10Gi", "", "claim1-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
},
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-5_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume1-5_1", "10Gi", "uid1-5", "claim1-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-5_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
},
|
||||
newClaimArray("claim1-5", "uid1-5", "1Gi", "", api.ClaimPending),
|
||||
withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", api.ClaimBound, annBoundByController, annBindCompleted)),
|
||||
newClaimArray("claim1-5", "uid1-5", "1Gi", "", v1.ClaimPending),
|
||||
withExpectedCapacity("10Gi", newClaimArray("claim1-5", "uid1-5", "1Gi", "volume1-5_1", v1.ClaimBound, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim binds a claim only to volume that points to it (by
|
||||
// UID), even though a smaller one is available.
|
||||
"1-6 - prebound volume by UID - success",
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
},
|
||||
[]*api.PersistentVolume{
|
||||
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-6_2", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
[]*v1.PersistentVolume{
|
||||
newVolume("volume1-6_1", "10Gi", "uid1-6", "claim1-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newVolume("volume1-6_2", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
},
|
||||
newClaimArray("claim1-6", "uid1-6", "1Gi", "", api.ClaimPending),
|
||||
withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", api.ClaimBound, annBoundByController, annBindCompleted)),
|
||||
newClaimArray("claim1-6", "uid1-6", "1Gi", "", v1.ClaimPending),
|
||||
withExpectedCapacity("10Gi", newClaimArray("claim1-6", "uid1-6", "1Gi", "volume1-6_1", v1.ClaimBound, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim does not bind claim to a volume prebound to a claim with
|
||||
// same name and different UID
|
||||
"1-7 - prebound volume to different claim",
|
||||
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim1-7", "uid1-7", "1Gi", "", api.ClaimPending),
|
||||
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-7", "10Gi", "uid1-777", "claim1-7", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending),
|
||||
newClaimArray("claim1-7", "uid1-7", "1Gi", "", v1.ClaimPending),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim completes binding - simulates controller crash after
|
||||
// PV.ClaimRef is saved
|
||||
"1-8 - complete bind after crash - PV bound",
|
||||
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumePending, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-8", "uid1-8", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumePending, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume1-8", "1Gi", "uid1-8", "claim1-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-8", "uid1-8", "1Gi", "", v1.ClaimPending),
|
||||
newClaimArray("claim1-8", "uid1-8", "1Gi", "volume1-8", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim completes binding - simulates controller crash after
|
||||
// PV.Status is saved
|
||||
"1-9 - complete bind after crash - PV status saved",
|
||||
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-9", "uid1-9", "1Gi", "", api.ClaimPending),
|
||||
newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume1-9", "1Gi", "uid1-9", "claim1-9", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-9", "uid1-9", "1Gi", "", v1.ClaimPending),
|
||||
newClaimArray("claim1-9", "uid1-9", "1Gi", "volume1-9", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim completes binding - simulates controller crash after
|
||||
// PVC.VolumeName is saved
|
||||
"1-10 - complete bind after crash - PVC bound",
|
||||
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimPending, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newVolumeArray("volume1-10", "1Gi", "uid1-10", "claim1-10", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", v1.ClaimPending, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim1-10", "uid1-10", "1Gi", "volume1-10", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim binds a claim only when the label selector matches the volume
|
||||
"1-11 - bind when selector matches",
|
||||
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)),
|
||||
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", api.ClaimBound, annBoundByController, annBindCompleted)),
|
||||
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain)),
|
||||
withLabels(labels, newVolumeArray("volume1-1", "1Gi", "uid1-1", "claim1-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController)),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending)),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "volume1-1", v1.ClaimBound, annBoundByController, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim does not bind a claim when the label selector doesn't match
|
||||
"1-12 - do not bind when selector does not match",
|
||||
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", api.ClaimPending)),
|
||||
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending)),
|
||||
withLabelSelector(labels, newClaimArray("claim1-1", "uid1-1", "1Gi", "", v1.ClaimPending)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
|
||||
@@ -181,8 +181,8 @@ func TestSync(t *testing.T) {
|
||||
"2-1 - claim prebound to non-existing volume - noop",
|
||||
novolumes,
|
||||
novolumes,
|
||||
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending),
|
||||
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", api.ClaimPending),
|
||||
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", v1.ClaimPending),
|
||||
newClaimArray("claim2-1", "uid2-1", "10Gi", "volume2-1", v1.ClaimPending),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
@@ -191,28 +191,28 @@ func TestSync(t *testing.T) {
|
||||
"2-2 - claim prebound to non-existing volume - reset status",
|
||||
novolumes,
|
||||
novolumes,
|
||||
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimBound),
|
||||
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", api.ClaimPending),
|
||||
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", v1.ClaimBound),
|
||||
newClaimArray("claim2-2", "uid2-2", "10Gi", "volume2-2", v1.ClaimPending),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim with claim pre-bound to a PV that exists and is
|
||||
// unbound. Check it gets bound and no annBoundByController is set.
|
||||
"2-3 - claim prebound to unbound volume",
|
||||
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimPending),
|
||||
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimBound, annBindCompleted),
|
||||
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending),
|
||||
newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// claim with claim pre-bound to a PV that is pre-bound to the claim
|
||||
// by name. Check it gets bound and no annBoundByController is set.
|
||||
"2-4 - claim prebound to prebound volume by name",
|
||||
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", api.ClaimPending),
|
||||
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", api.ClaimBound, annBindCompleted),
|
||||
newVolumeArray("volume2-4", "1Gi", "", "claim2-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-4", "1Gi", "uid2-4", "claim2-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimPending),
|
||||
newClaimArray("claim2-4", "uid2-4", "1Gi", "volume2-4", v1.ClaimBound, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
@@ -220,30 +220,30 @@ func TestSync(t *testing.T) {
|
||||
// claim by UID. Check it gets bound and no annBoundByController is
|
||||
// set.
|
||||
"2-5 - claim prebound to prebound volume by UID",
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", api.ClaimPending),
|
||||
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", api.ClaimBound, annBindCompleted),
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-5", "1Gi", "uid2-5", "claim2-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimPending),
|
||||
newClaimArray("claim2-5", "uid2-5", "1Gi", "volume2-5", v1.ClaimBound, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim with claim pre-bound to a PV that is bound to different
|
||||
// claim. Check it's reset to Pending.
|
||||
"2-6 - claim prebound to already bound volume",
|
||||
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", api.ClaimBound),
|
||||
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", api.ClaimPending),
|
||||
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-6", "1Gi", "uid2-6_1", "claim2-6_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", v1.ClaimBound),
|
||||
newClaimArray("claim2-6", "uid2-6", "1Gi", "volume2-6", v1.ClaimPending),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim with claim bound by controller to a PV that is bound to
|
||||
// different claim. Check it throws an error.
|
||||
"2-7 - claim bound by controller to already bound volume",
|
||||
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", api.ClaimBound, annBoundByController),
|
||||
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", api.ClaimBound, annBoundByController),
|
||||
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-7", "1Gi", "uid2-7_1", "claim2-7_1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", v1.ClaimBound, annBoundByController),
|
||||
newClaimArray("claim2-7", "uid2-7", "1Gi", "volume2-7", v1.ClaimBound, annBoundByController),
|
||||
noevents, noerrors, testSyncClaimError,
|
||||
},
|
||||
{
|
||||
@@ -251,10 +251,10 @@ func TestSync(t *testing.T) {
|
||||
// unbound, but does not match the selector. Check it gets bound
|
||||
// and no annBoundByController is set.
|
||||
"2-8 - claim prebound to unbound volume that does not match the selector",
|
||||
newVolumeArray("volume2-3", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimPending)),
|
||||
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", api.ClaimBound, annBindCompleted)),
|
||||
newVolumeArray("volume2-3", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume2-3", "1Gi", "uid2-3", "claim2-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimPending)),
|
||||
withLabelSelector(labels, newClaimArray("claim2-3", "uid2-3", "1Gi", "volume2-3", v1.ClaimBound, annBindCompleted)),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
|
||||
@@ -265,8 +265,8 @@ func TestSync(t *testing.T) {
|
||||
"3-1 - bound claim with missing VolumeName",
|
||||
novolumes,
|
||||
novolumes,
|
||||
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-1", "uid3-1", "10Gi", "", api.ClaimLost, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-1", "uid3-1", "10Gi", "", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-1", "uid3-1", "10Gi", "", v1.ClaimLost, annBoundByController, annBindCompleted),
|
||||
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
@@ -275,28 +275,28 @@ func TestSync(t *testing.T) {
|
||||
"3-2 - bound claim with missing volume",
|
||||
novolumes,
|
||||
novolumes,
|
||||
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", api.ClaimLost, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-2", "uid3-2", "10Gi", "volume3-2", v1.ClaimLost, annBoundByController, annBindCompleted),
|
||||
[]string{"Warning ClaimLost"}, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim with claim bound to unbound volume. Check it's bound.
|
||||
// Also check that Pending phase is set to Bound
|
||||
"3-3 - bound claim with unbound volume",
|
||||
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted),
|
||||
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
|
||||
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, annBoundByController, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
// syncClaim with claim bound to volume with missing (or different)
|
||||
// volume.Spec.ClaimRef.UID. Check that the claim is marked as lost.
|
||||
"3-4 - bound claim with prebound volume",
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimPending, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", api.ClaimLost, annBoundByController, annBindCompleted),
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-4", "10Gi", "claim3-4-x", "claim3-4", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimPending, annBoundByController, annBindCompleted),
|
||||
newClaimArray("claim3-4", "uid3-4", "10Gi", "volume3-4", v1.ClaimLost, annBoundByController, annBindCompleted),
|
||||
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
@@ -304,10 +304,10 @@ func TestSync(t *testing.T) {
|
||||
// controller does not do anything. Also check that Pending phase is
|
||||
// set to Bound
|
||||
"3-5 - bound claim with bound volume",
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimPending, annBindCompleted),
|
||||
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", api.ClaimBound, annBindCompleted),
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-5", "10Gi", "uid3-5", "claim3-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimPending, annBindCompleted),
|
||||
newClaimArray("claim3-5", "uid3-5", "10Gi", "volume3-5", v1.ClaimBound, annBindCompleted),
|
||||
noevents, noerrors, testSyncClaim,
|
||||
},
|
||||
{
|
||||
@@ -315,10 +315,10 @@ func TestSync(t *testing.T) {
|
||||
// claim. Check that the claim is marked as lost.
|
||||
// TODO: test that an event is emitted
|
||||
"3-6 - bound claim with bound volume",
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", api.VolumePending, api.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimPending, annBindCompleted),
|
||||
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", api.ClaimLost, annBindCompleted),
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newVolumeArray("volume3-6", "10Gi", "uid3-6-x", "claim3-6-x", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
|
||||
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimPending, annBindCompleted),
|
||||
newClaimArray("claim3-6", "uid3-6", "10Gi", "volume3-6", v1.ClaimLost, annBindCompleted),
|
||||
[]string{"Warning ClaimMisbound"}, noerrors, testSyncClaim,
|
||||
},
{
@@ -326,18 +326,18 @@ func TestSync(t *testing.T) {
// even if the claim's selector doesn't match the volume. Also
// check that Pending phase is set to Bound
"3-7 - bound claim with unbound volume where selector doesn't match",
newVolumeArray("volume3-3", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimPending, annBoundByController, annBindCompleted)),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", api.ClaimBound, annBoundByController, annBindCompleted)),
newVolumeArray("volume3-3", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume3-3", "10Gi", "uid3-3", "claim3-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimPending, annBoundByController, annBindCompleted)),
withLabelSelector(labels, newClaimArray("claim3-3", "uid3-3", "10Gi", "volume3-3", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
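
Each entry above and below follows Go's table-driven test idiom: initial volumes and claims, the volumes and claims expected after the sync call, expected events, injected errors, and the sync function under test. A minimal sketch of such a row type, with every field name assumed for illustration (the real struct lives in the package's test framework; only the test-call signature is taken from the diff itself, which spells it out verbatim further down):

package volumetest

import "k8s.io/kubernetes/pkg/api/v1"

// controllerTest is a hypothetical mirror of the row type used in these
// tables; the field names are assumptions.
type controllerTest struct {
	name            string                      // e.g. "3-5 - bound claim with bound volume"
	initialVolumes  []*v1.PersistentVolume      // cluster state fed to the fake API reactor
	expectedVolumes []*v1.PersistentVolume      // state asserted after the sync call
	initialClaims   []*v1.PersistentVolumeClaim
	expectedClaims  []*v1.PersistentVolumeClaim
	expectedEvents  []string                    // e.g. []string{"Warning ClaimMisbound"}; noevents == nil
	errors          []reactorError              // injected API failures; noerrors == nil
	test            testCall                    // testSyncClaim or testSyncVolume
}

// reactorError is a placeholder so this sketch is self-contained.
type reactorError struct {
	verb, resource string
	err            error
}

// testCall reuses the signature visible in the table below; the
// PersistentVolumeController and volumeReactor types belong to the
// package under test.
type testCall func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error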
// [Unit test set 4] All syncVolume tests.
{
// syncVolume with pending volume. Check it's marked as Available.
"4-1 - pending volume",
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-1", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-1", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
@@ -346,8 +346,8 @@ func TestSync(t *testing.T) {
// syncVolume with prebound pending volume. Check it's marked as
// Available.
"4-2 - pending prebound volume",
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-2", "10Gi", "", "claim4-2", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
@@ -356,8 +356,8 @@ func TestSync(t *testing.T) {
// syncVolume with volume bound to missing claim.
// Check the volume gets Released
"4-3 - bound volume with missing claim",
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", api.VolumeReleased, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-3", "10Gi", "uid4-3", "claim4-3", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain),
noclaims,
noclaims,
noevents, noerrors, testSyncVolume,
@@ -366,50 +366,50 @@ func TestSync(t *testing.T) {
// syncVolume with volume bound to claim with different UID.
// Check the volume gets Released.
"4-4 - volume bound to claim with different UID",
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", api.VolumeReleased, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", api.ClaimBound, annBindCompleted),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-4", "10Gi", "uid4-4", "claim4-4", v1.VolumeReleased, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", v1.ClaimBound, annBindCompleted),
newClaimArray("claim4-4", "uid4-4-x", "10Gi", "volume4-4", v1.ClaimBound, annBindCompleted),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by controller to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by controller to unbound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound by user to unbound claim.
// Check syncVolume does not do anything.
"4-5 - volume bound by user to bound claim",
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", api.ClaimPending),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-5", "10Gi", "uid4-5", "claim4-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
newClaimArray("claim4-5", "uid4-5", "10Gi", "", v1.ClaimPending),
noevents, noerrors, testSyncVolume,
},
{
// syncVolume with volume bound to bound claim.
// Check that the volume is marked as Bound.
"4-6 - volume bound to bound claim",
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", api.ClaimBound),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-6", "10Gi", "uid4-6", "claim4-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", v1.ClaimBound),
newClaimArray("claim4-6", "uid4-6", "10Gi", "volume4-6", v1.ClaimBound),
noevents, noerrors, testSyncVolume,
},
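
Rows 4-1 through 4-6 all reduce to a phase transition on the volume object. As a rough sketch of that step (not the controller's actual method; the helper name is invented), the update only writes when the phase actually changes:

package volumetest

import "k8s.io/kubernetes/pkg/api/v1"

// setVolumePhase is a hypothetical distillation of the transitions these
// rows assert (Pending -> Available, Available -> Bound, Bound -> Released).
// It reports whether an API update would be needed.
func setVolumePhase(volume *v1.PersistentVolume, phase v1.PersistentVolumePhase) bool {
	if volume.Status.Phase == phase {
		return false // already there; avoids a useless API write
	}
	volume.Status.Phase = phase
	return true
}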
{
// syncVolume with volume bound by controller to claim bound to
// another volume. Check that the volume is rolled back.
"4-7 - volume bound by controller to claim bound somewhere else",
newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-7", "10Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", api.ClaimBound),
newVolumeArray("volume4-7", "10Gi", "uid4-7", "claim4-7", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolumeArray("volume4-7", "10Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", v1.ClaimBound),
newClaimArray("claim4-7", "uid4-7", "10Gi", "volume4-7-x", v1.ClaimBound),
noevents, noerrors, testSyncVolume,
},
{
@@ -417,10 +417,10 @@ func TestSync(t *testing.T) {
// another volume. Check that the volume is marked as Available
// and its UID is reset.
"4-8 - volume bound by user to claim bound somewhere else",
newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", api.VolumeBound, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-8", "10Gi", "", "claim4-8", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", api.ClaimBound),
newVolumeArray("volume4-8", "10Gi", "uid4-8", "claim4-8", v1.VolumeBound, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume4-8", "10Gi", "", "claim4-8", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", v1.ClaimBound),
newClaimArray("claim4-8", "uid4-8", "10Gi", "volume4-8-x", v1.ClaimBound),
noevents, noerrors, testSyncVolume,
},

@@ -429,78 +429,78 @@ func TestSync(t *testing.T) {
// syncVolume binds a claim to requested class even if there is a
// smaller PV available
"13-1 - binding to class",
[]*api.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume13-1-2", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
[]*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-1-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
},
[]*api.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
[]*v1.PersistentVolume{
newVolume("volume13-1-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-1-2", "10Gi", "uid13-1", "claim13-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
},
newClaimArray("claim13-1", "uid13-1", "1Gi", "", api.ClaimPending, storageutil.StorageClassAnnotation),
withExpectedCapacity("10Gi", newClaimArray("claim13-1", "uid13-1", "1Gi", "volume13-1-2", api.ClaimBound, annBoundByController, storageutil.StorageClassAnnotation, annBindCompleted)),
newClaimArray("claim13-1", "uid13-1", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
withExpectedCapacity("10Gi", newClaimArray("claim13-1", "uid13-1", "1Gi", "volume13-1-2", v1.ClaimBound, annBoundByController, storageutil.StorageClassAnnotation, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
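
The 13-x rows exercise matching by the beta storage-class annotation (storageutil.StorageClassAnnotation). A hedged sketch of the comparison they imply, where a missing annotation and an empty one both mean "no class" (the real matcher also weighs capacity, access modes, and selectors; the constant's value is an assumption, check the storage util package for the real one):

package volumetest

import "k8s.io/kubernetes/pkg/api/v1"

// classAnnotation assumes the value of storageutil.StorageClassAnnotation.
const classAnnotation = "volume.beta.kubernetes.io/storage-class"

// classMatches sketches the class check implied by tests 13-1..13-6:
// indexing a nil map yields "", so an absent annotation equals class "".
func classMatches(claim *v1.PersistentVolumeClaim, volume *v1.PersistentVolume) bool {
	return claim.Annotations[classAnnotation] == volume.Annotations[classAnnotation]
}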
{
// syncVolume binds a claim without a class even if there is a
// smaller PV with a class available
"13-2 - binding without a class",
[]*api.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolume("volume13-2-2", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
[]*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolume("volume13-2-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
},
[]*api.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
[]*v1.PersistentVolume{
newVolume("volume13-2-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
newVolume("volume13-2-2", "10Gi", "uid13-2", "claim13-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
},
newClaimArray("claim13-2", "uid13-2", "1Gi", "", api.ClaimPending),
withExpectedCapacity("10Gi", newClaimArray("claim13-2", "uid13-2", "1Gi", "volume13-2-2", api.ClaimBound, annBoundByController, annBindCompleted)),
newClaimArray("claim13-2", "uid13-2", "1Gi", "", v1.ClaimPending),
withExpectedCapacity("10Gi", newClaimArray("claim13-2", "uid13-2", "1Gi", "volume13-2-2", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a claim with given class even if there is a
// smaller PV with different class available
"13-3 - binding to a specific class",
volumeWithClass("silver", []*api.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume13-3-2", "10Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
volumeWithClass("silver", []*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-3-2", "10Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain, storageutil.StorageClassAnnotation),
}),
volumeWithClass("silver", []*api.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
volumeWithClass("silver", []*v1.PersistentVolume{
newVolume("volume13-3-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolume("volume13-3-2", "10Gi", "uid13-3", "claim13-3", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController, storageutil.StorageClassAnnotation),
}),
newClaimArray("claim13-3", "uid13-3", "1Gi", "", api.ClaimPending, storageutil.StorageClassAnnotation),
withExpectedCapacity("10Gi", newClaimArray("claim13-3", "uid13-3", "1Gi", "volume13-3-2", api.ClaimBound, annBoundByController, annBindCompleted, storageutil.StorageClassAnnotation)),
newClaimArray("claim13-3", "uid13-3", "1Gi", "", v1.ClaimPending, storageutil.StorageClassAnnotation),
withExpectedCapacity("10Gi", newClaimArray("claim13-3", "uid13-3", "1Gi", "volume13-3-2", v1.ClaimBound, annBoundByController, annBindCompleted, storageutil.StorageClassAnnotation)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a claim requesting class "" to a PV with
// class=""
"13-4 - empty class",
volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)),
volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)),
claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "", api.ClaimPending)),
claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", api.ClaimBound, annBoundByController, annBindCompleted)),
volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain)),
volumeWithClass("", newVolumeArray("volume13-4", "1Gi", "uid13-4", "claim13-4", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController)),
claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "", v1.ClaimPending)),
claimWithClass("", newClaimArray("claim13-4", "uid13-4", "1Gi", "volume13-4", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a claim requesting class nil to a PV with
// class = ""
"13-5 - nil class",
volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain)),
volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController)),
newClaimArray("claim13-5", "uid13-5", "1Gi", "", api.ClaimPending),
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", api.ClaimBound, annBoundByController, annBindCompleted),
volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain)),
volumeWithClass("", newVolumeArray("volume13-5", "1Gi", "uid13-5", "claim13-5", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController)),
newClaimArray("claim13-5", "uid13-5", "1Gi", "", v1.ClaimPending),
newClaimArray("claim13-5", "uid13-5", "1Gi", "volume13-5", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// syncVolume binds a claim requesting class "" to a PV with
// class=nil
"13-6 - nil class in PV, '' class in claim",
newVolumeArray("volume13-6", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume13-6", "1Gi", "uid13-6", "claim13-6", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "", api.ClaimPending)),
claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "volume13-6", api.ClaimBound, annBoundByController, annBindCompleted)),
newVolumeArray("volume13-6", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume13-6", "1Gi", "uid13-6", "claim13-6", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "", v1.ClaimPending)),
claimWithClass("", newClaimArray("claim13-6", "uid13-6", "1Gi", "volume13-6", v1.ClaimBound, annBoundByController, annBindCompleted)),
noevents, noerrors, testSyncClaim,
},
}
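
After the table, TestSync presumably hands the rows to a shared single-pass harness that seeds a fake API reactor, runs the row's sync function once, and diffs the resulting objects and events against the expectations. A sketch of that loop, with the harness name and steps assumed:

package volumetest

import "testing"

// runOnce is a hypothetical stand-in for the package's shared harness.
func runOnce(t *testing.T, tests []controllerTest) {
	for _, test := range tests {
		t.Logf("running %q", test.name)
		// 1. seed the fake reactor with test.initialVolumes / test.initialClaims
		// 2. invoke test.test (testSyncClaim or testSyncVolume)
		// 3. compare the reactor's contents with expectedVolumes / expectedClaims
		// 4. compare recorded events with test.expectedEvents
	}
}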
@@ -527,26 +527,26 @@ func TestMultiSync(t *testing.T) {
{
// syncClaim binds to a matching unbound volume.
"10-1 - successful bind",
newVolumeArray("volume10-1", "1Gi", "", "", api.VolumePending, api.PersistentVolumeReclaimRetain),
newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim10-1", "uid10-1", "1Gi", "", api.ClaimPending),
newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", api.ClaimBound, annBoundByController, annBindCompleted),
newVolumeArray("volume10-1", "1Gi", "", "", v1.VolumePending, v1.PersistentVolumeReclaimRetain),
newVolumeArray("volume10-1", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newClaimArray("claim10-1", "uid10-1", "1Gi", "", v1.ClaimPending),
newClaimArray("claim10-1", "uid10-1", "1Gi", "volume10-1", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
{
// Two controllers bound two PVs to a single claim. Test that one of
// them wins and the second rolls back.
"10-2 - bind PV race",
[]*api.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
[]*v1.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
},
[]*api.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", api.VolumeBound, api.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "", "", api.VolumeAvailable, api.PersistentVolumeReclaimRetain),
[]*v1.PersistentVolume{
newVolume("volume10-2-1", "1Gi", "uid10-2", "claim10-2", v1.VolumeBound, v1.PersistentVolumeReclaimRetain, annBoundByController),
newVolume("volume10-2-2", "1Gi", "", "", v1.VolumeAvailable, v1.PersistentVolumeReclaimRetain),
},
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", api.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", v1.ClaimBound, annBoundByController, annBindCompleted),
newClaimArray("claim10-2", "uid10-2", "1Gi", "volume10-2-1", v1.ClaimBound, annBoundByController, annBindCompleted),
noevents, noerrors, testSyncClaim,
},
}
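
TestMultiSync differs from TestSync in that the harness keeps re-running the sync until the fake cluster stops changing; that is what lets the "10-2 - bind PV race" row converge with one PV staying bound and the other rolled back to Available. A sketch of the loop-until-stable idea, with the iteration cap and function shape assumed:

package volumetest

import "fmt"

// syncUntilStable re-runs a sync step while it keeps mutating state,
// bounding iterations so a ping-ponging controller fails the test
// instead of hanging it.
func syncUntilStable(sync func() (changed bool, err error)) error {
	for i := 0; i < 100; i++ { // the cap is an arbitrary assumption
		changed, err := sync()
		if err != nil {
			return err
		}
		if !changed {
			return nil // state converged
		}
	}
	return fmt.Errorf("controller did not converge")
}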

@@ -20,8 +20,8 @@ import (
"errors"
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/storage"
"k8s.io/kubernetes/pkg/api/v1"
storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1"
)
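
Note how the rewrite keeps call sites untouched: the internal packages are swapped for their versioned counterparts, and the group package is aliased so the identifier stays the same. A condensed, compilable view of the pattern (the blank-identifier uses exist only so this standalone sketch builds):

package volumetest

import (
	"k8s.io/kubernetes/pkg/api/v1"                       // replaces "k8s.io/kubernetes/pkg/api"; types move from api.* to v1.*
	storage "k8s.io/kubernetes/pkg/apis/storage/v1beta1" // alias keeps existing storage.* references compiling
)

var _ v1.PersistentVolume
var _ storage.StorageClass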

// Test single call to syncVolume, expecting recycling to happen.
@@ -33,7 +33,7 @@ func TestDeleteSync(t *testing.T) {
{
// delete volume bound by controller
"8-1 - successful delete",
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-1", "1Gi", "uid8-1", "claim8-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
novolumes,
noclaims,
noclaims,
@@ -45,7 +45,7 @@ func TestDeleteSync(t *testing.T) {
{
// delete volume bound by user
"8-2 - successful delete with prebound volume",
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume8-2", "1Gi", "uid8-2", "claim8-2", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
@@ -57,8 +57,8 @@ func TestDeleteSync(t *testing.T) {
{
// delete failure - plugin not found
"8-3 - plugin not found",
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeBound, api.PersistentVolumeReclaimDelete),
withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", api.VolumeFailed, api.PersistentVolumeReclaimDelete)),
newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Error getting deleter volume plugin for volume \"volume8-3\": no volume plugin matched", newVolumeArray("volume8-3", "1Gi", "uid8-3", "claim8-3", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors, testSyncVolume,
@@ -66,8 +66,8 @@ func TestDeleteSync(t *testing.T) {
{
// delete failure - newDeleter returns error
"8-4 - newDeleter returns error",
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeBound, api.PersistentVolumeReclaimDelete),
withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", api.VolumeFailed, api.PersistentVolumeReclaimDelete)),
newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Failed to create deleter for volume \"volume8-4\": Mock plugin error: no deleteCalls configured", newVolumeArray("volume8-4", "1Gi", "uid8-4", "claim8-4", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
@@ -76,8 +76,8 @@ func TestDeleteSync(t *testing.T) {
{
// delete failure - delete() returns error
"8-5 - delete returns error",
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeBound, api.PersistentVolumeReclaimDelete),
withMessage("Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", api.VolumeFailed, api.PersistentVolumeReclaimDelete)),
newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
withMessage("Mock delete error", newVolumeArray("volume8-5", "1Gi", "uid8-5", "claim8-5", v1.VolumeFailed, v1.PersistentVolumeReclaimDelete)),
noclaims,
noclaims,
[]string{"Warning VolumeFailedDelete"}, noerrors,
@@ -86,7 +86,7 @@ func TestDeleteSync(t *testing.T) {
{
// delete success(?) - volume is deleted before doDelete() starts
"8-6 - volume is deleted before deleting",
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume8-6", "1Gi", "uid8-6", "claim8-6", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,
@@ -103,31 +103,31 @@ func TestDeleteSync(t *testing.T) {
// starts. This simulates "volume no longer needs recycling,
// skipping".
"8-7 - volume is bound before deleting",
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-7", "1Gi", "uid8-7", "claim8-7", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
noclaims,
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound),
newClaimArray("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound),
noevents, noerrors,
wrapTestWithInjectedOperation(wrapTestWithReclaimCalls(operationDelete, []error{}, testSyncVolume), func(ctrl *PersistentVolumeController, reactor *volumeReactor) {
reactor.lock.Lock()
defer reactor.lock.Unlock()
// Bind the volume to resurrected claim (this should never
// happen)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", api.ClaimBound)
claim := newClaim("claim8-7", "uid8-7", "10Gi", "volume8-7", v1.ClaimBound)
reactor.claims[claim.Name] = claim
ctrl.claims.Add(claim)
volume := reactor.volumes["volume8-7"]
volume.Status.Phase = api.VolumeBound
volume.Status.Phase = v1.VolumeBound
}),
},
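
Test 8-7 above races the delete against a claim resurrection: the injected closure runs under the reactor lock while the delete operation is in flight, re-adding the claim and flipping the volume back to Bound. A sketch of how such an injection wrapper can be shaped, reusing the test-call signature visible in these tables (the wrapper name and mechanics are assumptions; the real wrapTestWithInjectedOperation interleaves the injection with the in-flight operation, while this simplified version runs it up front):

package volumetest

// wrapWithInjection is a hypothetical analogue of
// wrapTestWithInjectedOperation: apply a state mutation, then let the
// wrapped test proceed against the mutated world.
func wrapWithInjection(inner testCall, inject func(ctrl *PersistentVolumeController, reactor *volumeReactor)) testCall {
	return func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
		inject(ctrl, reactor) // e.g. resurrect claim8-7 and rebind volume8-7
		return inner(ctrl, reactor, test)
	}
}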
{
// delete success - volume bound by user is deleted, while a new
// claim is created with another UID.
"8-9 - prebound volume is deleted while the claim exists",
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume8-9", "1Gi", "uid8-9", "claim8-9", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", api.ClaimPending),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending),
newClaimArray("claim8-9", "uid8-9-x", "10Gi", "", v1.ClaimPending),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
@@ -136,8 +136,8 @@ func TestDeleteSync(t *testing.T) {
{
// PV requires external deleter
"8-10 - external deleter",
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", api.VolumeBound, api.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", api.VolumeReleased, api.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annBoundByController),
newVolumeArray("volume8-10", "1Gi", "uid10-1", "claim10-1", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, annBoundByController),
noclaims,
noclaims,
noevents, noerrors,
@@ -152,16 +152,16 @@ func TestDeleteSync(t *testing.T) {
// delete success - two PVs are provisioned for a single claim.
// One of the PVs is deleted.
"8-11 - two PVs provisioned for a single claim",
[]*api.PersistentVolume{
newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
[]*v1.PersistentVolume{
newVolume("volume8-11-1", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
[]*api.PersistentVolume{
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
[]*v1.PersistentVolume{
newVolume("volume8-11-2", "1Gi", "uid8-11", "claim8-11", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
// the claim is bound to volume8-11-2 -> volume8-11-1 has lost the race and will be deleted
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", api.ClaimBound),
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", api.ClaimBound),
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound),
newClaimArray("claim8-11", "uid8-11", "10Gi", "volume8-11-2", v1.ClaimBound),
noevents, noerrors,
// Inject deleter into the controller and call syncVolume. The
// deleter simulates one delete() call that succeeds.
@@ -172,17 +172,17 @@ func TestDeleteSync(t *testing.T) {
// claim. One of the PVs is marked as Released to be deleted by the
// external provisioner.
"8-12 - two PVs externally provisioned for a single claim",
[]*api.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
[]*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
[]*api.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", api.VolumeReleased, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", api.VolumeBound, api.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
[]*v1.PersistentVolume{
newVolume("volume8-12-1", "1Gi", "uid8-12", "claim8-12", v1.VolumeReleased, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
newVolume("volume8-12-2", "1Gi", "uid8-12", "claim8-12", v1.VolumeBound, v1.PersistentVolumeReclaimDelete, annDynamicallyProvisioned),
},
// the claim is bound to volume8-12-2 -> volume8-12-1 has lost the race and will be "Released"
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", api.ClaimBound),
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", api.ClaimBound),
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound),
newClaimArray("claim8-12", "uid8-12", "10Gi", "volume8-12-2", v1.ClaimBound),
noevents, noerrors,
func(ctrl *PersistentVolumeController, reactor *volumeReactor, test controllerTest) error {
// Inject external deleter annotation
@@ -215,7 +215,7 @@ func TestDeleteMultiSync(t *testing.T) {
// delete failure - delete returns error. The controller should
// try again.
"9-1 - delete returns error",
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", api.VolumeBound, api.PersistentVolumeReclaimDelete),
newVolumeArray("volume9-1", "1Gi", "uid9-1", "claim9-1", v1.VolumeBound, v1.PersistentVolumeReclaimDelete),
novolumes,
noclaims,
noclaims,