replacing handwritten client in several controllers

Chao Xu
2016-01-14 21:00:58 -08:00
parent 55f039fd53
commit c72d234bbf
64 changed files with 648 additions and 531 deletions
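For context, a minimal sketch of the call shape this commit migrates the controllers to, assuming a generated clientset value is already available. The helper below is illustrative and not part of this commit; it only mirrors calls that appear in the diff (Legacy(), Pods(), ReplicationControllers(), List()).

package example

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
)

// listCoreObjects is a hypothetical helper showing the new call shape: core
// API-group resources are reached through the generated clientset's Legacy()
// typed group instead of directly off the handwritten client.Interface.
func listCoreObjects(cs clientset.Interface) error {
	pods, err := cs.Legacy().Pods(api.NamespaceAll).List(api.ListOptions{})
	if err != nil {
		return err
	}
	rcs, err := cs.Legacy().ReplicationControllers(api.NamespaceAll).List(api.ListOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%d pods, %d replication controllers\n", len(pods.Items), len(rcs.Items))
	return nil
}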


@@ -27,8 +27,9 @@ import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/client/cache"
clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/framework"
"k8s.io/kubernetes/pkg/labels"
@@ -61,7 +62,7 @@ const (
// TODO: this really should be called ReplicationController. The only reason why it's a Manager
// is to distinguish this type from API object "ReplicationController". We should fix this.
type ReplicationManager struct {
-kubeClient client.Interface
+kubeClient clientset.Interface
podControl controller.PodControlInterface
// An rc is temporarily suspended after creating/deleting these many replicas.
@@ -90,10 +91,10 @@ type ReplicationManager struct {
}
// NewReplicationManager creates a new ReplicationManager.
-func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
+func NewReplicationManager(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc, burstReplicas int) *ReplicationManager {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
-eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
rm := &ReplicationManager{
kubeClient: kubeClient,
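A sketch of the event wiring above, pulled out into a standalone helper for reference (the helper name is assumed, not part of this commit): the broadcaster expects a record.EventSink, and the generated typed Events client is adapted to it through the EventSinkImpl wrapper.

package example

import (
	"github.com/golang/glog"

	"k8s.io/kubernetes/pkg/api"
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	"k8s.io/kubernetes/pkg/client/record"
	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
)

// newRCRecorder is a hypothetical helper mirroring the wiring above: wrap the
// generated Events client in EventSinkImpl so the broadcaster can write events
// through the clientset, then hand back a recorder for the controller to use.
func newRCRecorder(kubeClient clientset.Interface) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
	return eventBroadcaster.NewRecorder(api.EventSource{Component: "replication-controller"})
}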
@@ -109,10 +110,10 @@ func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.
rm.rcStore.Store, rm.rcController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rm.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
+return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rm.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(options)
+return rm.kubeClient.Legacy().ReplicationControllers(api.NamespaceAll).Watch(options)
},
},
&api.ReplicationController{},
@@ -150,10 +151,10 @@ func NewReplicationManager(kubeClient client.Interface, resyncPeriod controller.
rm.podStore.Store, rm.podController = framework.NewInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-return rm.kubeClient.Pods(api.NamespaceAll).List(options)
+return rm.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-return rm.kubeClient.Pods(api.NamespaceAll).Watch(options)
+return rm.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
},
},
&api.Pod{},
@@ -451,7 +452,7 @@ func (rm *ReplicationManager) syncReplicationController(key string) error {
}
// Always updates status as pods come up or die.
-if err := updateReplicaCount(rm.kubeClient.ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
+if err := updateReplicaCount(rm.kubeClient.Legacy().ReplicationControllers(rc.Namespace), rc, len(filteredPods)); err != nil {
// Multiple things could lead to this update failing. Requeuing the controller ensures
// we retry with some fairness.
glog.V(2).Infof("Failed to update replica count for controller %v/%v; requeuing; error: %v", rc.Namespace, rc.Name, err)