replacing handwritten client in several controllers
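The hunks below (from the DaemonSet controller) show the shape of the change: the controller's kubeClient field and the NewDaemonSetsController signature move from the handwritten client.Interface to the generated clientset.Interface, pod and node list/watch calls go through the Legacy() group client, events are recorded through the unversioned_legacy.EventSinkImpl wrapper until every consumer has moved to the clientset, and storeDaemonSetStatus now takes the generated extensions DaemonSetInterface. For orientation, here is a minimal sketch of how a caller might now construct the controller; the NewForConfig constructor and the *client.Config parameter are assumptions about the generated package, not part of this commit.

// Sketch only: caller-side wiring after this change. Assumptions (not in
// this commit): the generated release_1_1 package exposes NewForConfig and
// accepts the same *client.Config the handwritten client was built from.
package daemon

import (
	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
	client "k8s.io/kubernetes/pkg/client/unversioned"
	"k8s.io/kubernetes/pkg/controller"
)

func newDaemonSetsControllerFromConfig(cfg *client.Config, resync controller.ResyncPeriodFunc) (*DaemonSetsController, error) {
	// Build the generated clientset instead of the handwritten client;
	// NewDaemonSetsController now expects clientset.Interface.
	cs, err := clientset.NewForConfig(cfg) // assumed constructor name
	if err != nil {
		return nil, err
	}
	return NewDaemonSetsController(cs, resync), nil
}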
@@ -27,8 +27,10 @@ import (
 	"k8s.io/kubernetes/pkg/api/validation"
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/client/cache"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_1"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
+	unversioned_extensions "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned"
+	unversioned_legacy "k8s.io/kubernetes/pkg/client/typed/generated/legacy/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/controller/framework"
 	"k8s.io/kubernetes/pkg/labels"
@@ -60,7 +62,7 @@ const (
 // DaemonSetsController is responsible for synchronizing DaemonSet objects stored
 // in the system with actual running pods.
 type DaemonSetsController struct {
-	kubeClient client.Interface
+	kubeClient clientset.Interface
 	podControl controller.PodControlInterface
 
 	// An dsc is temporarily suspended after creating/deleting these many replicas.
@@ -91,10 +93,11 @@ type DaemonSetsController struct {
 	queue *workqueue.Type
 }
 
-func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controller.ResyncPeriodFunc) *DaemonSetsController {
+func NewDaemonSetsController(kubeClient clientset.Interface, resyncPeriod controller.ResyncPeriodFunc) *DaemonSetsController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+	// TODO: remove the wrapper when every clients have moved to use the clientset.
+	eventBroadcaster.StartRecordingToSink(&unversioned_legacy.EventSinkImpl{kubeClient.Legacy().Events("")})
 
 	dsc := &DaemonSetsController{
 		kubeClient: kubeClient,
@@ -142,10 +145,10 @@ func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controlle
 	dsc.podStore.Store, dsc.podController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dsc.kubeClient.Pods(api.NamespaceAll).List(options)
+				return dsc.kubeClient.Legacy().Pods(api.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dsc.kubeClient.Pods(api.NamespaceAll).Watch(options)
+				return dsc.kubeClient.Legacy().Pods(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.Pod{},
@@ -160,10 +163,10 @@ func NewDaemonSetsController(kubeClient client.Interface, resyncPeriod controlle
 	dsc.nodeStore.Store, dsc.nodeController = framework.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dsc.kubeClient.Nodes().List(options)
+				return dsc.kubeClient.Legacy().Nodes().List(options)
 			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dsc.kubeClient.Nodes().Watch(options)
+				return dsc.kubeClient.Legacy().Nodes().Watch(options)
 			},
 		},
 		&api.Node{},
@@ -463,7 +466,7 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) {
 	deleteWait.Wait()
 }
 
-func storeDaemonSetStatus(dsClient client.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
+func storeDaemonSetStatus(dsClient unversioned_extensions.DaemonSetInterface, ds *extensions.DaemonSet, desiredNumberScheduled, currentNumberScheduled, numberMisscheduled int) error {
 	if ds.Status.DesiredNumberScheduled == desiredNumberScheduled && ds.Status.CurrentNumberScheduled == currentNumberScheduled && ds.Status.NumberMisscheduled == numberMisscheduled {
 		return nil
 	}
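The retyped storeDaemonSetStatus implies its caller now obtains the typed DaemonSet client from the generated clientset rather than from the handwritten one. A hedged sketch of such a caller follows; the Extensions() group accessor mirrors the Legacy() accessor used above but is an assumption, since it does not appear in these hunks, and the method name sketchUpdateStatus is hypothetical.

// Sketch only: a plausible caller of the retyped storeDaemonSetStatus, in the
// same package as the controller. Assumption: the generated clientset exposes
// an Extensions() group accessor returning the unversioned_extensions typed
// clients, alongside the Legacy() accessor shown in the hunks above.
func (dsc *DaemonSetsController) sketchUpdateStatus(ds *extensions.DaemonSet, desired, current, misscheduled int) error {
	dsClient := dsc.kubeClient.Extensions().DaemonSets(ds.Namespace)
	return storeDaemonSetStatus(dsClient, ds, desired, current, misscheduled)
}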