switch controller manager to generated clientset

deads2k
2016-09-20 09:43:11 -04:00
parent adda69c7ca
commit b83a317003
11 changed files with 125 additions and 124 deletions


@@ -25,8 +25,10 @@ import (
 	"k8s.io/kubernetes/pkg/apis/extensions"
 	"k8s.io/kubernetes/pkg/apis/policy"
 	"k8s.io/kubernetes/pkg/client/cache"
+	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
+	policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/unversioned"
 	"k8s.io/kubernetes/pkg/client/record"
-	client "k8s.io/kubernetes/pkg/client/unversioned"
 	"k8s.io/kubernetes/pkg/controller"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/types"
@@ -43,7 +45,7 @@ const statusUpdateRetries = 2
 type updater func(*policy.PodDisruptionBudget) error
 
 type DisruptionController struct {
-	kubeClient *client.Client
+	kubeClient internalclientset.Interface
 
 	pdbStore      cache.Store
 	pdbController *cache.Controller
@@ -83,7 +85,7 @@ type controllerAndScale struct {
 // controllers and their scale.
 type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)
 
-func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
+func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface) *DisruptionController {
 	dc := &DisruptionController{
 		kubeClient:    kubeClient,
 		podController: podInformer.GetController(),
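
Call sites change only in the type they pass. A hypothetical caller under the new signature (the informer, clientset, and stop-channel wiring are assumptions for illustration, not part of this commit):

// clientset is an internalclientset.Interface, e.g. from NewForConfig above;
// podInformer is a shared pod informer built elsewhere in the controller manager.
dc := NewDisruptionController(podInformer, clientset)
go dc.Run(stopCh)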
@@ -124,10 +126,10 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *
 	dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
 		&cache.ListWatch{
 			ListFunc: func(options api.ListOptions) (runtime.Object, error) {
-				return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
+				return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
			},
 			WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
-				return dc.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(options)
+				return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
 			},
 		},
 		&api.ReplicationController{},
@@ -256,7 +258,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
 	glog.V(0).Infof("Starting disruption controller")
 	if dc.kubeClient != nil {
 		glog.V(0).Infof("Sending events to api server.")
-		dc.broadcaster.StartRecordingToSink(dc.kubeClient.Events(""))
+		dc.broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
 	} else {
 		glog.V(0).Infof("No api server defined - no events will be sent to API server.")
 	}
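
EventSinkImpl adapts the typed events client to the record.EventSink interface the broadcaster expects, which is why the bare dc.kubeClient.Events("") no longer suffices. The surrounding wiring looks roughly like this sketch (the recorder component name is an assumption):

broadcaster := record.NewBroadcaster()
// The empty namespace scopes the sink to all namespaces; each event is created
// in the namespace of the object it refers to.
broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := broadcaster.NewRecorder(api.EventSource{Component: "controllermanager"})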
@@ -589,7 +591,7 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
 // refresh tries to re-GET the given PDB. If there are any errors, it just
 // returns the old PDB. Intended to be used in a retry loop where it runs a
 // bounded number of times.
-func refresh(pdbClient client.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
+func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
 	newPdb, err := pdbClient.Get(pdb.Name)
 	if err == nil {
 		return newPdb
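
refresh now takes the typed PodDisruptionBudget client from the policy group. A hedged sketch of the bounded retry loop the comment describes, assuming the clientset's Policy() accessor and an update function of the updater type declared above (the loop body is illustrative, not this file's exact code):

pdbClient := dc.kubeClient.Policy().PodDisruptionBudgets(pdb.Namespace)
for i := 0; i < statusUpdateRetries; i++ {
	if err := update(pdb); err == nil {
		break
	}
	// On failure, re-GET the PDB (falling back to the old object) and retry.
	pdb = refresh(pdbClient, pdb)
}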