switch controller manager to generated clientset

deads2k
2016-09-20 09:43:11 -04:00
parent adda69c7ca
commit b83a317003
11 changed files with 125 additions and 124 deletions

View File

@@ -25,8 +25,10 @@ import (
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/apis/policy"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
policyclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/policy/unversioned"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
@@ -43,7 +45,7 @@ const statusUpdateRetries = 2
type updater func(*policy.PodDisruptionBudget) error
type DisruptionController struct {
- kubeClient *client.Client
+ kubeClient internalclientset.Interface
pdbStore cache.Store
pdbController *cache.Controller
@@ -83,7 +85,7 @@ type controllerAndScale struct {
// controllers and their scale.
type podControllerFinder func(*api.Pod) ([]controllerAndScale, error)
- func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *client.Client) *DisruptionController {
+ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface) *DisruptionController {
dc := &DisruptionController{
kubeClient: kubeClient,
podController: podInformer.GetController(),
@@ -124,10 +126,10 @@ func NewDisruptionController(podInformer cache.SharedIndexInformer, kubeClient *
dc.rcIndexer, dc.rcController = cache.NewIndexerInformer(
&cache.ListWatch{
ListFunc: func(options api.ListOptions) (runtime.Object, error) {
- return dc.kubeClient.ReplicationControllers(api.NamespaceAll).List(options)
+ return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).List(options)
},
WatchFunc: func(options api.ListOptions) (watch.Interface, error) {
- return dc.kubeClient.ReplicationControllers(api.NamespaceAll).Watch(options)
+ return dc.kubeClient.Core().ReplicationControllers(api.NamespaceAll).Watch(options)
},
},
&api.ReplicationController{},
@@ -256,7 +258,7 @@ func (dc *DisruptionController) Run(stopCh <-chan struct{}) {
glog.V(0).Infof("Starting disruption controller")
if dc.kubeClient != nil {
glog.V(0).Infof("Sending events to api server.")
- dc.broadcaster.StartRecordingToSink(dc.kubeClient.Events(""))
+ dc.broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: dc.kubeClient.Core().Events("")})
} else {
glog.V(0).Infof("No api server defined - no events will be sent to API server.")
}
@@ -589,7 +591,7 @@ func (dc *DisruptionController) updatePdbSpec(pdb *policy.PodDisruptionBudget, c
// refresh tries to re-GET the given PDB. If there are any errors, it just
// returns the old PDB. Intended to be used in a retry loop where it runs a
// bounded number of times.
- func refresh(pdbClient client.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
+ func refresh(pdbClient policyclientset.PodDisruptionBudgetInterface, pdb *policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
newPdb, err := pdbClient.Get(pdb.Name)
if err == nil {
return newPdb
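The recurring move in this file: the event broadcaster can no longer be handed the client directly, so the typed Events client from the Core() group is wrapped in an EventSinkImpl before recording starts. A minimal standalone sketch of that wiring (illustrative only, not part of the commit; it reuses only names visible in this diff):

// startEventRecording is a hypothetical helper showing the sink wiring used
// by Run() above: wrap the generated Core().Events client in EventSinkImpl
// before handing it to the broadcaster.
func startEventRecording(kubeClient internalclientset.Interface, broadcaster record.EventBroadcaster) {
	if kubeClient == nil {
		return // no API server configured; events stay local
	}
	broadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
}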

View File

@@ -70,7 +70,7 @@ var (
)
// NewEndpointController returns a new *EndpointController.
- func NewEndpointController(podInformer cache.SharedIndexInformer, client *clientset.Clientset) *EndpointController {
+ func NewEndpointController(podInformer cache.SharedIndexInformer, client clientset.Interface) *EndpointController {
if client != nil && client.Core().GetRESTClient().GetRateLimiter() != nil {
metrics.RegisterMetricAndTrackRateLimiterUsage("endpoint_controller", client.Core().GetRESTClient().GetRateLimiter())
}
@@ -123,7 +123,7 @@ func NewEndpointControllerFromClient(client *clientset.Clientset, resyncPeriod c
// EndpointController manages selector-based service endpoints.
type EndpointController struct {
- client *clientset.Clientset
+ client clientset.Interface
serviceStore cache.StoreToServiceLister
podStore cache.StoreToPodLister
@@ -347,7 +347,7 @@ func (e *EndpointController) syncService(key string) error {
// Don't retry, as the key isn't going to magically become understandable.
return nil
}
- err = e.client.Endpoints(namespace).Delete(name, nil)
+ err = e.client.Core().Endpoints(namespace).Delete(name, nil)
if err != nil && !errors.IsNotFound(err) {
return err
}
@@ -450,7 +450,7 @@ func (e *EndpointController) syncService(key string) error {
subsets = endpoints.RepackSubsets(subsets)
// See if there's actually an update here.
- currentEndpoints, err := e.client.Endpoints(service.Namespace).Get(service.Name)
+ currentEndpoints, err := e.client.Core().Endpoints(service.Namespace).Get(service.Name)
if err != nil {
if errors.IsNotFound(err) {
currentEndpoints = &api.Endpoints{
@@ -496,10 +496,10 @@ func (e *EndpointController) syncService(key string) error {
createEndpoints := len(currentEndpoints.ResourceVersion) == 0
if createEndpoints {
// No previous endpoints, create them
- _, err = e.client.Endpoints(service.Namespace).Create(newEndpoints)
+ _, err = e.client.Core().Endpoints(service.Namespace).Create(newEndpoints)
} else {
// Pre-existing
- _, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)
+ _, err = e.client.Core().Endpoints(service.Namespace).Update(newEndpoints)
}
if err != nil {
if createEndpoints && errors.IsForbidden(err) {
@@ -521,7 +521,7 @@ func (e *EndpointController) syncService(key string) error {
// some stragglers could have been left behind if the endpoint controller
// reboots).
func (e *EndpointController) checkLeftoverEndpoints() {
- list, err := e.client.Endpoints(api.NamespaceAll).List(api.ListOptions{})
+ list, err := e.client.Core().Endpoints(api.NamespaceAll).List(api.ListOptions{})
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to list endpoints (%v); orphaned endpoints will not be cleaned up. (They're pretty harmless, but you can restart this component if you want another attempt made.)", err))
return
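The point of widening the field and constructor to clientset.Interface (rather than the concrete *clientset.Clientset) is injectability: anything satisfying the interface works, including the generated fake. A hedged sketch, assuming clientset here aliases the generated internalclientset package and that its companion fake package lives at the usual generated path:

import (
	// Assumed path: the fake generated alongside internalclientset.
	"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
)

// newFakeEndpointClient is hypothetical test scaffolding, not part of this
// commit: NewSimpleClientset returns an in-memory client that satisfies
// clientset.Interface, so it can be passed to NewEndpointController without
// a running API server.
func newFakeEndpointClient() clientset.Interface {
	return fake.NewSimpleClientset()
}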

View File

@@ -23,8 +23,8 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/runtime"
"github.com/golang/glog"
@@ -159,7 +159,7 @@ type petClient interface {
// apiServerPetClient is a petset aware Kubernetes client.
type apiServerPetClient struct {
- c *client.Client
+ c internalclientset.Interface
recorder record.EventRecorder
petHealthChecker
}
@@ -167,7 +167,7 @@ type apiServerPetClient struct {
// Get gets the pet in the pcb from the apiserver.
func (p *apiServerPetClient) Get(pet *pcb) (*pcb, bool, error) {
ns := pet.parent.Namespace
- pod, err := podClient(p.c, ns).Get(pet.pod.Name)
+ pod, err := p.c.Core().Pods(ns).Get(pet.pod.Name)
if errors.IsNotFound(err) {
return nil, false, nil
}
@@ -181,7 +181,7 @@ func (p *apiServerPetClient) Get(pet *pcb) (*pcb, bool, error) {
// Delete deletes the pet in the pcb from the apiserver.
func (p *apiServerPetClient) Delete(pet *pcb) error {
- err := podClient(p.c, pet.parent.Namespace).Delete(pet.pod.Name, nil)
+ err := p.c.Core().Pods(pet.parent.Namespace).Delete(pet.pod.Name, nil)
if errors.IsNotFound(err) {
err = nil
}
@@ -191,7 +191,7 @@ func (p *apiServerPetClient) Delete(pet *pcb) error {
// Create creates the pet in the pcb.
func (p *apiServerPetClient) Create(pet *pcb) error {
- _, err := podClient(p.c, pet.parent.Namespace).Create(pet.pod)
+ _, err := p.c.Core().Pods(pet.parent.Namespace).Create(pet.pod)
p.event(pet.parent, "Create", fmt.Sprintf("pet: %v", pet.pod.Name), err)
return err
}
@@ -200,7 +200,7 @@ func (p *apiServerPetClient) Create(pet *pcb) error {
// If the pod object of a pet which to be updated has been changed in server side, we
// will get the actual value and set pet identity before retries.
func (p *apiServerPetClient) Update(pet *pcb, expectedPet *pcb) (updateErr error) {
- pc := podClient(p.c, pet.parent.Namespace)
+ pc := p.c.Core().Pods(pet.parent.Namespace)
for i := 0; ; i++ {
updatePod, needsUpdate, err := copyPetID(pet, expectedPet)
@@ -227,12 +227,12 @@ func (p *apiServerPetClient) DeletePVCs(pet *pcb) error {
}
func (p *apiServerPetClient) getPVC(pvcName, pvcNamespace string) (*api.PersistentVolumeClaim, error) {
- pvc, err := claimClient(p.c, pvcNamespace).Get(pvcName)
+ pvc, err := p.c.Core().PersistentVolumeClaims(pvcNamespace).Get(pvcName)
return pvc, err
}
func (p *apiServerPetClient) createPVC(pvc *api.PersistentVolumeClaim) error {
- _, err := claimClient(p.c, pvc.Namespace).Create(pvc)
+ _, err := p.c.Core().PersistentVolumeClaims(pvc.Namespace).Create(pvc)
return err
}
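Every hunk in this file is the same substitution: the old podClient/claimClient helpers (deleted further down, in the utils file) are replaced by direct calls through the Core() group of the generated clientset. A small illustrative helper written against the new interface, following the same Get-then-IsNotFound pattern as the Get hunk above (hypothetical, not in the commit):

// podExists reports whether a pod is present, treating NotFound as a normal
// "absent" result rather than an error; any other error is returned as-is.
func podExists(c internalclientset.Interface, namespace, name string) (bool, error) {
	_, err := c.Core().Pods(namespace).Get(name)
	if errors.IsNotFound(err) {
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return true, nil
}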

View File

@@ -26,8 +26,10 @@ import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/unversioned"
"k8s.io/kubernetes/pkg/client/record"
client "k8s.io/kubernetes/pkg/client/unversioned"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/errors"
@@ -50,7 +52,7 @@ const (
// PetSetController controls petsets.
type PetSetController struct {
- kubeClient *client.Client
+ kubeClient internalclientset.Interface
// newSyncer returns an interface capable of syncing a single pet.
// Abstracted out for testing.
@@ -81,10 +83,10 @@ type PetSetController struct {
}
// NewPetSetController creates a new petset controller.
- func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient *client.Client, resyncPeriod time.Duration) *PetSetController {
+ func NewPetSetController(podInformer cache.SharedIndexInformer, kubeClient internalclientset.Interface, resyncPeriod time.Duration) *PetSetController {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
- eventBroadcaster.StartRecordingToSink(kubeClient.Events(""))
+ eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: kubeClient.Core().Events("")})
recorder := eventBroadcaster.NewRecorder(api.EventSource{Component: "petset"})
pc := &apiServerPetClient{kubeClient, recorder, &defaultPetHealthChecker{}}
@@ -309,7 +311,7 @@ func (psc *PetSetController) Sync(key string) error {
}
numPets, syncErr := psc.syncPetSet(&ps, petList)
- if updateErr := updatePetCount(psc.kubeClient, ps, numPets); updateErr != nil {
+ if updateErr := updatePetCount(psc.kubeClient.Apps(), ps, numPets); updateErr != nil {
glog.Infof("Failed to update replica count for petset %v/%v; requeuing; error: %v", ps.Namespace, ps.Name, updateErr)
return errors.NewAggregate([]error{syncErr, updateErr})
}

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/apis/apps"
"k8s.io/kubernetes/pkg/client/cache"
client "k8s.io/kubernetes/pkg/client/unversioned"
appsclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/apps/unversioned"
"k8s.io/kubernetes/pkg/controller"
"github.com/golang/glog"
@@ -44,37 +44,26 @@ func (o overlappingPetSets) Less(i, j int) bool {
}
// updatePetCount attempts to update the Status.Replicas of the given PetSet, with a single GET/PUT retry.
- func updatePetCount(kubeClient *client.Client, ps apps.PetSet, numPets int) (updateErr error) {
- if ps.Status.Replicas == numPets || kubeClient == nil {
+ func updatePetCount(psClient appsclientset.PetSetsGetter, ps apps.PetSet, numPets int) (updateErr error) {
+ if ps.Status.Replicas == numPets || psClient == nil {
return nil
}
- psClient := kubeClient.Apps().PetSets(ps.Namespace)
var getErr error
for i, ps := 0, &ps; ; i++ {
glog.V(4).Infof(fmt.Sprintf("Updating replica count for PetSet: %s/%s, ", ps.Namespace, ps.Name) +
fmt.Sprintf("replicas %d->%d (need %d), ", ps.Status.Replicas, numPets, ps.Spec.Replicas))
ps.Status = apps.PetSetStatus{Replicas: numPets}
- _, updateErr = psClient.UpdateStatus(ps)
+ _, updateErr = psClient.PetSets(ps.Namespace).UpdateStatus(ps)
if updateErr == nil || i >= statusUpdateRetries {
return updateErr
}
- if ps, getErr = psClient.Get(ps.Name); getErr != nil {
+ if ps, getErr = psClient.PetSets(ps.Namespace).Get(ps.Name); getErr != nil {
return getErr
}
}
}
- // claimClient returns the pvcClient for the given kubeClient/ns.
- func claimClient(kubeClient *client.Client, ns string) client.PersistentVolumeClaimInterface {
- return kubeClient.PersistentVolumeClaims(ns)
- }
- // podClient returns the given podClient for the given kubeClient/ns.
- func podClient(kubeClient *client.Client, ns string) client.PodInterface {
- return kubeClient.Pods(ns)
- }
// unhealthyPetTracker tracks unhealthy pets for petsets.
type unhealthyPetTracker struct {
pc petClient
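updatePetCount now depends on the narrowest slice of the clientset it needs: appsclientset.PetSetsGetter, which only knows how to hand back a per-namespace PetSets client. The controller satisfies it with kubeClient.Apps() (see the Sync hunk above), and a test can satisfy it with any stub implementing the same method. A hedged sketch of another caller built on the same getter (hypothetical, not in the commit):

// setReplicaStatus is an illustrative helper: given only a PetSetsGetter, it
// performs the single status write that the retry loop above repeats.
func setReplicaStatus(psClient appsclientset.PetSetsGetter, ps *apps.PetSet, replicas int) error {
	ps.Status = apps.PetSetStatus{Replicas: replicas}
	_, err := psClient.PetSets(ps.Namespace).UpdateStatus(ps)
	return err
}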