Merge pull request #40385 from ncdc/shared-informers-02-swap-existing

Automatic merge from submit-queue

Replace hand-written informers with generated ones

Replace existing uses of hand-written informers with generated ones.
Follow-up commits will switch the use of one-off informers to shared
informers.
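For context, the generated informer/lister stack being adopted here is consumed roughly as in the sketch below. It is a minimal, illustrative example written against today's `k8s.io/client-go` package paths; the in-tree generated packages this PR actually imports live under `k8s.io/kubernetes/pkg/client/informers/informers_generated/...` and `k8s.io/kubernetes/pkg/client/listers/...`, as the diff shows. The kubeconfig path and resync period are placeholders.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from a local kubeconfig (path is a placeholder).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// One shared factory hands out typed, generated informers; controllers
	// receive these instead of constructing their own watch/list machinery.
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	podInformer := factory.Core().V1().Pods()

	// The generated lister replaces hand-written types such as
	// StoreToPodLister, which this PR removes from call sites.
	podLister := podInformer.Lister()

	stopCh := make(chan struct{})
	defer close(stopCh)
	factory.Start(stopCh)

	if !cache.WaitForCacheSync(stopCh, podInformer.Informer().HasSynced) {
		panic("timed out waiting for caches to sync")
	}

	pods, err := podLister.Pods("kube-system").List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d pods\n", len(pods))
}
```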

This is a precursor to #40097. That PR will switch one-off informers to shared informers for the majority of the code base (but not quite all of it...).

NOTE: this does create a second set of shared informers in the kube-controller-manager. This will be resolved back down to a single factory once #40097 is reviewed and merged.

There are a couple of places where I expanded the number of caches we wait for in the calls to `WaitForCacheSync`; please pay attention to those. I also added a commented-out wait in the attach/detach controller. If @kubernetes/sig-storage-pr-reviews is OK with enabling that wait, I'll do it (I'll just need to tweak an integration test slightly).
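For reviewers who want the shape of that change in one place, the sketch below shows a controller `Run` that blocks on several `HasSynced` functions, with one wait left commented out. The controller type, its fields, and the import paths are illustrative stand-ins (current `client-go`/`apimachinery` paths), not code from a specific file in this PR.

```go
package example

import (
	"fmt"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// exampleController is a stand-in for a real controller such as the
// DaemonSets or attach/detach controller; only the fields needed to show
// the cache-sync pattern are included.
type exampleController struct {
	queue workqueue.RateLimitingInterface

	podStoreSynced  cache.InformerSynced
	nodeStoreSynced cache.InformerSynced
	dsStoreSynced   cache.InformerSynced
}

func (c *exampleController) runWorker() { /* drain c.queue here */ }

// Run blocks until every listed cache has synced, then starts the workers.
func (c *exampleController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	// Each extra HasSynced entry is one more cache the controller refuses
	// to work without; this is what "expanding the number of caches" means.
	if !cache.WaitForCacheSync(stopCh,
		c.podStoreSynced,
		c.nodeStoreSynced,
		c.dsStoreSynced,
		// c.pvcStoreSynced, // a wait can be left commented out, as in the attach/detach controller
	) {
		utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
		return
	}

	for i := 0; i < workers; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	<-stopCh
}
```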

@deads2k @sttts @smarterclayton @liggitt @soltysh @timothysc @lavalamp @wojtek-t @gmarek @sjenning @derekwaynecarr @kubernetes/sig-scalability-pr-reviews
Authored by Kubernetes Submit Queue on 2017-02-06 16:25:42 -08:00; committed by GitHub
55 changed files with 936 additions and 823 deletions


@@ -23,6 +23,7 @@ import (
"sync"
"time"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -38,9 +39,11 @@ import (
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
unversionedextensions "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/typed/extensions/v1beta1"
"k8s.io/kubernetes/pkg/client/legacylisters"
coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/core/v1"
extensionsinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/extensions/v1beta1"
corelisters "k8s.io/kubernetes/pkg/client/listers/core/v1"
extensionslisters "k8s.io/kubernetes/pkg/client/listers/extensions/v1beta1"
"k8s.io/kubernetes/pkg/controller"
"k8s.io/kubernetes/pkg/controller/informers"
"k8s.io/kubernetes/pkg/util/metrics"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -82,18 +85,18 @@ type DaemonSetsController struct {
 	syncHandler func(dsKey string) error
 	// A TTLCache of pod creates/deletes each ds expects to see
 	expectations controller.ControllerExpectationsInterface
-	// A store of daemon sets
-	dsStore *listers.StoreToDaemonSetLister
-	// A store of pods
-	podStore *listers.StoreToPodLister
-	// A store of nodes
-	nodeStore *listers.StoreToNodeLister
+	// dsLister can list/get daemonsets from the shared informer's store
+	dsLister extensionslisters.DaemonSetLister
 	// dsStoreSynced returns true if the daemonset store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	dsStoreSynced cache.InformerSynced
+	// podLister can list/get pods from the shared informer's store
+	podLister corelisters.PodLister
 	// podStoreSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	podStoreSynced cache.InformerSynced
+	// nodeLister can list/get nodes from the shared informer's store
+	nodeLister corelisters.NodeLister
 	// nodeStoreSynced returns true if the node store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	nodeStoreSynced cache.InformerSynced
@@ -104,7 +107,7 @@ type DaemonSetsController struct {
 	queue workqueue.RateLimitingInterface
 }
-func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podInformer informers.PodInformer, nodeInformer informers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
+func NewDaemonSetsController(daemonSetInformer extensionsinformers.DaemonSetInformer, podInformer coreinformers.PodInformer, nodeInformer coreinformers.NodeInformer, kubeClient clientset.Interface, lookupCacheSize int) *DaemonSetsController {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(glog.Infof)
 	// TODO: remove the wrapper when every clients have moved to use the clientset.
@@ -154,7 +157,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 		},
 		DeleteFunc: dsc.deleteDaemonset,
 	})
-	dsc.dsStore = daemonSetInformer.Lister()
+	dsc.dsLister = daemonSetInformer.Lister()
 	dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced
 	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
@@ -164,7 +167,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 		UpdateFunc: dsc.updatePod,
 		DeleteFunc: dsc.deletePod,
 	})
-	dsc.podStore = podInformer.Lister()
+	dsc.podLister = podInformer.Lister()
 	dsc.podStoreSynced = podInformer.Informer().HasSynced
 	nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -173,7 +176,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 		},
 	)
 	dsc.nodeStoreSynced = nodeInformer.Informer().HasSynced
-	dsc.nodeStore = nodeInformer.Lister()
+	dsc.nodeLister = nodeInformer.Lister()
 	dsc.syncHandler = dsc.syncDaemonSet
 	dsc.lookupCache = controller.NewMatchingCache(lookupCacheSize)
@@ -206,6 +209,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
glog.Infof("Starting Daemon Sets controller manager")
if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.dsStoreSynced) {
utilruntime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
return
}
@@ -266,7 +270,7 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon
 			return ds
 		}
 	}
-	sets, err := dsc.dsStore.GetPodDaemonSets(pod)
+	sets, err := dsc.dsLister.GetPodDaemonSets(pod)
 	if err != nil {
 		glog.V(4).Infof("No daemon sets found for pod %v, daemon set controller will avoid syncing", pod.Name)
 		return nil
@@ -280,16 +284,16 @@ func (dsc *DaemonSetsController) getPodDaemonSet(pod *v1.Pod) *extensions.Daemon
 	}
 	// update lookup cache
-	dsc.lookupCache.Update(pod, &sets[0])
+	dsc.lookupCache.Update(pod, sets[0])
-	return &sets[0]
+	return sets[0]
 }
 // isCacheValid check if the cache is valid
 func (dsc *DaemonSetsController) isCacheValid(pod *v1.Pod, cachedDS *extensions.DaemonSet) bool {
-	_, exists, err := dsc.dsStore.Get(cachedDS)
+	_, err := dsc.dsLister.DaemonSets(cachedDS.Namespace).Get(cachedDS.Name)
 	// ds has been deleted or updated, cache is invalid
-	if err != nil || !exists || !isDaemonSetMatch(pod, cachedDS) {
+	if err != nil || !isDaemonSetMatch(pod, cachedDS) {
 		return false
 	}
 	return true
@@ -388,14 +392,14 @@ func (dsc *DaemonSetsController) deletePod(obj interface{}) {
 func (dsc *DaemonSetsController) addNode(obj interface{}) {
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
-	dsList, err := dsc.dsStore.List()
+	dsList, err := dsc.dsLister.List(labels.Everything())
 	if err != nil {
 		glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
 		return
 	}
 	node := obj.(*v1.Node)
-	for i := range dsList.Items {
-		ds := &dsList.Items[i]
+	for i := range dsList {
+		ds := dsList[i]
 		_, shouldSchedule, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
@@ -413,14 +417,14 @@ func (dsc *DaemonSetsController) updateNode(old, cur interface{}) {
 		// If node labels didn't change, we can ignore this update.
 		return
 	}
-	dsList, err := dsc.dsStore.List()
+	dsList, err := dsc.dsLister.List(labels.Everything())
 	if err != nil {
 		glog.V(4).Infof("Error enqueueing daemon sets: %v", err)
 		return
 	}
 	// TODO: it'd be nice to pass a hint with these enqueues, so that each ds would only examine the added node (unless it has other work to do, too).
-	for i := range dsList.Items {
-		ds := &dsList.Items[i]
+	for i := range dsList {
+		ds := dsList[i]
 		_, oldShouldSchedule, oldShouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(oldNode, ds)
 		if err != nil {
 			continue
@@ -442,7 +446,7 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ds *extensions.DaemonSet)
 	if err != nil {
 		return nil, err
 	}
-	daemonPods, err := dsc.podStore.Pods(ds.Namespace).List(selector)
+	daemonPods, err := dsc.podLister.Pods(ds.Namespace).List(selector)
 	if err != nil {
 		return nodeToDaemonPods, err
 	}
@@ -464,14 +468,15 @@ func (dsc *DaemonSetsController) manage(ds *extensions.DaemonSet) error {
 	// For each node, if the node is running the daemon pod but isn't supposed to, kill the daemon
 	// pod. If the node is supposed to run the daemon pod, but isn't, create the daemon pod on the node.
-	nodeList, err := dsc.nodeStore.List()
+	nodeList, err := dsc.nodeLister.List(labels.Everything())
 	if err != nil {
 		return fmt.Errorf("couldn't get list of nodes when syncing daemon set %#v: %v", ds, err)
 	}
 	var nodesNeedingDaemonPods, podsToDelete []string
 	var failedPodsObserved int
-	for _, node := range nodeList.Items {
-		_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(&node, ds)
+	for i := range nodeList {
+		node := nodeList[i]
+		_, shouldSchedule, shouldContinueRunning, err := dsc.nodeShouldRunDaemonPod(node, ds)
 		if err != nil {
 			continue
 		}
@@ -630,14 +635,15 @@ func (dsc *DaemonSetsController) updateDaemonSetStatus(ds *extensions.DaemonSet)
return fmt.Errorf("error getting node to daemon pod mapping for daemon set %#v: %v", ds, err)
}
nodeList, err := dsc.nodeStore.List()
nodeList, err := dsc.nodeLister.List(labels.Everything())
if err != nil {
return fmt.Errorf("couldn't get list of nodes when updating daemon set %#v: %v", ds, err)
}
var desiredNumberScheduled, currentNumberScheduled, numberMisscheduled, numberReady int
for _, node := range nodeList.Items {
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(&node, ds)
for i := range nodeList {
node := nodeList[i]
wantToRun, _, _, err := dsc.nodeShouldRunDaemonPod(node, ds)
if err != nil {
return err
}
@@ -676,16 +682,19 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
glog.V(4).Infof("Finished syncing daemon set %q (%v)", key, time.Now().Sub(startTime))
}()
obj, exists, err := dsc.dsStore.Store.GetByKey(key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
return err
}
if !exists {
ds, err := dsc.dsLister.DaemonSets(namespace).Get(name)
if errors.IsNotFound(err) {
glog.V(3).Infof("daemon set has been deleted %v", key)
dsc.expectations.DeleteExpectations(key)
return nil
}
ds := obj.(*extensions.DaemonSet)
if err != nil {
return fmt.Errorf("unable to retrieve ds %v from store: %v", key, err)
}
everything := metav1.LabelSelector{}
if reflect.DeepEqual(ds.Spec.Selector, &everything) {
@@ -748,8 +757,12 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 	pods := []*v1.Pod{}
-	for _, m := range dsc.podStore.Indexer.List() {
-		pod := m.(*v1.Pod)
+	podList, err := dsc.podLister.List(labels.Everything())
+	if err != nil {
+		return false, false, false, err
+	}
+	for i := range podList {
+		pod := podList[i]
 		if pod.Spec.NodeName != node.Name {
 			continue
 		}
@@ -822,7 +835,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 }
 // byCreationTimestamp sorts a list by creation timestamp, using their names as a tie breaker.
-type byCreationTimestamp []extensions.DaemonSet
+type byCreationTimestamp []*extensions.DaemonSet
 func (o byCreationTimestamp) Len() int { return len(o) }
 func (o byCreationTimestamp) Swap(i, j int) { o[i], o[j] = o[j], o[i] }
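Taken together, the call-site changes above follow one pattern: a namespace-aware `Get` through a generated lister plus an `IsNotFound` check, instead of `GetByKey` on a raw store. The helper below restates that pattern as a self-contained sketch, using current `client-go` apps/v1 types for brevity rather than the in-tree extensions/v1beta1 packages this commit imports.

```go
package example

import (
	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/apimachinery/pkg/api/errors"
	appslisters "k8s.io/client-go/listers/apps/v1"
	"k8s.io/client-go/tools/cache"
)

// getDaemonSetForKey resolves a work-queue key ("namespace/name") through a
// generated lister, distinguishing "already deleted" from a real error the
// same way the new syncDaemonSet above does.
func getDaemonSetForKey(lister appslisters.DaemonSetLister, key string) (*appsv1.DaemonSet, error) {
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return nil, err
	}
	ds, err := lister.DaemonSets(namespace).Get(name)
	if errors.IsNotFound(err) {
		// Nothing to sync; the caller should clean up its expectations.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	return ds, nil
}
```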