Fix DaemonSet controller cache mutation

Add dsStoreSynced so we also wait on this cache when starting the
DaemonSetsController.

Switch to using a fake clientset in the unit tests.

Fix TestNumberReadyStatus so it doesn't expect the cache to be mutated.
commit febc641cee (parent def8022729)
Author: Andy Goldstein
Date:   2016-12-19 16:39:23 -05:00

3 changed files with 70 additions and 47 deletions
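
Note: the bug class named in the title is aliasing into the shared informer cache. A lister returns a pointer to the cached object, so mutating that object in place corrupts the cache for every other reader, and the write sticks even when the apiserver update fails. A minimal, self-contained Go sketch of the hazard (the types here are stand-ins, not the real DaemonSet types):

package main

import "fmt"

// Stand-ins for the real types; only the pointer aliasing matters.
type daemonSetStatus struct{ NumberReady int32 }
type daemonSet struct{ Status daemonSetStatus }

func main() {
	store := map[string]*daemonSet{"ds1": {}} // the shared informer cache

	// A lister hands back the cached pointer, not a copy.
	ds := store["ds1"]

	// BAD: writing through the pointer mutates the cache itself.
	ds.Status.NumberReady = 3
	fmt.Println(store["ds1"].Status.NumberReady) // 3: the cache changed

	// GOOD: copy first, mutate only the copy. This is what the diff below
	// does with api.Scheme.DeepCopy before calling UpdateStatus.
	toUpdate := *store["ds1"]
	toUpdate.Status.NumberReady = 5
	fmt.Println(store["ds1"].Status.NumberReady) // still 3
}

Copying before mutating keeps the cache a faithful mirror of the apiserver; only a successful UpdateStatus, observed back through the watch, should change it.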

@@ -23,6 +23,7 @@ import (
 	"sync"
 	"time"
+	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/v1"
 	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	metav1 "k8s.io/kubernetes/pkg/apis/meta/v1"
@@ -78,6 +79,9 @@ type DaemonSetsController struct {
 	podStore *cache.StoreToPodLister
 	// A store of nodes
 	nodeStore *cache.StoreToNodeLister
+	// dsStoreSynced returns true if the daemonset store has been synced at least once.
+	// Added as a member to the struct to allow injection for testing.
+	dsStoreSynced cache.InformerSynced
 	// podStoreSynced returns true if the pod store has been synced at least once.
 	// Added as a member to the struct to allow injection for testing.
 	podStoreSynced cache.InformerSynced
@@ -142,6 +146,7 @@ func NewDaemonSetsController(daemonSetInformer informers.DaemonSetInformer, podI
 		DeleteFunc: dsc.deleteDaemonset,
 	})
 	dsc.dsStore = daemonSetInformer.Lister()
+	dsc.dsStoreSynced = daemonSetInformer.Informer().HasSynced
 	// Watch for creation/deletion of pods. The reason we watch is that we don't want a daemon set to create/delete
 	// more pods until all the effects (expectations) of a daemon set's create/delete have been observed.
@@ -191,7 +196,7 @@ func (dsc *DaemonSetsController) Run(workers int, stopCh <-chan struct{}) {
 	glog.Infof("Starting Daemon Sets controller manager")
-	if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced) {
+	if !cache.WaitForCacheSync(stopCh, dsc.podStoreSynced, dsc.nodeStoreSynced, dsc.dsStoreSynced) {
 		return
 	}
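
The "injection for testing" comments in the struct refer to swapping these sync funcs out in unit tests so Run doesn't block on real informers; cache.InformerSynced is just func() bool. A sketch of such a hook (the helper name is hypothetical, constructor arguments elided):

// alwaysReady satisfies cache.InformerSynced without a running informer.
var alwaysReady = func() bool { return true }

// newTestController is a hypothetical test helper.
func newTestController() *DaemonSetsController {
	dsc := NewDaemonSetsController( /* informers, clientset, ... */ )
	dsc.dsStoreSynced = alwaysReady
	dsc.podStoreSynced = alwaysReady
	dsc.nodeStoreSynced = alwaysReady
	return dsc
}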
@@ -539,19 +544,26 @@ func storeDaemonSetStatus(dsClient unversionedextensions.DaemonSetInterface, ds
 		return nil
 	}
+	clone, err := api.Scheme.DeepCopy(ds)
+	if err != nil {
+		return err
+	}
+	toUpdate := clone.(*extensions.DaemonSet)
+
 	var updateErr, getErr error
 	for i := 0; i < StatusUpdateRetries; i++ {
-		ds.Status.DesiredNumberScheduled = int32(desiredNumberScheduled)
-		ds.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
-		ds.Status.NumberMisscheduled = int32(numberMisscheduled)
-		ds.Status.NumberReady = int32(numberReady)
+		toUpdate.Status.DesiredNumberScheduled = int32(desiredNumberScheduled)
+		toUpdate.Status.CurrentNumberScheduled = int32(currentNumberScheduled)
+		toUpdate.Status.NumberMisscheduled = int32(numberMisscheduled)
+		toUpdate.Status.NumberReady = int32(numberReady)
-		if _, updateErr = dsClient.UpdateStatus(ds); updateErr == nil {
+		if _, updateErr = dsClient.UpdateStatus(toUpdate); updateErr == nil {
 			return nil
 		}
 		// Update the set with the latest resource version for the next poll
-		if ds, getErr = dsClient.Get(ds.Name, metav1.GetOptions{}); getErr != nil {
+		if toUpdate, getErr = dsClient.Get(ds.Name, metav1.GetOptions{}); getErr != nil {
 			// If the GET fails we can't trust status.Replicas anymore. This error
 			// is bound to be more interesting than the update failure.
 			return getErr
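
The test-file side of the change isn't shown above. A minimal sketch of the pattern the commit message describes, a fake clientset plus an assertion that the input object (standing in for the cached copy) is not mutated, written against the parameter order shown in the hunk above; the fake package path is from the 1.5-era tree and the newDaemonSet helper is hypothetical, so details may differ:

package daemon

import (
	"testing"

	"k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
)

func TestStatusUpdateDoesNotMutateCachedObject(t *testing.T) {
	ds := newDaemonSet("foo") // hypothetical helper returning *extensions.DaemonSet
	client := fake.NewSimpleClientset(ds)
	originalStatus := ds.Status

	err := storeDaemonSetStatus(client.Extensions().DaemonSets(ds.Namespace), ds, 2, 2, 0, 1)
	if err != nil {
		t.Fatal(err)
	}
	// The new status must reach the apiserver (here, the fake clientset),
	// while the input object, our stand-in for the informer cache, stays
	// untouched. This is the property TestNumberReadyStatus now relies on.
	if ds.Status != originalStatus {
		t.Errorf("input DaemonSet was mutated: %+v", ds.Status)
	}
}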