Requeue service after endpoint deletion
- ensure endpoints that have been deleted but are still desired are recreated, despite a possibly out-of-date endpoint cache
@@ -110,6 +110,9 @@ func NewEndpointController(podInformer coreinformers.PodInformer, serviceInforme
 	e.podLister = podInformer.Lister()
 	e.podsSynced = podInformer.Informer().HasSynced
 
+	endpointsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+		DeleteFunc: e.onEndpointsDelete,
+	})
 	e.endpointsLister = endpointsInformer.Lister()
 	e.endpointsSynced = endpointsInformer.Informer().HasSynced
 
@@ -287,6 +290,15 @@ func (e *EndpointController) onServiceDelete(obj interface{}) {
 	e.queue.Add(key)
 }
 
+func (e *EndpointController) onEndpointsDelete(obj interface{}) {
+	key, err := controller.KeyFunc(obj)
+	if err != nil {
+		utilruntime.HandleError(fmt.Errorf("Couldn't get key for object %+v: %v", obj, err))
+		return
+	}
+	e.queue.Add(key)
+}
+
 // worker runs a worker thread that just dequeues items, processes them, and
 // marks them done. You may run as many of these in parallel as you wish; the
 // workqueue guarantees that they will not end up processing the same service
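The change works because an Endpoints object shares its namespace/name with the Service it backs, so the key computed from the deleted Endpoints is exactly the key the service sync loop consumes; re-adding it forces a resync that recreates the Endpoints if the Service still exists. A minimal sketch of the key derivation, assuming (as in Kubernetes' pkg/controller) that controller.KeyFunc is cache.DeletionHandlingMetaNamespaceKeyFunc, which also unwraps deletion tombstones; the "default/my-svc" names are illustrative only:

    package main

    import (
    	"fmt"

    	v1 "k8s.io/api/core/v1"
    	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    	"k8s.io/client-go/tools/cache"
    )

    func main() {
    	// Hypothetical Endpoints object: it is named after its Service, so the
    	// key derived from it doubles as the Service's workqueue key.
    	eps := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "my-svc"}}

    	key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(eps)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(key) // default/my-svc

    	// On deletion, an informer may deliver a DeletedFinalStateUnknown
    	// tombstone instead of the object; this key func unwraps it, so the
    	// handler above still enqueues the right service key.
    	tomb := cache.DeletedFinalStateUnknown{Key: "default/my-svc", Obj: eps}
    	key, err = cache.DeletionHandlingMetaNamespaceKeyFunc(tomb)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(key) // default/my-svc
    }

Because the tombstone case is handled inside the key function, the DeleteFunc handler stays a one-liner and needs no type switch of its own.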