Fix event broadcaster shutdown in multiple controllers
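The change below stores the event broadcaster on the controller struct, moves StartStructuredLogging/StartRecordingToSink out of the constructor into Run, and defers the broadcaster's Shutdown so it is stopped when the controller exits. A minimal sketch of that lifecycle pattern, written against client-go's record package (the ExampleController/NewExampleController names are illustrative, not taken from this diff):

// Sketch of the broadcaster lifecycle this commit applies: create the
// broadcaster in the constructor, start and shut it down only in Run.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

type ExampleController struct {
	kubeClient clientset.Interface

	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder
}

func NewExampleController(kubeClient clientset.Interface) *ExampleController {
	// Create the broadcaster here, but do not start it: a controller that is
	// constructed and never run should not spawn broadcaster goroutines.
	eventBroadcaster := record.NewBroadcaster()
	return &ExampleController{
		kubeClient:       kubeClient,
		eventBroadcaster: eventBroadcaster,
		eventRecorder:    eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "example-controller"}),
	}
}

func (c *ExampleController) Run(ctx context.Context) {
	// Start event delivery only when the controller actually runs, and pair
	// it with a deferred Shutdown so the broadcaster stops when Run returns.
	c.eventBroadcaster.StartStructuredLogging(0)
	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.kubeClient.CoreV1().Events("")})
	defer c.eventBroadcaster.Shutdown()

	<-ctx.Done()
}

The point of the pattern is that start-up and shutdown of the broadcaster are paired inside Run, which is what the hunks below do for the DaemonSet controller.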
@@ -84,10 +84,13 @@ var controllerKind = apps.SchemeGroupVersion.WithKind("DaemonSet")
 // DaemonSetsController is responsible for synchronizing DaemonSet objects stored
 // in the system with actual running pods.
 type DaemonSetsController struct {
-	kubeClient    clientset.Interface
-	eventRecorder record.EventRecorder
-	podControl    controller.PodControlInterface
-	crControl     controller.ControllerRevisionControlInterface
+	kubeClient clientset.Interface
+
+	eventBroadcaster record.EventBroadcaster
+	eventRecorder    record.EventRecorder
+
+	podControl controller.PodControlInterface
+	crControl  controller.ControllerRevisionControlInterface
 
 	// An dsc is temporarily suspended after creating/deleting these many replicas.
 	// It resumes normal action after observing the watch events for them.
@@ -138,8 +141,6 @@ func NewDaemonSetsController(
 	failedPodsBackoff *flowcontrol.Backoff,
 ) (*DaemonSetsController, error) {
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartStructuredLogging(0)
-	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: kubeClient.CoreV1().Events("")})
 
 	if kubeClient != nil && kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		if err := ratelimiter.RegisterMetricAndTrackRateLimiterUsage("daemon_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter()); err != nil {
@@ -147,8 +148,9 @@ func NewDaemonSetsController(
 		}
 	}
 	dsc := &DaemonSetsController{
-		kubeClient:    kubeClient,
-		eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
+		kubeClient:       kubeClient,
+		eventBroadcaster: eventBroadcaster,
+		eventRecorder:    eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
 		podControl: controller.RealPodControl{
 			KubeClient: kubeClient,
 			Recorder:   eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "daemonset-controller"}),
@@ -279,6 +281,11 @@ func (dsc *DaemonSetsController) deleteDaemonset(obj interface{}) {
 // Run begins watching and syncing daemon sets.
 func (dsc *DaemonSetsController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
+
+	dsc.eventBroadcaster.StartStructuredLogging(0)
+	dsc.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: dsc.kubeClient.CoreV1().Events("")})
+	defer dsc.eventBroadcaster.Shutdown()
+
 	defer dsc.queue.ShutDown()
 
 	klog.Infof("Starting daemon sets controller")