Clean shutdown of nodecontroller integration tests
@@ -300,7 +300,8 @@ type Controller struct {
 
 	getPodsAssignedToNode func(nodeName string) ([]*v1.Pod, error)
 
-	recorder record.EventRecorder
+	broadcaster record.EventBroadcaster
+	recorder record.EventRecorder
 
 	// Value controlling Controller monitoring period, i.e. how often does Controller
 	// check node health signal posted from kubelet. This value should be lower than
@@ -372,13 +373,6 @@ func NewNodeLifecycleController(
 
 	eventBroadcaster := record.NewBroadcaster()
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "node-controller"})
-	eventBroadcaster.StartStructuredLogging(0)
-
-	klog.Infof("Sending events to api server.")
-	eventBroadcaster.StartRecordingToSink(
-		&v1core.EventSinkImpl{
-			Interface: v1core.New(kubeClient.CoreV1().RESTClient()).Events(""),
-		})
 
 	if kubeClient.CoreV1().RESTClient().GetRateLimiter() != nil {
 		ratelimiter.RegisterMetricAndTrackRateLimiterUsage("node_lifecycle_controller", kubeClient.CoreV1().RESTClient().GetRateLimiter())
@@ -390,6 +384,7 @@ func NewNodeLifecycleController(
 		knownNodeSet: make(map[string]*v1.Node),
 		nodeHealthMap: newNodeHealthMap(),
 		nodeEvictionMap: newNodeEvictionMap(),
+		broadcaster: eventBroadcaster,
 		recorder: recorder,
 		nodeMonitorPeriod: nodeMonitorPeriod,
 		nodeStartupGracePeriod: nodeStartupGracePeriod,
@@ -536,6 +531,19 @@ func NewNodeLifecycleController(
 func (nc *Controller) Run(ctx context.Context) {
 	defer utilruntime.HandleCrash()
 
+	// Start events processing pipeline.
+	nc.broadcaster.StartStructuredLogging(0)
+	klog.Infof("Sending events to api server.")
+	nc.broadcaster.StartRecordingToSink(
+		&v1core.EventSinkImpl{
+			Interface: v1core.New(nc.kubeClient.CoreV1().RESTClient()).Events(""),
+		})
+	defer nc.broadcaster.Shutdown()
+
+	// Close node update queue to cleanup go routine.
+	defer nc.nodeUpdateQueue.ShutDown()
+	defer nc.podUpdateQueue.ShutDown()
+
 	klog.Infof("Starting node controller")
 	defer klog.Infof("Shutting down node controller")
 
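The hunk above moves the start of the event pipeline out of the constructor and into Run, and pairs it with a deferred Shutdown. Below is a minimal, hypothetical sketch of that lifecycle using client-go's record package; demoController, newDemoController, and the fake clientset are illustrative stand-ins, not the actual controller code.

package main

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/kubernetes/scheme"
	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/tools/record"
)

// demoController is a hypothetical stand-in for the node lifecycle controller.
type demoController struct {
	kubeClient  kubernetes.Interface
	broadcaster record.EventBroadcaster
	recorder    record.EventRecorder
}

// newDemoController mirrors the patched constructor: the broadcaster and
// recorder are created here, but no event-processing goroutines are started.
func newDemoController(kubeClient kubernetes.Interface) *demoController {
	broadcaster := record.NewBroadcaster()
	return &demoController{
		kubeClient:  kubeClient,
		broadcaster: broadcaster,
		recorder:    broadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "demo-controller"}),
	}
}

// Run starts the events pipeline and defers Shutdown, so event delivery only
// runs while Run is running and is torn down on every exit path.
func (c *demoController) Run(ctx context.Context) {
	c.broadcaster.StartStructuredLogging(0)
	c.broadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{
		Interface: c.kubeClient.CoreV1().Events(""),
	})
	defer c.broadcaster.Shutdown()

	<-ctx.Done() // a real controller would run its workers here
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	newDemoController(fake.NewSimpleClientset()).Run(ctx)
}

With this shape, nothing event-related runs until Run is called, and the deferred Shutdown stops the broadcaster's background goroutines when Run returns, which is what lets the integration tests shut the controller down cleanly.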
@@ -547,10 +555,6 @@ func (nc *Controller) Run(ctx context.Context) {
 		go nc.taintManager.Run(ctx)
 	}
 
-	// Close node update queue to cleanup go routine.
-	defer nc.nodeUpdateQueue.ShutDown()
-	defer nc.podUpdateQueue.ShutDown()
-
 	// Start workers to reconcile labels and/or update NoSchedule taint for nodes.
 	for i := 0; i < scheduler.UpdateWorkerSize; i++ {
 		// Thanks to "workqueue", each worker just need to get item from queue, because
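The relocated defer nc.nodeUpdateQueue.ShutDown() and defer nc.podUpdateQueue.ShutDown() calls rely on the workqueue contract that Get unblocks and reports shutdown once the queue has been shut down, so the worker goroutines exit on their own. A small, self-contained sketch of that contract (the names are illustrative, not the controller's code):

package main

import (
	"fmt"
	"sync"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	queue := workqueue.New()
	var wg sync.WaitGroup

	// A worker in the style of the controller's queue workers: it pulls items
	// until the queue is shut down.
	wg.Add(1)
	go func() {
		defer wg.Done()
		for {
			item, shutdown := queue.Get()
			if shutdown {
				return // queue closed: the goroutine exits cleanly
			}
			fmt.Println("processing", item)
			queue.Done(item)
		}
	}()

	queue.Add("node-1")
	queue.Add("node-2")

	// Deferring ShutDown in Run (as the diff does) guarantees this runs on
	// every exit path, so tests do not leak worker goroutines.
	queue.ShutDown()
	wg.Wait()
}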