consolidate node deletion logic between node lifecycle and cloud node controller
@@ -46,6 +46,7 @@ go_library(
         "//pkg/controller/certificates/cleaner:go_default_library",
         "//pkg/controller/certificates/rootcacertpublisher:go_default_library",
         "//pkg/controller/certificates/signer:go_default_library",
+        "//pkg/controller/cloud:go_default_library",
         "//pkg/controller/clusterroleaggregation:go_default_library",
         "//pkg/controller/cronjob:go_default_library",
         "//pkg/controller/daemon:go_default_library",
@@ -383,13 +383,13 @@ func NewControllerInitializers(loopMode ControllerLoopMode) map[string]InitFunc
     controllers["bootstrapsigner"] = startBootstrapSignerController
     controllers["tokencleaner"] = startTokenCleanerController
     controllers["nodeipam"] = startNodeIpamController
+    controllers["nodelifecycle"] = startNodeLifecycleController
     if loopMode == IncludeCloudLoops {
         controllers["service"] = startServiceController
         controllers["route"] = startRouteController
+        controllers["cloudnodelifecycle"] = startCloudNodeLifecycleController
         // TODO: volume controller into the IncludeCloudLoops only set.
-        // TODO: Separate cluster in cloud check from node lifecycle controller.
     }
-    controllers["nodelifecycle"] = startNodeLifecycleController
     controllers["persistentvolume-binder"] = startPersistentVolumeBinderController
     controllers["attachdetach"] = startAttachDetachController
     controllers["persistentvolume-expander"] = startVolumeExpandController
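
For orientation, the map built above is consumed by the controller manager's start-up path, which invokes each registered InitFunc with a shared ControllerContext. The sketch below is illustrative only: the InitFunc shape matches the start functions in this diff ((http.Handler, bool, error)), but ControllerContext is trimmed to an empty struct and runControllers is a hypothetical stand-in for the real start-up loop, not the actual implementation.

package main

import (
	"fmt"
	"net/http"
)

// ControllerContext is trimmed to nothing here; the real one carries informers,
// a client builder, the component config, and the cloud provider handle.
type ControllerContext struct{}

// InitFunc starts one controller and reports whether it was enabled.
type InitFunc func(ctx ControllerContext) (http.Handler, bool, error)

// runControllers is a hypothetical stand-in for the controller manager's
// start-up loop: it calls every registered InitFunc and surfaces errors.
func runControllers(ctx ControllerContext, controllers map[string]InitFunc) error {
	for name, start := range controllers {
		_, enabled, err := start(ctx)
		if err != nil {
			return fmt.Errorf("error starting %q: %v", name, err)
		}
		if !enabled {
			fmt.Printf("controller %q is disabled\n", name)
		}
	}
	return nil
}

func main() {
	controllers := map[string]InitFunc{
		// With IncludeCloudLoops, "cloudnodelifecycle" would be registered as well.
		"nodelifecycle": func(ControllerContext) (http.Handler, bool, error) { return nil, true, nil },
	}
	if err := runControllers(ControllerContext{}, controllers); err != nil {
		fmt.Println(err)
	}
}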
@@ -38,6 +38,7 @@ import (
     clientset "k8s.io/client-go/kubernetes"
     csiclientset "k8s.io/csi-api/pkg/client/clientset/versioned"
     "k8s.io/kubernetes/pkg/controller"
+    cloudcontroller "k8s.io/kubernetes/pkg/controller/cloud"
     endpointcontroller "k8s.io/kubernetes/pkg/controller/endpoint"
     "k8s.io/kubernetes/pkg/controller/garbagecollector"
     namespacecontroller "k8s.io/kubernetes/pkg/controller/namespace"
@@ -125,7 +126,6 @@ func startNodeLifecycleController(ctx ControllerContext) (http.Handler, bool, er
         ctx.InformerFactory.Core().V1().Pods(),
         ctx.InformerFactory.Core().V1().Nodes(),
         ctx.InformerFactory.Extensions().V1beta1().DaemonSets(),
-        ctx.Cloud,
         ctx.ClientBuilder.ClientOrDie("node-controller"),
         ctx.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
         ctx.ComponentConfig.NodeLifecycleController.NodeStartupGracePeriod.Duration,
@@ -146,6 +146,24 @@ func startNodeLifecycleController(ctx ControllerContext) (http.Handler, bool, er
     return nil, true, nil
 }
 
+func startCloudNodeLifecycleController(ctx ControllerContext) (http.Handler, bool, error) {
+    cloudNodeLifecycleController, err := cloudcontroller.NewCloudNodeLifecycleController(
+        ctx.InformerFactory.Core().V1().Nodes(),
+        ctx.ClientBuilder.ClientOrDie("cloud-node-lifecycle-controller"),
+        ctx.Cloud,
+        ctx.ComponentConfig.KubeCloudShared.NodeMonitorPeriod.Duration,
+    )
+    if err != nil {
+        // the controller manager should continue to run if the "Instances" interface is not
+        // supported, though it's unlikely for a cloud provider to not support it
+        klog.Errorf("failed to start cloud node lifecycle controller: %v", err)
+        return nil, false, nil
+    }
+
+    go cloudNodeLifecycleController.Run(ctx.Stop)
+    return nil, true, nil
+}
+
 func startRouteController(ctx ControllerContext) (http.Handler, bool, error) {
     if !ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs || !ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes {
         klog.Infof("Will not configure cloud provider routes for allocate-node-cidrs: %v, configure-cloud-routes: %v.", ctx.ComponentConfig.KubeCloudShared.AllocateNodeCIDRs, ctx.ComponentConfig.KubeCloudShared.ConfigureCloudRoutes)