cleaning newNodeController from unsupported kubelet version
@@ -186,7 +186,6 @@ type Controller struct {
	cidrAllocator ipam.CIDRAllocator
	taintManager  *scheduler.NoExecuteTaintManager

	forcefullyDeletePod        func(*v1.Pod) error
	nodeExistsInCloudProvider  func(types.NodeName) (bool, error)
	computeZoneStateFunc       func(nodeConditions []*v1.NodeCondition) (int, ZoneState)
	enterPartialDisruptionFunc func(nodeNum int) float32
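
These fields hold closures rather than direct calls so the controller's collaborators can be swapped out in unit tests. A minimal sketch of that injection pattern, assuming a test file in the same package (the newTestController helper below is illustrative, not part of this commit):

	// Illustrative test-side stubbing of the injected closures.
	func newTestController() *Controller {
		nc := &Controller{}
		// Stub the API-server call: tests can ignore or record deletions.
		nc.forcefullyDeletePod = func(p *v1.Pod) error { return nil }
		// Stub the cloud-provider lookup: pretend every node still exists.
		nc.nodeExistsInCloudProvider = func(name types.NodeName) (bool, error) {
			return true, nil
		}
		return nc
	}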
@@ -285,9 +284,6 @@ func NewNodeController(
		serviceCIDR:       serviceCIDR,
		allocateNodeCIDRs: allocateNodeCIDRs,
		allocatorType:     allocatorType,
		forcefullyDeletePod: func(p *v1.Pod) error {
			return util.ForcefullyDeletePod(kubeClient, p)
		},
		nodeExistsInCloudProvider: func(nodeName types.NodeName) (bool, error) {
			return util.NodeExistsInCloudProvider(cloud, nodeName)
		},
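
NewNodeController binds the production implementations by capturing kubeClient and cloud in closures, so the rest of the controller goes through the struct fields rather than calling util directly. A hedged sketch of such a call site in the same package (the forceDeletePods helper is hypothetical, not from this commit):

	// Hypothetical caller: iterates pods and delegates to the injected closure.
	func (nc *Controller) forceDeletePods(pods []*v1.Pod) error {
		for _, pod := range pods {
			if err := nc.forcefullyDeletePod(pod); err != nil {
				return fmt.Errorf("force deleting pod %s/%s: %v", pod.Namespace, pod.Name, err)
			}
		}
		return nil
	}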
@@ -129,17 +129,6 @@ func SetPodTerminationReason(kubeClient clientset.Interface, pod *v1.Pod, nodeNa
	return updatedPod, nil
}

// ForcefullyDeletePod deletes the pod immediately.
func ForcefullyDeletePod(c clientset.Interface, pod *v1.Pod) error {
	var zero int64
	glog.Infof("NodeController is force deleting Pod: %v:%v", pod.Namespace, pod.Name)
	err := c.Core().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{GracePeriodSeconds: &zero})
	if err == nil {
		glog.V(4).Infof("forceful deletion of %s succeeded", pod.Name)
	}
	return err
}
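
A GracePeriodSeconds of zero tells the API server to remove the pod object immediately rather than waiting for the kubelet's graceful shutdown, which is why the controller reserves this path for nodes it has given up on. A usage sketch, assuming c is an already-configured clientset.Interface and pod was fetched by surrounding code:

	// Force delete and surface the failure; c and pod come from context.
	if err := util.ForcefullyDeletePod(c, pod); err != nil {
		glog.Errorf("failed to force delete pod %s/%s: %v", pod.Namespace, pod.Name, err)
	}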

// ForcefullyDeleteNode deletes the node immediately. The pods on the
// node are cleaned up by the podGC.
func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error {
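
The page truncates the diff at this signature. Based on the client API used above (the pre-1.8 Core() accessor), the body is most likely a single delete against the Nodes resource; a sketch under that assumption, written for the same file, not copied from this commit:

	func ForcefullyDeleteNode(kubeClient clientset.Interface, nodeName string) error {
		// nil DeleteOptions: default deletion; the podGC reclaims the node's pods.
		if err := kubeClient.Core().Nodes().Delete(nodeName, nil); err != nil {
			return fmt.Errorf("unable to delete node %q: %v", nodeName, err)
		}
		return nil
	}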