add node shutdown taint
shutdowned -> stopped; use "shutdown" everywhere
use patch in the taints API call
use NotImplemented in clouds
use AddOrUpdateTaintOnNode
correct log text
add fake cloud
try to fix bazel
add shutdown tests
add context
@@ -37,16 +37,23 @@ import (
	clientretry "k8s.io/client-go/util/retry"
	nodeutilv1 "k8s.io/kubernetes/pkg/api/v1/node"
	"k8s.io/kubernetes/pkg/cloudprovider"
	"k8s.io/kubernetes/pkg/controller"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
	nodeutil "k8s.io/kubernetes/pkg/util/node"
)

var UpdateNodeSpecBackoff = wait.Backoff{
	Steps:    20,
	Duration: 50 * time.Millisecond,
	Jitter:   1.0,
}
var (
	UpdateNodeSpecBackoff = wait.Backoff{
		Steps:    20,
		Duration: 50 * time.Millisecond,
		Jitter:   1.0}

	ShutDownTaint = &v1.Taint{
		Key:    algorithm.TaintNodeShutdown,
		Effect: v1.TaintEffectNoSchedule,
	}
)

type CloudNodeController struct {
	nodeInformer coreinformers.NodeInformer
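The MonitorNode hunk below calls the new InstanceShutdownByProviderID method and explicitly tolerates cloudprovider.NotImplemented, so providers that cannot report shutdown state are skipped rather than treated as failing. A minimal sketch, not part of this commit, of how such a provider might satisfy the call; the type name is hypothetical and the signature follows the call site below:

package example

import (
	"context"

	"k8s.io/kubernetes/pkg/cloudprovider"
)

// shutdownUnawareCloud stands in for a provider that cannot tell whether an
// instance has been shut down.
type shutdownUnawareCloud struct{}

// InstanceShutdownByProviderID returns cloudprovider.NotImplemented, which the
// controller treats as "skip the shutdown-taint handling" instead of an error.
func (c *shutdownUnawareCloud) InstanceShutdownByProviderID(ctx context.Context, providerID string) (bool, error) {
	return false, cloudprovider.NotImplemented
}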
@@ -240,9 +247,28 @@ func (cnc *CloudNodeController) MonitorNode() {
		// from the cloud provider. If node cannot be found in cloudprovider, then delete the node immediately
		if currentReadyCondition != nil {
			if currentReadyCondition.Status != v1.ConditionTrue {
				// Check the shutdown state first so the taint behaves the same across all cloud providers.
				// Shutdown nodes are currently handled inconsistently: not every cloud provider deletes
				// the node from the Kubernetes cluster when its instance is shut down; see issue #46442.
				exists, err := instances.InstanceShutdownByProviderID(context.TODO(), node.Spec.ProviderID)
				if err != nil && err != cloudprovider.NotImplemented {
					glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
					continue
				}

				if exists {
					// The node is shut down, so add the shutdown taint.
					err = controller.AddOrUpdateTaintOnNode(cnc.kubeClient, node.Name, ShutDownTaint)
					if err != nil {
						glog.Errorf("Error patching node taints: %v", err)
					}
					// Continue checking the remaining nodes; this one has been handled.
					continue
				}

				// Check with the cloud provider to see if the node still exists. If it
				// doesn't, delete the node immediately.
				exists, err := ensureNodeExistsByProviderIDOrExternalID(instances, node)
				exists, err = ensureNodeExistsByProviderIDOrExternalID(instances, node)
				if err != nil {
					glog.Errorf("Error getting data for node %s from cloud: %v", node.Name, err)
					continue
@@ -272,6 +298,12 @@ func (cnc *CloudNodeController) MonitorNode() {
					}
				}(node.Name)

			} else {
				// The node is ready again; remove the shutdown taint if it is present.
				err = controller.RemoveTaintOffNode(cnc.kubeClient, node.Name, node, ShutDownTaint)
				if err != nil {
					glog.Errorf("Error patching node taints: %v", err)
				}
			}
		}
	}
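Because ShutDownTaint uses TaintEffectNoSchedule, only pods with a matching toleration can still be scheduled onto a node that carries the taint. A minimal illustrative sketch, not part of this commit, of such a toleration, using the taint key asserted in the test further down:

package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm"
)

// shutdownToleration matches the shutdown taint applied by MonitorNode, so a
// pod spec carrying it remains schedulable onto a tainted node.
var shutdownToleration = v1.Toleration{
	Key:      algorithm.TaintNodeShutdown, // "node.cloudprovider.kubernetes.io/shutdown"
	Operator: v1.TolerationOpExists,
	Effect:   v1.TaintEffectNoSchedule,
}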
@@ -148,6 +148,115 @@ func TestEnsureNodeExistsByProviderIDOrNodeName(t *testing.T) {

}

func TestNodeShutdown(t *testing.T) {

	testCases := []struct {
		testName           string
		node               *v1.Node
		existsByProviderID bool
		shutdown           bool
	}{
		{
			testName:           "node shut down, add taint",
			existsByProviderID: true,
			shutdown:           true,
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Spec: v1.NodeSpec{
					ProviderID: "node0",
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionUnknown,
							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						},
					},
				},
			},
		},
		{
			testName:           "node started after shutdown, remove taint",
			existsByProviderID: true,
			shutdown:           false,
			node: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name:              "node0",
					CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
				},
				Spec: v1.NodeSpec{
					ProviderID: "node0",
					Taints: []v1.Taint{
						{
							Key:    algorithm.TaintNodeShutdown,
							Effect: v1.TaintEffectNoSchedule,
						},
					},
				},
				Status: v1.NodeStatus{
					Conditions: []v1.NodeCondition{
						{
							Type:               v1.NodeReady,
							Status:             v1.ConditionTrue,
							LastHeartbeatTime:  metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
							LastTransitionTime: metav1.Date(2015, 1, 1, 12, 0, 0, 0, time.UTC),
						},
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			fc := &fakecloud.FakeCloud{
				ExistsByProviderID: tc.existsByProviderID,
				NodeShutdown:       tc.shutdown,
			}
			fnh := &testutil.FakeNodeHandler{
				Existing:      []*v1.Node{tc.node},
				Clientset:     fake.NewSimpleClientset(),
				PatchWaitChan: make(chan struct{}),
			}

			factory := informers.NewSharedInformerFactory(fnh, controller.NoResyncPeriodFunc())

			eventBroadcaster := record.NewBroadcaster()
			cloudNodeController := &CloudNodeController{
				kubeClient:                fnh,
				nodeInformer:              factory.Core().V1().Nodes(),
				cloud:                     fc,
				nodeMonitorPeriod:         1 * time.Second,
				recorder:                  eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "cloud-node-controller"}),
				nodeStatusUpdateFrequency: 1 * time.Second,
			}
			eventBroadcaster.StartLogging(glog.Infof)

			cloudNodeController.Run()

			select {
			case <-fnh.PatchWaitChan:
			case <-time.After(wait.ForeverTestTimeout):
				t.Errorf("Timed out waiting %v for node to be updated", wait.ForeverTestTimeout)
			}

			assert.Equal(t, 1, len(fnh.UpdatedNodes), "Node was not updated")
			if tc.shutdown {
				assert.Equal(t, 1, len(fnh.UpdatedNodes[0].Spec.Taints), "Node Taint was not added")
				assert.Equal(t, "node.cloudprovider.kubernetes.io/shutdown", fnh.UpdatedNodes[0].Spec.Taints[0].Key, "Node Taint key is not correct")
			} else {
				assert.Equal(t, 0, len(fnh.UpdatedNodes[0].Spec.Taints), "Node Taint was not removed after node is back in ready state")
			}

		})
	}

}

// This test checks that the node is deleted when kubelet stops reporting
// and cloud provider says node is gone
func TestNodeDeleted(t *testing.T) {
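The test drives the controller entirely through the fake cloud's ExistsByProviderID and NodeShutdown fields. A rough, hypothetical sketch of what the corresponding fake-cloud method could look like; the real fakecloud.FakeCloud is more elaborate, and the trimmed struct below only mirrors the two fields the test sets:

package example

import "context"

// FakeCloud is a trimmed stand-in for the test fake; only the fields used by
// TestNodeShutdown are shown.
type FakeCloud struct {
	ExistsByProviderID bool
	NodeShutdown       bool
}

// InstanceShutdownByProviderID returns the canned NodeShutdown flag, which is
// enough for MonitorNode to exercise both the add-taint and remove-taint paths.
func (f *FakeCloud) InstanceShutdownByProviderID(_ context.Context, providerID string) (bool, error) {
	return f.NodeShutdown, nil
}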
@@ -67,6 +67,7 @@ type FakeNodeHandler struct {
	// Synchronization
	lock           sync.Mutex
	DeleteWaitChan chan struct{}
	PatchWaitChan  chan struct{}
}

// FakeLegacyHandler is a fake implementation of CoreV1Interface.
@@ -270,6 +271,9 @@ func (m *FakeNodeHandler) Patch(name string, pt types.PatchType, data []byte, su
	m.lock.Lock()
	defer func() {
		m.RequestCount++
		if m.PatchWaitChan != nil {
			m.PatchWaitChan <- struct{}{}
		}
		m.lock.Unlock()
	}()
	var nodeCopy v1.Node