Merge pull request #73556 from msau42/triage-72931

Mark volume as in use even when node status didn't change
Kubernetes Prow Robot committed via GitHub
2019-02-12 17:29:05 -08:00
4 changed files with 328 additions and 1 deletion
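In short: with the NodeLease feature enabled, the kubelet skips the node-status PATCH when the status has not changed within nodeStatusReportFrequency, and that early return also used to skip marking volumes as reported in use, which could stall the attach/detach controller (the branch name suggests issue #72931). Below is a minimal, self-contained Go sketch of the fixed control flow; the types and function are illustrative stand-ins, not the kubelet's actual code, though the real VolumeManager method it models is MarkVolumesAsReportedInUse.

package main

import "fmt"

// volumeReporter is a stand-in for the slice of the kubelet VolumeManager
// interface that matters here (the real method is MarkVolumesAsReportedInUse).
type volumeReporter interface {
    MarkVolumesAsReportedInUse(volumes []string)
}

type recordingReporter struct{ reported []string }

func (r *recordingReporter) MarkVolumesAsReportedInUse(v []string) { r.reported = v }

// tryUpdateNodeStatus models the fixed control flow: even when the status is
// unchanged and the PATCH to the API server is skipped, volumes in use must
// still be marked as reported so the attach/detach controller can proceed.
func tryUpdateNodeStatus(vm volumeReporter, volumesInUse []string, statusChanged bool) {
    if !statusChanged {
        // The fix: before this PR, the early return skipped this call.
        vm.MarkVolumesAsReportedInUse(volumesInUse)
        return
    }
    // ... issue the PATCH, then report based on the updated status ...
    vm.MarkVolumesAsReportedInUse(volumesInUse)
}

func main() {
    vm := &recordingReporter{}
    tryUpdateNodeStatus(vm, []string{"vol1"}, false /* status unchanged */)
    fmt.Println(vm.reported) // prints [vol1]: reported even though no PATCH was sent
}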


@@ -54,6 +54,7 @@ import (
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/nodestatus"
"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
taintutil "k8s.io/kubernetes/pkg/util/taints"
"k8s.io/kubernetes/pkg/version"
@@ -1028,6 +1029,213 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
    assert.IsType(t, core.GetActionImpl{}, actions[9])
}

func TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease(t *testing.T) {
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, false)()

    cases := []struct {
        desc                  string
        existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
        existingNode          *v1.Node              // existing node object
        expectedNode          *v1.Node              // new node object after patch
        expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
    }{
        {
            desc:         "no volumes and no update",
            existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
            expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
        },
        {
            desc:            "volumes inuse on node and volumeManager",
            existingVolumes: []v1.UniqueVolumeName{"vol1"},
            existingNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
        },
        {
            desc: "volumes inuse on node but not in volumeManager",
            existingNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
        },
        {
            desc:            "volumes inuse in volumeManager but not on node",
            existingVolumes: []v1.UniqueVolumeName{"vol1"},
            existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
            expectedNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
        },
    }

    for _, tc := range cases {
        t.Run(tc.desc, func(t *testing.T) {
            // Setup
            testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
            defer testKubelet.Cleanup()
            kubelet := testKubelet.kubelet
            kubelet.kubeClient = nil // ensure only the heartbeat client is used
            kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
            kubelet.lastStatusReportTime = kubelet.clock.Now()
            kubelet.nodeStatusReportFrequency = time.Hour
            kubelet.machineInfo = &cadvisorapi.MachineInfo{}

            // override test volumeManager
            fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
            kubelet.volumeManager = fakeVolumeManager

            // Only test VolumesInUse setter
            kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
                nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
                    kubelet.volumeManager.GetVolumesInUse),
            }

            kubeClient := testKubelet.fakeKubeClient
            kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain

            // Execute
            assert.NoError(t, kubelet.updateNodeStatus())

            // Validate
            actions := kubeClient.Actions()
            if tc.expectedNode != nil {
                assert.Len(t, actions, 2)
                assert.IsType(t, core.GetActionImpl{}, actions[0])
                assert.IsType(t, core.PatchActionImpl{}, actions[1])
                patchAction := actions[1].(core.PatchActionImpl)

                updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
                require.NoError(t, err)
                assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
            } else {
                assert.Len(t, actions, 1)
                assert.IsType(t, core.GetActionImpl{}, actions[0])
            }

            reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
            assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
        })
    }
}

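// Note: with NodeLease enabled and the status unchanged within
// nodeStatusReportFrequency, the kubelet skips the PATCH entirely, which is
// why the "no volumes and no update" case below sets no expectedNode and only
// a single Get action is asserted. The point of the fix is that volumes are
// still reported in use on that skipped-patch path.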
func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
    defer utilfeaturetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()

    cases := []struct {
        desc                  string
        existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
        existingNode          *v1.Node              // existing node object
        expectedNode          *v1.Node              // new node object after patch
        expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
    }{
        {
            desc:         "no volumes and no update",
            existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
        },
        {
            desc:            "volumes inuse on node and volumeManager",
            existingVolumes: []v1.UniqueVolumeName{"vol1"},
            existingNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
        },
        {
            desc: "volumes inuse on node but not in volumeManager",
            existingNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
        },
        {
            desc:            "volumes inuse in volumeManager but not on node",
            existingVolumes: []v1.UniqueVolumeName{"vol1"},
            existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
            expectedNode: &v1.Node{
                ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
                Status: v1.NodeStatus{
                    VolumesInUse: []v1.UniqueVolumeName{"vol1"},
                },
            },
            expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
        },
    }

    for _, tc := range cases {
        t.Run(tc.desc, func(t *testing.T) {
            // Setup
            testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
            defer testKubelet.Cleanup()
            kubelet := testKubelet.kubelet
            kubelet.kubeClient = nil // ensure only the heartbeat client is used
            kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
            kubelet.lastStatusReportTime = kubelet.clock.Now()
            kubelet.nodeStatusReportFrequency = time.Hour
            kubelet.machineInfo = &cadvisorapi.MachineInfo{}

            // override test volumeManager
            fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
            kubelet.volumeManager = fakeVolumeManager

            // Only test VolumesInUse setter
            kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
                nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
                    kubelet.volumeManager.GetVolumesInUse),
            }

            kubeClient := testKubelet.fakeKubeClient
            kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain

            // Execute
            assert.NoError(t, kubelet.updateNodeStatus())

            // Validate
            actions := kubeClient.Actions()
            if tc.expectedNode != nil {
                assert.Len(t, actions, 2)
                assert.IsType(t, core.GetActionImpl{}, actions[0])
                assert.IsType(t, core.PatchActionImpl{}, actions[1])
                patchAction := actions[1].(core.PatchActionImpl)

                updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
                require.NoError(t, err)
                assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
            } else {
                assert.Len(t, actions, 1)
                assert.IsType(t, core.GetActionImpl{}, actions[0])
            }

            reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
            assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
        })
    }
}

func TestRegisterWithApiServer(t *testing.T) {
    testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
    defer testKubelet.Cleanup()