Promote NodeLease feature to GA
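Promoting a feature gate to GA generally means it defaults to on and is locked to that default, so the per-test NodeLease overrides in the hunks below have nothing left to toggle and are removed. A minimal sketch of a locked GA gate, using a hypothetical ExampleLease gate rather than the real NodeLease spec in pkg/features:

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

// ExampleLease is a stand-in gate name for illustration; the real NodeLease
// spec lives in k8s.io/kubernetes/pkg/features and may differ in detail.
const ExampleLease featuregate.Feature = "ExampleLease"

func main() {
	gate := featuregate.NewFeatureGate()
	// A GA gate is typically locked to its default, so it can no longer be disabled.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		ExampleLease: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
	}); err != nil {
		panic(err)
	}
	// Trying to turn a locked gate off is rejected with an error.
	err := gate.Set(fmt.Sprintf("%s=false", ExampleLease))
	fmt.Println("enabled:", gate.Enabled(ExampleLease), "set error:", err)
}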
@@ -43,14 +43,11 @@ import (
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/rest"
	core "k8s.io/client-go/testing"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/version"
	"k8s.io/kubernetes/pkg/features"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/cm"
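The utilfeature and featuregatetesting imports above exist to support the per-test gate overrides that the later hunks delete. The pattern, sketched against features.CSIMigration purely as a stand-in for a gate that could still be toggled, and with the helper returning a cleanup func as it does in this file:

package kubelet

import (
	"testing"

	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
)

// TestWithGateOverride flips a gate for the duration of one test; the returned
// cleanup func restores the previous value when deferred, which is why the
// calls in this file end in "()" after the defer.
func TestWithGateOverride(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.CSIMigration, true)()

	if !utilfeature.DefaultFeatureGate.Enabled(features.CSIMigration) {
		t.Fatal("expected gate to be enabled inside the test")
	}
}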
@@ -787,8 +784,6 @@ func TestUpdateNodeStatusError(t *testing.T) {
}

func TestUpdateNodeStatusWithLease(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()

	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	clock := testKubelet.fakeClock
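The assertions in the next hunk (kubeClient.Actions(), GetActionImpl, PatchActionImpl) rely on client-go's fake clientset recording every call it serves. A self-contained sketch of that mechanism with a recent client-go, independent of the kubelet wiring; the get-then-patch sequence mirrors what the status updater issues:

package main

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
)

func main() {
	// Seed the fake clientset with an existing node, much like the tests below
	// do by swapping in a fresh ReactionChain.
	client := fake.NewSimpleClientset(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}})

	// Issue a get followed by a strategic-merge patch of the status subresource.
	_, _ = client.CoreV1().Nodes().Get(context.TODO(), "node-a", metav1.GetOptions{})
	_, _ = client.CoreV1().Nodes().Patch(context.TODO(), "node-a", types.StrategicMergePatchType,
		[]byte(`{"status":{"volumesInUse":["vol1"]}}`), metav1.PatchOptions{}, "status")

	// Every call was recorded as an action that tests can type-assert on.
	for _, a := range client.Actions() {
		switch a.(type) {
		case core.GetActionImpl:
			fmt.Println("get:", a.GetResource().Resource)
		case core.PatchActionImpl:
			fmt.Println("patch:", a.GetResource().Resource)
		}
	}
}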
@@ -1021,116 +1016,7 @@ func TestUpdateNodeStatusWithLease(t *testing.T) {
	assert.IsType(t, core.GetActionImpl{}, actions[9])
}

func TestUpdateNodeStatusAndVolumesInUseWithoutNodeLease(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, false)()

	cases := []struct {
		desc                  string
		existingVolumes       []v1.UniqueVolumeName // volumes to initially populate volumeManager
		existingNode          *v1.Node              // existing node object
		expectedNode          *v1.Node              // new node object after patch
		expectedReportedInUse []v1.UniqueVolumeName // expected volumes reported in use in volumeManager
	}{
		{
			desc:         "no volumes and no update",
			existingNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			desc:            "volumes inuse on node and volumeManager",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
		{
			desc: "volumes inuse on node but not in volumeManager",
			existingNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedNode: &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
		},
		{
			desc:            "volumes inuse in volumeManager but not on node",
			existingVolumes: []v1.UniqueVolumeName{"vol1"},
			existingNode:    &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}},
			expectedNode: &v1.Node{
				ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
				Status: v1.NodeStatus{
					VolumesInUse: []v1.UniqueVolumeName{"vol1"},
				},
			},
			expectedReportedInUse: []v1.UniqueVolumeName{"vol1"},
		},
	}

	for _, tc := range cases {
		t.Run(tc.desc, func(t *testing.T) {
			// Setup
			testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
			defer testKubelet.Cleanup()

			kubelet := testKubelet.kubelet
			kubelet.kubeClient = nil // ensure only the heartbeat client is used
			kubelet.containerManager = &localCM{ContainerManager: cm.NewStubContainerManager()}
			kubelet.lastStatusReportTime = kubelet.clock.Now()
			kubelet.nodeStatusReportFrequency = time.Hour
			kubelet.machineInfo = &cadvisorapi.MachineInfo{}

			// override test volumeManager
			fakeVolumeManager := kubeletvolume.NewFakeVolumeManager(tc.existingVolumes)
			kubelet.volumeManager = fakeVolumeManager

			// Only test VolumesInUse setter
			kubelet.setNodeStatusFuncs = []func(*v1.Node) error{
				nodestatus.VolumesInUse(kubelet.volumeManager.ReconcilerStatesHasBeenSynced,
					kubelet.volumeManager.GetVolumesInUse),
			}

			kubeClient := testKubelet.fakeKubeClient
			kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{*tc.existingNode}}).ReactionChain

			// Execute
			assert.NoError(t, kubelet.updateNodeStatus())

			// Validate
			actions := kubeClient.Actions()
			if tc.expectedNode != nil {
				assert.Len(t, actions, 2)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
				assert.IsType(t, core.PatchActionImpl{}, actions[1])
				patchAction := actions[1].(core.PatchActionImpl)

				updatedNode, err := applyNodeStatusPatch(tc.existingNode, patchAction.GetPatch())
				require.NoError(t, err)
				assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedNode, updatedNode), "%s", diff.ObjectDiff(tc.expectedNode, updatedNode))
			} else {
				assert.Len(t, actions, 1)
				assert.IsType(t, core.GetActionImpl{}, actions[0])
			}

			reportedInUse := fakeVolumeManager.GetVolumesReportedInUse()
			assert.True(t, apiequality.Semantic.DeepEqual(tc.expectedReportedInUse, reportedInUse), "%s", diff.ObjectDiff(tc.expectedReportedInUse, reportedInUse))
		})
	}
}

func TestUpdateNodeStatusAndVolumesInUseWithNodeLease(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NodeLease, true)()

	cases := []struct {
		desc            string
		existingVolumes []v1.UniqueVolumeName // volumes to initially populate volumeManager
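For reference, the applyNodeStatusPatch helper used in the assertions above boils down to a strategic merge of the kubelet's patch bytes onto the existing node; a rough standalone equivalent (the real helper lives in this test file and may differ in detail):

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

// applyPatchSketch shows, in isolation, roughly what the test helper does:
// serialize the existing node, apply the strategic merge patch produced by the
// kubelet, and decode the result back into a Node for comparison.
func applyPatchSketch(existing *v1.Node, patch []byte) (*v1.Node, error) {
	original, err := json.Marshal(existing)
	if err != nil {
		return nil, err
	}
	merged, err := strategicpatch.StrategicMergePatch(original, patch, v1.Node{})
	if err != nil {
		return nil, err
	}
	updated := &v1.Node{}
	if err := json.Unmarshal(merged, updated); err != nil {
		return nil, err
	}
	return updated, nil
}

func main() {
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
	patched, err := applyPatchSketch(node, []byte(`{"status":{"volumesInUse":["vol1"]}}`))
	fmt.Println(patched.Status.VolumesInUse, err) // [vol1] <nil>
}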
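The tests above wire nodestatus.VolumesInUse into setNodeStatusFuncs; as used in this file it takes a "reconciler has synced" func and a "volumes currently in use" func and returns a func(*v1.Node) error. A minimal sketch with plain closures standing in for the volume manager (signatures match this vintage of the kubelet code, not necessarily current master):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
)

func main() {
	// The setter only reports volumes once the reconciler says it has synced;
	// both dependencies are replaced by trivial closures here.
	setter := nodestatus.VolumesInUse(
		func() bool { return true },
		func() []v1.UniqueVolumeName { return []v1.UniqueVolumeName{"vol1"} },
	)

	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}}
	if err := setter(node); err != nil {
		panic(err)
	}
	fmt.Println(node.Status.VolumesInUse) // [vol1]
}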