Merge pull request #65660 from mtaufen/incremental-refactor-kubelet-node-status
Automatic merge from submit-queue (batch tested with PRs 66152, 66406, 66218, 66278, 65660). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Refactor kubelet node status setters, add test coverage

This internal refactor moves the node status setters to a new package, explicitly injects dependencies to facilitate unit testing, and adds individual unit tests for the setters. I gave each setter a distinct commit to facilitate review.

Non-goals:
- I intentionally excluded the class of setters that return a "modified" boolean, as I want to think more carefully about how to cleanly handle the behavior, and this PR is already rather large.
- I would like to clean up the status update control loops as well, but that belongs in a separate PR.

```release-note
NONE
```
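To make the pattern concrete, below is a minimal sketch of a dependency-injected status setter of the kind this refactor introduces, together with a note on how a test exercises it. The names and signatures are illustrative assumptions for this write-up, not the exact API added in `pkg/kubelet/nodestatus`.

```go
// Sketch only: illustrates the dependency-injection pattern described above.
// Names and signatures are hypothetical, not the exact nodestatus API.
package nodestatussketch

import (
	"net"

	v1 "k8s.io/api/core/v1"
)

// Setter mutates the node in place during a status sync and returns an error
// if the node could not be updated.
type Setter func(node *v1.Node) error

// NodeAddress returns a Setter that fills in node.Status.Addresses. Every
// external dependency (the configured node IP, the IP validator, and the
// source of addresses) is injected, so a unit test can pass fakes instead of
// wiring up a real cloud provider.
func NodeAddress(
	nodeIP net.IP, // may be nil when no node IP is configured
	validateNodeIP func(net.IP) error,
	nodeAddressesFunc func() ([]v1.NodeAddress, error),
) Setter {
	return func(node *v1.Node) error {
		if nodeIP != nil {
			if err := validateNodeIP(nodeIP); err != nil {
				return err
			}
		}
		addrs, err := nodeAddressesFunc()
		if err != nil {
			return err
		}
		// A real setter would filter and order addresses against nodeIP;
		// that logic is elided in this sketch.
		node.Status.Addresses = addrs
		return nil
	}
}
```

A unit test can then construct the setter with canned addresses and a no-op validator, call it against a fresh `v1.Node`, and assert on `node.Status.Addresses`. The kubelet collects its setters via `defaultNodeStatusFuncs()` and applies them during each status sync, which is why several tests in the diff below regenerate `kubelet.setNodeStatusFuncs` after swapping in a stub container manager.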
@@ -46,11 +46,11 @@ import (
	v1core "k8s.io/client-go/kubernetes/typed/core/v1"
	"k8s.io/client-go/rest"
	core "k8s.io/client-go/testing"
	fakecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/fake"
	kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
	cadvisortest "k8s.io/kubernetes/pkg/kubelet/cadvisor/testing"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/nodestatus"
	"k8s.io/kubernetes/pkg/kubelet/util/sliceutils"
	"k8s.io/kubernetes/pkg/version"
	"k8s.io/kubernetes/pkg/volume/util"
@@ -85,7 +85,7 @@ func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.
	var expectedImageList []v1.ContainerImage
	for _, kubeImage := range imageList {
		apiImage := v1.ContainerImage{
			Names:     kubeImage.RepoTags[0:maxNamesPerImageInNodeStatus],
			Names:     kubeImage.RepoTags[0:nodestatus.MaxNamesPerImageInNodeStatus],
			SizeBytes: kubeImage.Size,
		}

@@ -100,9 +100,9 @@ func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.

func generateImageTags() []string {
	var tagList []string
	// Generate > maxNamesPerImageInNodeStatus tags so that the test can verify
	// that kubelet reports up to maxNamesPerImageInNodeStatus tags.
	count := rand.IntnRange(maxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
	// Generate > MaxNamesPerImageInNodeStatus tags so that the test can verify
	// that kubelet reports up to MaxNamesPerImageInNodeStatus tags.
	count := rand.IntnRange(nodestatus.MaxNamesPerImageInNodeStatus+1, maxImageTagsForTest+1)
	for ; count > 0; count-- {
		tagList = append(tagList, "k8s.gcr.io:v"+strconv.Itoa(count))
	}
@@ -140,160 +140,6 @@ func (lcm *localCM) GetCapacity() v1.ResourceList {
	return lcm.capacity
}

func TestNodeStatusWithCloudProviderNodeIP(t *testing.T) {
	testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.hostname = testKubeletHostname

	cases := []struct {
		name              string
		nodeIP            net.IP
		nodeAddresses     []v1.NodeAddress
		expectedAddresses []v1.NodeAddress
		shouldError       bool
	}{
		{
			name:   "A single InternalIP",
			nodeIP: net.ParseIP("10.1.1.1"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "NodeIP is external",
			nodeIP: net.ParseIP("55.55.55.55"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			// Accommodating #45201 and #49202
			name:   "InternalIP and ExternalIP are the same",
			nodeIP: net.ParseIP("55.55.55.55"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "An Internal/ExternalIP, an Internal/ExternalDNS",
			nodeIP: net.ParseIP("10.1.1.1"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeInternalDNS, Address: "ip-10-1-1-1.us-west-2.compute.internal"},
				{Type: v1.NodeExternalDNS, Address: "ec2-55-55-55-55.us-west-2.compute.amazonaws.com"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeInternalDNS, Address: "ip-10-1-1-1.us-west-2.compute.internal"},
				{Type: v1.NodeExternalDNS, Address: "ec2-55-55-55-55.us-west-2.compute.amazonaws.com"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "An Internal with multiple internal IPs",
			nodeIP: net.ParseIP("10.1.1.1"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeInternalIP, Address: "10.2.2.2"},
				{Type: v1.NodeInternalIP, Address: "10.3.3.3"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			shouldError: false,
		},
		{
			name:   "An InternalIP that isn't valid: should error",
			nodeIP: net.ParseIP("10.2.2.2"),
			nodeAddresses: []v1.NodeAddress{
				{Type: v1.NodeInternalIP, Address: "10.1.1.1"},
				{Type: v1.NodeExternalIP, Address: "55.55.55.55"},
				{Type: v1.NodeHostName, Address: testKubeletHostname},
			},
			expectedAddresses: nil,
			shouldError:       true,
		},
	}
	for _, testCase := range cases {
		// testCase setup
		existingNode := v1.Node{
			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
			Spec:       v1.NodeSpec{},
		}

		kubelet.nodeIP = testCase.nodeIP

		fakeCloud := &fakecloud.FakeCloud{
			Addresses: testCase.nodeAddresses,
			Err:       nil,
		}
		kubelet.cloud = fakeCloud
		kubelet.cloudResourceSyncManager = NewCloudResourceSyncManager(kubelet.cloud, kubelet.nodeName, kubelet.nodeStatusUpdateFrequency)
		stopCh := make(chan struct{})
		go kubelet.cloudResourceSyncManager.Run(stopCh)
		kubelet.nodeIPValidator = func(nodeIP net.IP) error {
			return nil
		}

		// execute method
		err := kubelet.setNodeAddress(&existingNode)
		close(stopCh)
		if err != nil && !testCase.shouldError {
			t.Errorf("Unexpected error for test %s: %q", testCase.name, err)
			continue
		} else if err != nil && testCase.shouldError {
			// expected an error
			continue
		}

		// Sort both sets for consistent equality
		sortNodeAddresses(testCase.expectedAddresses)
		sortNodeAddresses(existingNode.Status.Addresses)

		assert.True(
			t,
			apiequality.Semantic.DeepEqual(
				testCase.expectedAddresses,
				existingNode.Status.Addresses,
			),
			fmt.Sprintf("Test %s failed %%s", testCase.name),
			diff.ObjectDiff(testCase.expectedAddresses, existingNode.Status.Addresses),
		)
	}
}

// sortableNodeAddress is a type for sorting []v1.NodeAddress
type sortableNodeAddress []v1.NodeAddress

@@ -350,6 +196,10 @@ func TestUpdateNewNodeStatus(t *testing.T) {
			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
@@ -483,6 +333,9 @@ func TestUpdateExistingNodeStatus(t *testing.T) {
			v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{
@@ -749,6 +602,9 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
			v1.ResourceEphemeralStorage: *resource.NewQuantity(20E9, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	clock := testKubelet.fakeClock
	kubeClient := testKubelet.fakeKubeClient
@@ -1190,6 +1046,10 @@ func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
			v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
		},
	}
	// Since this test retroactively overrides the stub container manager,
	// we have to regenerate default status setters.
	kubelet.setNodeStatusFuncs = kubelet.defaultNodeStatusFuncs()

	kubeClient := testKubelet.fakeKubeClient
	existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
	kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
@@ -1641,85 +1501,3 @@ func TestValidateNodeIPParam(t *testing.T) {
		}
	}
}

func TestSetVolumeLimits(t *testing.T) {
	testKubelet := newTestKubeletWithoutFakeVolumePlugin(t, false /* controllerAttachDetachEnabled */)
	defer testKubelet.Cleanup()
	kubelet := testKubelet.kubelet
	kubelet.kubeClient = nil // ensure only the heartbeat client is used
	kubelet.hostname = testKubeletHostname

	var testcases = []struct {
		name              string
		cloudProviderName string
		expectedVolumeKey string
		expectedLimit     int64
	}{
		{
			name:              "For default GCE cloudprovider",
			cloudProviderName: "gce",
			expectedVolumeKey: util.GCEVolumeLimitKey,
			expectedLimit:     16,
		},
		{
			name:              "For default AWS Cloudprovider",
			cloudProviderName: "aws",
			expectedVolumeKey: util.EBSVolumeLimitKey,
			expectedLimit:     39,
		},
		{
			name:              "for default Azure cloudprovider",
			cloudProviderName: "azure",
			expectedVolumeKey: util.AzureVolumeLimitKey,
			expectedLimit:     16,
		},
		{
			name:              "when no cloudprovider is present",
			cloudProviderName: "",
			expectedVolumeKey: util.AzureVolumeLimitKey,
			expectedLimit:     -1,
		},
	}
	for _, test := range testcases {
		node := &v1.Node{
			ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname, Annotations: make(map[string]string)},
			Spec:       v1.NodeSpec{},
		}

		if test.cloudProviderName != "" {
			fakeCloud := &fakecloud.FakeCloud{
				Provider: test.cloudProviderName,
				Err:      nil,
			}
			kubelet.cloud = fakeCloud
		} else {
			kubelet.cloud = nil
		}

		kubelet.setVolumeLimits(node)
		nodeLimits := []v1.ResourceList{}
		nodeLimits = append(nodeLimits, node.Status.Allocatable)
		nodeLimits = append(nodeLimits, node.Status.Capacity)
		for _, volumeLimits := range nodeLimits {
			if test.expectedLimit == -1 {
				_, ok := volumeLimits[v1.ResourceName(test.expectedVolumeKey)]
				if ok {
					t.Errorf("Expected no volume limit for %s, but found one", test.expectedVolumeKey)
				}
			} else {
				fl, ok := volumeLimits[v1.ResourceName(test.expectedVolumeKey)]

				if !ok {
					t.Errorf("Expected to find volume limit for %s, found none", test.expectedVolumeKey)
				}
				foundLimit, _ := fl.AsInt64()
				expectedValue := resource.NewQuantity(test.expectedLimit, resource.DecimalSI)
				if expectedValue.Cmp(fl) != 0 {
					t.Errorf("Expected volume limit for %s to be %v, found %v", test.expectedVolumeKey, test.expectedLimit, foundLimit)
				}
			}

		}

	}
}