Merge pull request #64170 from mtaufen/cap-node-num-images
Automatic merge from submit-queue (batch tested with PRs 61803, 64305, 64170, 64361, 64339). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

add a flag to control the cap on images reported in node status

While I normally try to avoid adding flags, this is a short-term scalability fix for v1.11, and there are other long-term solutions in the works, so we shouldn't commit to this in the v1beta1 Kubelet config. Flags are our escape hatch here.

```release-note
NONE
```
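For readers skimming the diff below: the new setting caps how many container images the Kubelet reports in node status, and a value of -1 disables the cap (I believe the flag lands as `--node-status-max-images`, but the diff itself only shows the `nodeStatusMaxImages` field). Here is a minimal sketch of that truncation rule, mirroring the `makeExpectedImageList` test helper added below; the `image` type and `capImageList` name are illustrative stand-ins, not Kubelet code:

```go
package main

import (
	"fmt"
	"sort"
)

// image is a stand-in for the Kubelet's container image summary
// (illustrative, not the real API type).
type image struct {
	Name      string
	SizeBytes int64
}

// capImageList keeps at most maxImages images, largest first.
// A negative maxImages (the Kubelet uses -1) means "no limit".
func capImageList(images []image, maxImages int) []image {
	// Sort by size, descending, so truncation keeps the largest images.
	sort.Slice(images, func(i, j int) bool { return images[i].SizeBytes > images[j].SizeBytes })
	if maxImages < 0 || len(images) <= maxImages {
		return images
	}
	return images[:maxImages]
}

func main() {
	imgs := []image{{"a", 10}, {"b", 30}, {"c", 20}}
	fmt.Println(capImageList(imgs, 2))  // [{b 30} {c 20}]
	fmt.Println(capImageList(imgs, -1)) // all three, no cap
}
```

The test helper in the diff does the same thing: it sorts by image size before slicing, so when truncation happens the largest images are the ones that stay in the reported status.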
@@ -60,8 +60,8 @@ const (
maxImageTagsForTest = 20
)

// generateTestingImageList generate randomly generated image list and corresponding expectedImageList.
func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerImage) {
// generateTestingImageLists generate randomly generated image list and corresponding expectedImageList.
func generateTestingImageLists(count int, maxImages int) ([]kubecontainer.Image, []v1.ContainerImage) {
// imageList is randomly generated image list
var imageList []kubecontainer.Image
for ; count > 0; count-- {
@@ -73,7 +73,12 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerI
imageList = append(imageList, imageItem)
}

// expectedImageList is generated by imageList according to size and maxImagesInNodeStatus
expectedImageList := makeExpectedImageList(imageList, maxImages)
return imageList, expectedImageList
}

func makeExpectedImageList(imageList []kubecontainer.Image, maxImages int) []v1.ContainerImage {
// expectedImageList is generated by imageList according to size and maxImages
// 1. sort the imageList by size
sort.Sort(sliceutils.ByImageSize(imageList))
// 2. convert sorted imageList to v1.ContainerImage list
@@ -86,8 +91,11 @@ func generateTestingImageList(count int) ([]kubecontainer.Image, []v1.ContainerI

expectedImageList = append(expectedImageList, apiImage)
}
// 3. only returns the top maxImagesInNodeStatus images in expectedImageList
return imageList, expectedImageList[0:maxImagesInNodeStatus]
// 3. only returns the top maxImages images in expectedImageList
if maxImages == -1 { // -1 means no limit
return expectedImageList
}
return expectedImageList[0:maxImages]
}

func generateImageTags() []string {
@@ -299,165 +307,190 @@ func sortNodeAddresses(addrs sortableNodeAddress) {
}

func TestUpdateNewNodeStatus(t *testing.T) {
// generate one more than maxImagesInNodeStatus in inputImageList
inputImageList, expectedImageList := generateTestingImageList(maxImagesInNodeStatus + 1)
testKubelet := newTestKubeletWithImageList(
t, inputImageList, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
cases := []struct {
desc string
nodeStatusMaxImages int32
}{
{
desc: "5 image limit",
nodeStatusMaxImages: 5,
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10E9, // 10G
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
maxAge := 0 * time.Second
options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
kubelet.machineInfo = machineInfo

expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: fmt.Sprintf("kubelet has no disk pressure"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OSImage: "Debian GNU/Linux 7 (wheezy)",
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
Images: expectedImageList,
{
desc: "no image limit",
nodeStatusMaxImages: -1,
},
}

kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus())
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, actions[1].GetSubresource(), "status")
for _, tc := range cases {
t.Run(tc.desc, func(t *testing.T) {
// generate one more in inputImageList than we configure the Kubelet to report,
// or 5 images if unlimited
numTestImages := int(tc.nodeStatusMaxImages) + 1
if tc.nodeStatusMaxImages == -1 {
numTestImages = 5
}
inputImageList, expectedImageList := generateTestingImageLists(numTestImages, int(tc.nodeStatusMaxImages))
testKubelet := newTestKubeletWithImageList(
t, inputImageList, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = tc.nodeStatusMaxImages
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(200, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(100E6, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(2000, resource.BinarySI),
},
capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
}
kubeClient := testKubelet.fakeKubeClient
existingNode := v1.Node{ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname}}
kubeClient.ReactionChain = fake.NewSimpleClientset(&v1.NodeList{Items: []v1.Node{existingNode}}).ReactionChain
machineInfo := &cadvisorapi.MachineInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
NumCores: 2,
MemoryCapacity: 10E9, // 10G
}
mockCadvisor := testKubelet.fakeCadvisor
mockCadvisor.On("Start").Return(nil)
mockCadvisor.On("MachineInfo").Return(machineInfo, nil)
versionInfo := &cadvisorapi.VersionInfo{
KernelVersion: "3.16.0-0.bpo.4-amd64",
ContainerOsVersion: "Debian GNU/Linux 7 (wheezy)",
}
mockCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{
Usage: 400,
Capacity: 5000,
Available: 600,
}, nil)
mockCadvisor.On("VersionInfo").Return(versionInfo, nil)
maxAge := 0 * time.Second
options := cadvisorapiv2.RequestOptions{IdType: cadvisorapiv2.TypeName, Count: 2, Recursive: false, MaxAge: &maxAge}
mockCadvisor.On("ContainerInfoV2", "/", options).Return(map[string]cadvisorapiv2.ContainerInfo{}, nil)
kubelet.machineInfo = machineInfo

updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
assert.NoError(t, err)
for i, cond := range updatedNode.Status.Conditions {
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
expectedNode := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: testKubeletHostname},
Spec: v1.NodeSpec{},
Status: v1.NodeStatus{
Conditions: []v1.NodeCondition{
{
Type: v1.NodeOutOfDisk,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientDisk",
Message: fmt.Sprintf("kubelet has sufficient disk space available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeMemoryPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientMemory",
Message: fmt.Sprintf("kubelet has sufficient memory available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeDiskPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasNoDiskPressure",
Message: fmt.Sprintf("kubelet has no disk pressure"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodePIDPressure,
Status: v1.ConditionFalse,
Reason: "KubeletHasSufficientPID",
Message: fmt.Sprintf("kubelet has sufficient PID available"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
{
Type: v1.NodeReady,
Status: v1.ConditionTrue,
Reason: "KubeletReady",
Message: fmt.Sprintf("kubelet is posting ready status"),
LastHeartbeatTime: metav1.Time{},
LastTransitionTime: metav1.Time{},
},
},
NodeInfo: v1.NodeSystemInfo{
MachineID: "123",
SystemUUID: "abc",
BootID: "1b3",
KernelVersion: "3.16.0-0.bpo.4-amd64",
OSImage: "Debian GNU/Linux 7 (wheezy)",
OperatingSystem: goruntime.GOOS,
Architecture: goruntime.GOARCH,
ContainerRuntimeVersion: "test://1.5.0",
KubeletVersion: version.Get().String(),
KubeProxyVersion: version.Get().String(),
},
Capacity: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(2000, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(10E9, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(5000, resource.BinarySI),
},
Allocatable: v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(1800, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(9900E6, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(0, resource.DecimalSI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(3000, resource.BinarySI),
},
Addresses: []v1.NodeAddress{
{Type: v1.NodeInternalIP, Address: "127.0.0.1"},
{Type: v1.NodeHostName, Address: testKubeletHostname},
},
Images: expectedImageList,
},
}

kubelet.updateRuntimeUp()
assert.NoError(t, kubelet.updateNodeStatus())
actions := kubeClient.Actions()
require.Len(t, actions, 2)
require.True(t, actions[1].Matches("patch", "nodes"))
require.Equal(t, actions[1].GetSubresource(), "status")

updatedNode, err := applyNodeStatusPatch(&existingNode, actions[1].(core.PatchActionImpl).GetPatch())
assert.NoError(t, err)
for i, cond := range updatedNode.Status.Conditions {
assert.False(t, cond.LastHeartbeatTime.IsZero(), "LastHeartbeatTime for %v condition is zero", cond.Type)
assert.False(t, cond.LastTransitionTime.IsZero(), "LastTransitionTime for %v condition is zero", cond.Type)
updatedNode.Status.Conditions[i].LastHeartbeatTime = metav1.Time{}
updatedNode.Status.Conditions[i].LastTransitionTime = metav1.Time{}
}

// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NotReady should be last")
assert.Len(t, updatedNode.Status.Images, len(expectedImageList))
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
})
}

// Version skew workaround. See: https://github.com/kubernetes/kubernetes/issues/16961
assert.Equal(t, v1.NodeReady, updatedNode.Status.Conditions[len(updatedNode.Status.Conditions)-1].Type,
"NotReady should be last")
assert.Len(t, updatedNode.Status.Images, maxImagesInNodeStatus)
assert.True(t, apiequality.Semantic.DeepEqual(expectedNode, updatedNode), "%s", diff.ObjectDiff(expectedNode, updatedNode))
}

func TestUpdateExistingNodeStatus(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
@@ -742,7 +775,8 @@ func TestUpdateNodeStatusWithRuntimeStateError(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.nodeStatusMaxImages = 5 // don't truncate the image list that gets constructed by hand for this test
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),
allocatableReservation: v1.ResourceList{
@@ -1213,12 +1247,15 @@ func TestTryRegisterWithApiServer(t *testing.T) {
}

func TestUpdateNewNodeStatusTooLargeReservation(t *testing.T) {
// generate one more than maxImagesInNodeStatus in inputImageList
inputImageList, _ := generateTestingImageList(maxImagesInNodeStatus + 1)
const nodeStatusMaxImages = 5

// generate one more in inputImageList than we configure the Kubelet to report
inputImageList, _ := generateTestingImageLists(nodeStatusMaxImages+1, nodeStatusMaxImages)
testKubelet := newTestKubeletWithImageList(
t, inputImageList, false /* controllerAttachDetachEnabled */)
defer testKubelet.Cleanup()
kubelet := testKubelet.kubelet
kubelet.nodeStatusMaxImages = nodeStatusMaxImages
kubelet.kubeClient = nil // ensure only the heartbeat client is used
kubelet.containerManager = &localCM{
ContainerManager: cm.NewStubContainerManager(),