Eviction manager needs to start as a runtime-dependent module
commit 3e75f2effb (parent 8780b45a04)
@@ -101,8 +101,9 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
 }
 
 // Start starts the control loop to observe and response to low compute resources.
-func (m *managerImpl) Start(podFunc ActivePodsFunc, monitoringInterval time.Duration) {
+func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, monitoringInterval time.Duration) error {
 	go wait.Until(func() { m.synchronize(podFunc) }, monitoringInterval, wait.NeverStop)
+	return nil
 }
 
 // IsUnderMemoryPressure returns true if the node is under memory pressure.
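In this commit the new diskInfoProvider parameter and error return are only threaded through the signature; the body still just launches the synchronize loop and returns nil. A minimal sketch of how a follow-up change could consume them inside Start — the dedicatedImageFs field and the threshold-selection idea are assumptions, not part of this change:

func (m *managerImpl) Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, monitoringInterval time.Duration) error {
	// Hypothetical follow-up: ask once at startup whether images live on a
	// dedicated filesystem, and fail Start if that cannot be determined.
	hasDedicatedImageFs, err := diskInfoProvider.HasDedicatedImageFs()
	if err != nil {
		return err
	}
	// Assumed field (not in this commit): remembered so the synchronize loop
	// could later pick disk thresholds that match the node's filesystem layout.
	m.dedicatedImageFs = hasDedicatedImageFs
	go wait.Until(func() { m.synchronize(podFunc) }, monitoringInterval, wait.NeverStop)
	return nil
}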
@@ -65,12 +65,18 @@ type Threshold struct {
 // Manager evaluates when an eviction threshold for node stability has been met on the node.
 type Manager interface {
 	// Start starts the control loop to monitor eviction thresholds at specified interval.
-	Start(podFunc ActivePodsFunc, monitoringInterval time.Duration)
+	Start(diskInfoProvider DiskInfoProvider, podFunc ActivePodsFunc, monitoringInterval time.Duration) error
 
 	// IsUnderMemoryPressure returns true if the node is under memory pressure.
 	IsUnderMemoryPressure() bool
 }
 
+// DiskInfoProvider is responsible for informing the manager how disk is configured.
+type DiskInfoProvider interface {
+	// HasDedicatedImageFs returns true if the imagefs is on a separate device from the rootfs.
+	HasDedicatedImageFs() (bool, error)
+}
+
 // KillPodFunc kills a pod.
 // The pod status is updated, and then it is killed with the specified grace period.
 // This function must block until either the pod is killed or an error is encountered.
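Because DiskInfoProvider exposes a single method, callers outside the kubelet (for example eviction manager unit tests) can satisfy it with a tiny stub. A hypothetical sketch, not part of this commit; the fakeDiskInfoProvider name is illustrative:

// fakeDiskInfoProvider returns canned answers so tests can exercise Start
// without a real cadvisor-backed kubelet.
type fakeDiskInfoProvider struct {
	dedicatedImageFs bool
	err              error
}

// HasDedicatedImageFs returns the configured result and error.
func (f *fakeDiskInfoProvider) HasDedicatedImageFs() (bool, error) {
	return f.dedicatedImageFs, f.err
}

// compile-time check that the stub satisfies the interface
var _ DiskInfoProvider = &fakeDiskInfoProvider{}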
@@ -926,7 +926,11 @@ func (kl *Kubelet) initializeModules() error {
 // initializeRuntimeDependentModules will initialize internal modules that require the container runtime to be up.
 func (kl *Kubelet) initializeRuntimeDependentModules() {
 	if err := kl.cadvisor.Start(); err != nil {
-		kl.runtimeState.setInternalError(fmt.Errorf("Failed to start cAdvisor %v", err))
+		kl.runtimeState.setInternalError(fmt.Errorf("failed to start cAdvisor %v", err))
+	}
+	// eviction manager must start after cadvisor because it needs to know if the container runtime has a dedicated imagefs
+	if err := kl.evictionManager.Start(kl, kl.getActivePods, evictionMonitoringPeriod); err != nil {
+		kl.runtimeState.setInternalError(fmt.Errorf("failed to start eviction manager %v", err))
 	}
 }
 
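Note the ordering constraint this hunk encodes: the eviction manager now starts in initializeRuntimeDependentModules, after cadvisor, and the Kubelet itself is passed as the DiskInfoProvider, since the HasDedicatedImageFs method added further down answers from cadvisor filesystem stats. A startup failure is recorded as an internal runtime error rather than aborting the kubelet.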
@@ -961,7 +965,6 @@ func (kl *Kubelet) Run(updates <-chan kubetypes.PodUpdate) {
 	// Start component sync loops.
 	kl.statusManager.Start()
 	kl.probeManager.Start()
-	kl.evictionManager.Start(kl.getActivePods, evictionMonitoringPeriod)
 
 	// Start the pod lifecycle event generator.
 	kl.pleg.Start()
@@ -45,6 +45,19 @@ func (kl *Kubelet) GetContainerInfo(podFullName string, podUID types.UID, contai
 	return &ci, nil
 }
 
+// HasDedicatedImageFs returns true if the imagefs has a dedicated device.
+func (kl *Kubelet) HasDedicatedImageFs() (bool, error) {
+	imageFsInfo, err := kl.ImagesFsInfo()
+	if err != nil {
+		return false, err
+	}
+	rootFsInfo, err := kl.RootFsInfo()
+	if err != nil {
+		return false, err
+	}
+	return imageFsInfo.Device != rootFsInfo.Device, nil
+}
+
 // GetContainerInfoV2 returns stats (from Cadvisor) for containers.
 func (kl *Kubelet) GetContainerInfoV2(name string, options cadvisorapiv2.RequestOptions) (map[string]cadvisorapiv2.ContainerInfo, error) {
 	return kl.cadvisor.ContainerInfoV2(name, options)
@@ -21,6 +21,7 @@ import (
 	"testing"
 
 	cadvisorapi "github.com/google/cadvisor/info/v1"
+	cadvisorapiv2 "github.com/google/cadvisor/info/v2"
 	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
 	kubecontainertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
 )
@@ -232,3 +233,37 @@ func TestGetContainerInfoWithNoMatchingContainers(t *testing.T) {
 	}
 	mockCadvisor.AssertExpectations(t)
 }
+
+func TestHasDedicatedImageFs(t *testing.T) {
+	testCases := map[string]struct {
+		imageFsInfo cadvisorapiv2.FsInfo
+		rootFsInfo  cadvisorapiv2.FsInfo
+		expected    bool
+	}{
+		"has-dedicated-image-fs": {
+			imageFsInfo: cadvisorapiv2.FsInfo{Device: "123"},
+			rootFsInfo:  cadvisorapiv2.FsInfo{Device: "456"},
+			expected:    true,
+		},
+		"has-unified-image-fs": {
+			imageFsInfo: cadvisorapiv2.FsInfo{Device: "123"},
+			rootFsInfo:  cadvisorapiv2.FsInfo{Device: "123"},
+			expected:    false,
+		},
+	}
+	for testName, testCase := range testCases {
+		testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
+		kubelet := testKubelet.kubelet
+		mockCadvisor := testKubelet.fakeCadvisor
+		mockCadvisor.On("Start").Return(nil)
+		mockCadvisor.On("ImagesFsInfo").Return(testCase.imageFsInfo, nil)
+		mockCadvisor.On("RootFsInfo").Return(testCase.rootFsInfo, nil)
+		actual, err := kubelet.HasDedicatedImageFs()
+		if err != nil {
+			t.Errorf("case: %s, unexpected error: %v", testName, err)
+		}
+		if actual != testCase.expected {
+			t.Errorf("case: %s, expected: %v, actual: %v", testName, testCase.expected, actual)
+		}
+	}
+}
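The test exercises HasDedicatedImageFs entirely through the test kubelet's cadvisor mock (canned ImagesFsInfo/RootFsInfo results), so no real cadvisor is required; assuming it lives in the kubelet package with the other cadvisor tests, go test -run TestHasDedicatedImageFs in that package should cover both the dedicated and unified cases.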