@@ -26,6 +26,8 @@ import (
 	"github.com/golang/glog"
 
 	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/types"
+	clientv1 "k8s.io/client-go/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/kubelet/events"
 	evictionapi "k8s.io/kubernetes/pkg/kubelet/eviction/api"
@@ -69,6 +71,15 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		Name:               CgroupName(cm.cgroupRoot),
 		ResourceParameters: getCgroupConfig(nodeAllocatable),
 	}
+
+	// Using ObjectReference for events as the node may not be cached; refer to #42701 for detail.
+	nodeRef := &clientv1.ObjectReference{
+		Kind:      "Node",
+		Name:      cm.nodeInfo.Name,
+		UID:       types.UID(cm.nodeInfo.Name),
+		Namespace: "",
+	}
+
 	// If Node Allocatable is enforced on a node that has not been drained or is updated on an existing node to a lower value,
 	// existing memory usage across pods might be higher that current Node Allocatable Memory Limits.
 	// Pod Evictions are expected to bring down memory usage to below Node Allocatable limits.
@@ -79,11 +90,11 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 			for {
 				err := cm.cgroupManager.Update(cgroupConfig)
 				if err == nil {
-					cm.recorder.Event(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
+					cm.recorder.Event(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated Node Allocatable limit across pods")
 					return
 				}
 				message := fmt.Sprintf("Failed to update Node Allocatable Limits %q: %v", cm.cgroupRoot, err)
-				cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+				cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 				time.Sleep(time.Minute)
 			}
 		}()
@@ -93,19 +104,19 @@ func (cm *containerManagerImpl) enforceNodeAllocatableCgroups() error {
 		glog.V(2).Infof("Enforcing System reserved on cgroup %q with limits: %+v", nc.SystemReservedCgroupName, nc.SystemReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.SystemReservedCgroupName, nc.SystemReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce System Reserved Cgroup Limits on %q: %v", nc.SystemReservedCgroupName, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
 		}
-		cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
+		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on system reserved cgroup %v", nc.SystemReservedCgroupName)
 	}
 	if nc.EnforceNodeAllocatable.Has(KubeReservedEnforcementKey) {
 		glog.V(2).Infof("Enforcing kube reserved on cgroup %q with limits: %+v", nc.KubeReservedCgroupName, nc.KubeReserved)
 		if err := enforceExistingCgroup(cm.cgroupManager, nc.KubeReservedCgroupName, nc.KubeReserved); err != nil {
 			message := fmt.Sprintf("Failed to enforce Kube Reserved Cgroup Limits on %q: %v", nc.KubeReservedCgroupName, err)
-			cm.recorder.Event(cm.nodeInfo, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
+			cm.recorder.Event(nodeRef, v1.EventTypeWarning, events.FailedNodeAllocatableEnforcement, message)
 			return fmt.Errorf(message)
 		}
-		cm.recorder.Eventf(cm.nodeInfo, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
+		cm.recorder.Eventf(nodeRef, v1.EventTypeNormal, events.SuccessfulNodeAllocatableEnforcement, "Updated limits on kube reserved cgroup %v", nc.KubeReservedCgroupName)
 	}
 	return nil
 }
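
Note on the pattern above: events are now recorded against a clientv1.ObjectReference built only from the node name, instead of against cm.nodeInfo, so event recording no longer depends on the Node object being present in the informer cache (the situation described in #42701). The snippet below is a minimal, stand-alone sketch of that pattern, not part of the patch: recordNodeEvent and the "NodeAllocatableEnforced" reason string are illustrative assumptions, and record.FakeRecorder stands in for the kubelet's real event recorder so the sketch can run on its own, assuming a client-go of the same vintage as the imports in this diff.

// Sketch only: recording an event against a synthesized ObjectReference
// rather than a cached Node object. recordNodeEvent and the
// "NodeAllocatableEnforced" reason are hypothetical names for illustration.
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/types"
	clientv1 "k8s.io/client-go/pkg/api/v1"
	"k8s.io/client-go/tools/record"
)

func recordNodeEvent(recorder record.EventRecorder, nodeName string) {
	// The reference is built from the node name alone, so the event can be
	// emitted even when the Node object has not been cached yet.
	nodeRef := &clientv1.ObjectReference{
		Kind:      "Node",
		Name:      nodeName,
		UID:       types.UID(nodeName),
		Namespace: "",
	}
	recorder.Event(nodeRef, clientv1.EventTypeNormal, "NodeAllocatableEnforced", "Updated Node Allocatable limit across pods")
}

func main() {
	// FakeRecorder buffers formatted event strings on a channel; it replaces
	// the kubelet's real recorder so the sketch runs without an API server.
	recorder := record.NewFakeRecorder(1)
	recordNodeEvent(recorder, "example-node")
	fmt.Println(<-recorder.Events) // e.g. "Normal NodeAllocatableEnforced Updated Node Allocatable limit across pods"
}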