move predicates into library (address #12744)
DONE:
1. Refactored all predicates: each predicate now returns whether the pod fits (bool) and an error (of type PredicateFailureError or InsufficientResourceError) giving the reason when it does not. (A violation of either MaxEBSVolumeCount or MaxGCEPDVolumeCount returns the same error, ErrMaxVolumeCountExceeded.) See the sketch after the TODO list.
2. GeneralPredicates() is a predicate function that bundles several other predicate functions (PodFitsResources, PodFitsHost, PodFitsHostPorts). It is registered as one of the predicates in DefaultAlgorithmProvider, is also called from canAdmitPod() in the Kubelet, and should be called by other components (rescheduler, etc.) when necessary. See the discussion in issue #12744.
3. Removed the podNumber check from GeneralPredicates.
4. HostName is now verified in the Kubelet's canAdmitPod(); added TestHostNameConflicts to kubelet_test.go.
5. Added a getNodeAnyWay() method to the Kubelet to fetch node information even in standalone mode.
TODO:
1. determine which predicates should be included in GeneralPredicates()
2. separate GeneralPredicates() into:
a. GeneralPredicatesEvictPod() and
b. GeneralPredicatesNotEvictPod()
3. DaemonSet should use GeneralPredicates()
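For context, a minimal sketch of the return convention from DONE item 1. This is illustrative, not the library code: Pod and NodeInfo are stubs for api.Pod and schedulercache.NodeInfo, and the predicate body is elided.

package main

import (
	"errors"
	"fmt"
)

// Stubs standing in for api.Pod and schedulercache.NodeInfo.
type Pod struct{ Name string }
type NodeInfo struct{}

// FitPredicate is the shape every refactored predicate shares: a fit
// result plus a typed error that explains a negative result.
type FitPredicate func(pod *Pod, nodeName string, nodeInfo *NodeInfo) (bool, error)

// Failure reasons are sentinel values, so callers can tell "pod does not
// fit" apart from "predicate could not be evaluated".
var ErrDiskConflict = errors.New("DiskConflict")

// noDiskConflict never returns (false, nil): a negative result always
// carries its reason.
func noDiskConflict(pod *Pod, nodeName string, nodeInfo *NodeInfo) (bool, error) {
	conflict := false // real volume-conflict detection elided
	if conflict {
		return false, ErrDiskConflict
	}
	return true, nil
}

func main() {
	var p FitPredicate = noDiskConflict
	fit, err := p(&Pod{Name: "nginx"}, "node-1", &NodeInfo{})
	fmt.Println(fit, err) // true <nil>
}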
@@ -127,7 +127,7 @@ func NoDiskConflict(pod *api.Pod, nodeName string, nodeInfo *schedulercache.Node
 	for _, v := range pod.Spec.Volumes {
 		for _, ev := range nodeInfo.Pods() {
 			if isVolumeConflict(v, ev) {
-				return false, nil
+				return false, ErrDiskConflict
 			}
 		}
 	}
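The NoDiskConflict change above is the pattern repeated throughout this diff: (false, nil) becomes (false, reason). Continuing the stub package from the sketch above, a hypothetical caller can then branch on the error's concrete type (the real scheduler's bookkeeping differs):

// InsufficientResourceError here is a stand-in for the type this commit
// introduces for resource-shortage failures.
type InsufficientResourceError struct {
	ResourceName              string
	Requested, Used, Capacity int64
}

func (e *InsufficientResourceError) Error() string {
	return fmt.Sprintf("insufficient %s: requested %d, used %d, capacity %d",
		e.ResourceName, e.Requested, e.Used, e.Capacity)
}

// classifyFailure shows why typed errors matter: a caller can separate
// resource shortages from other predicate failures without string parsing.
func classifyFailure(p FitPredicate, pod *Pod, nodeName string, nodeInfo *NodeInfo) string {
	fit, err := p(pod, nodeName, nodeInfo)
	if fit {
		return "fits"
	}
	if err == nil {
		return "does not fit (no reason reported)"
	}
	switch err.(type) {
	case *InsufficientResourceError:
		return "node lacks resources: " + err.Error()
	default:
		return "predicate failed: " + err.Error()
	}
}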
@@ -229,7 +229,8 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, nodeName string, nodeI
 	numNewVolumes := len(newVolumes)
 
 	if numExistingVolumes+numNewVolumes > c.maxVolumes {
-		return false, nil
+		// violates MaxEBSVolumeCount or MaxGCEPDVolumeCount
+		return false, ErrMaxVolumeCountExceeded
 	}
 
 	return true, nil
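Per DONE item 1, MaxEBSVolumeCount and MaxGCEPDVolumeCount deliberately share one failure value. That falls out of the structure: both predicates are instances of the same checker, parameterized differently. A sketch in the same stub package (field names are illustrative):

var ErrMaxVolumeCountExceeded = errors.New("MaxVolumeCount")

// One checker type serves both cloud providers; only the volume filter
// and the limit differ, so both report the same sentinel on violation.
type maxVolumeCountChecker struct {
	maxVolumes int
}

func (c *maxVolumeCountChecker) predicate(pod *Pod, nodeName string, nodeInfo *NodeInfo) (bool, error) {
	numExisting, numNew := 0, 0 // volume counting elided
	if numExisting+numNew > c.maxVolumes {
		return false, ErrMaxVolumeCountExceeded
	}
	return true, nil
}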
@@ -362,7 +363,7 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, nodeName string, nodeInfo *s
 				nodeV, _ := nodeConstraints[k]
 				if v != nodeV {
 					glog.V(2).Infof("Won't schedule pod %q onto node %q due to volume %q (mismatch on %q)", pod.Name, nodeName, pvName, k)
-					return false, nil
+					return false, ErrVolumeZoneConflict
 				}
 			}
 		}
@@ -421,20 +422,9 @@ func podName(pod *api.Pod) string {
 	return pod.Namespace + "/" + pod.Name
 }
 
 // PodFitsResources calculates fit based on requested, rather than used resources
-func (r *ResourceFit) PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
-	info, err := r.info.GetNodeInfo(nodeName)
-	if err != nil {
-		return false, err
-	}
-
+func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo, info *api.Node) (bool, error) {
 	allocatable := info.Status.Allocatable
-	allowedPodNumber := allocatable.Pods().Value()
-	if int64(len(nodeInfo.Pods()))+1 > allowedPodNumber {
-		return false,
-			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), allowedPodNumber)
-	}
-
 	podRequest := getResourceRequest(pod)
 	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
 		return true, nil
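The split above is the structural heart of the refactor: the old method both fetched the node and judged fit, while podFitsResourcesInternal takes an already-fetched *api.Node, so the Kubelet (which holds its own node object, via getNodeAnyWay() even in standalone mode) can run the check without a lookup. In the stub package, the shape is:

// Node stands in for api.Node.
type Node struct{ AllocatablePods int64 }

// nodeLister stands in for the scheduler's NodeInfo lookup interface.
type nodeLister interface {
	GetNodeInfo(nodeName string) (*Node, error)
}

// The pure check: a function of the pod, cached pod state, and a node the
// caller already has in hand.
func podFitsResourcesSketch(pod *Pod, nodeInfo *NodeInfo, node *Node) (bool, error) {
	// resource math elided; see podFitsResourcesInternal above
	return true, nil
}

// The scheduler-side wrapper: look the node up, then delegate.
func podFitsResourcesViaLookup(l nodeLister, pod *Pod, nodeName string, nodeInfo *NodeInfo) (bool, error) {
	node, err := l.GetNodeInfo(nodeName)
	if err != nil {
		return false, err
	}
	return podFitsResourcesSketch(pod, nodeInfo, node)
}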
@@ -442,6 +432,7 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *
 
 	totalMilliCPU := allocatable.Cpu().MilliValue()
 	totalMemory := allocatable.Memory().Value()
 
 	if totalMilliCPU < podRequest.milliCPU+nodeInfo.RequestedResource().MilliCPU {
 		return false,
 			newInsufficientResourceError(cpuResourceName, podRequest.milliCPU, nodeInfo.RequestedResource().MilliCPU, totalMilliCPU)
@@ -455,15 +446,30 @@ func (r *ResourceFit) PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *
 	return true, nil
 }
 
+func (r *NodeStatus) PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+	info, err := r.info.GetNodeInfo(nodeName)
+	if err != nil {
+		return false, err
+	}
+	// TODO: move the following podNumber check to podFitsResourcesInternal when Kubelet allows podNumber check (See #20263).
+	allocatable := info.Status.Allocatable
+	allowedPodNumber := allocatable.Pods().Value()
+	if int64(len(nodeInfo.Pods()))+1 > allowedPodNumber {
+		return false,
+			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), allowedPodNumber)
+	}
+	return podFitsResourcesInternal(pod, nodeName, nodeInfo, info)
+}
+
 func NewResourceFitPredicate(info NodeInfo) algorithm.FitPredicate {
-	fit := &ResourceFit{
+	fit := &NodeStatus{
 		info: info,
 	}
 	return fit.PodFitsResources
 }
 
 func NewSelectorMatchPredicate(info NodeInfo) algorithm.FitPredicate {
-	selector := &NodeSelector{
+	selector := &NodeStatus{
 		info: info,
 	}
 	return selector.PodSelectorMatches
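ResourceFit and NodeSelector existed only to carry a NodeInfo lister for their respective methods; folding both into one NodeStatus type gives the node-lookup predicates a common receiver, which is what makes a combined GeneralPredicates entry point possible later in the diff. The factory shape, continuing the stubs (names are illustrative, not the real API):

type nodeStatusSketch struct {
	info nodeLister
}

func (n *nodeStatusSketch) PodFitsResources(pod *Pod, nodeName string, nodeInfo *NodeInfo) (bool, error) {
	node, err := n.info.GetNodeInfo(nodeName)
	if err != nil {
		return false, err
	}
	return podFitsResourcesSketch(pod, nodeInfo, node)
}

// Mirrors NewResourceFitPredicate: bind the lister once, then hand the
// bound method out as a plain FitPredicate.
func newResourceFitPredicateSketch(info nodeLister) FitPredicate {
	return (&nodeStatusSketch{info: info}).PodFitsResources
}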
@@ -542,19 +548,25 @@ type NodeSelector struct {
 	info NodeInfo
 }
 
-func (n *NodeSelector) PodSelectorMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+func (n *NodeStatus) PodSelectorMatches(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
 	node, err := n.info.GetNodeInfo(nodeName)
 	if err != nil {
 		return false, err
 	}
-	return PodMatchesNodeLabels(pod, node), nil
+	if PodMatchesNodeLabels(pod, node) {
+		return true, nil
+	}
+	return false, ErrNodeSelectorNotMatch
 }
 
 func PodFitsHost(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
 	if len(pod.Spec.NodeName) == 0 {
 		return true, nil
 	}
-	return pod.Spec.NodeName == nodeName, nil
+	if pod.Spec.NodeName == nodeName {
+		return true, nil
+	}
+	return false, ErrPodNotMatchHostName
 }
 
 type NodeLabelChecker struct {
@@ -594,7 +606,7 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, nodeName string,
 	for _, label := range n.labels {
 		exists = nodeLabels.Has(label)
 		if (exists && !n.presence) || (!exists && n.presence) {
-			return false, nil
+			return false, ErrNodeLabelPresenceViolated
 		}
 	}
 	return true, nil
@@ -692,7 +704,10 @@ func (s *ServiceAffinity) CheckServiceAffinity(pod *api.Pod, nodeName string, no
 	}
 
 	// check if the node matches the selector
-	return affinitySelector.Matches(labels.Set(node.Labels)), nil
+	if affinitySelector.Matches(labels.Set(node.Labels)) {
+		return true, nil
+	}
+	return false, ErrServiceAffinityViolated
 }
 
 func PodFitsHostPorts(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
@@ -706,7 +721,7 @@ func PodFitsHostPorts(pod *api.Pod, nodeName string, nodeInfo *schedulercache.No
 			continue
 		}
 		if existingPorts[wport] {
-			return false, nil
+			return false, ErrPodNotFitsHostPorts
 		}
 	}
 	return true, nil
@@ -735,3 +750,41 @@ func haveSame(a1, a2 []string) bool {
 	}
 	return false
 }
+
+type NodeStatus struct {
+	info NodeInfo
+}
+
+func GeneralPredicates(info NodeInfo) algorithm.FitPredicate {
+	node := &NodeStatus{
+		info: info,
+	}
+	return node.SchedulerGeneralPredicates
+}
+
+func (n *NodeStatus) SchedulerGeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo) (bool, error) {
+	node, err := n.info.GetNodeInfo(nodeName)
+	if err != nil {
+		return false, err
+	}
+	return RunGeneralPredicates(pod, nodeName, nodeInfo, node)
+}
+
+func RunGeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo, node *api.Node) (bool, error) {
+	fit, err := podFitsResourcesInternal(pod, nodeName, nodeInfo, node)
+	if !fit {
+		return fit, err
+	}
+	fit, err = PodFitsHost(pod, nodeName, nodeInfo)
+	if !fit {
+		return fit, err
+	}
+	fit, err = PodFitsHostPorts(pod, nodeName, nodeInfo)
+	if !fit {
+		return fit, err
+	}
+	if !PodMatchesNodeLabels(pod, node) {
+		return false, ErrNodeSelectorNotMatch
+	}
+	return true, nil
+}
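RunGeneralPredicates is the piece the Kubelet shares: it takes the *api.Node directly, so canAdmitPod() can pass the node obtained from getNodeAnyWay(). The Kubelet side is not part of this file's diff; a hedged sketch of the call pattern, still in the stub package:

// Hypothetical admission check mirroring canAdmitPod(): run the shared
// predicates against the node the Kubelet already has, and surface the
// typed reason when the pod does not fit.
func canAdmitPodSketch(node *Node, pod *Pod, nodeInfo *NodeInfo) (bool, string) {
	fit, err := runGeneralPredicatesSketch(pod, nodeInfo, node)
	if fit {
		return true, ""
	}
	if err != nil {
		return false, err.Error() // e.g. a host-port conflict or an insufficient-resource message
	}
	return false, "pod does not fit (no reason reported)"
}

// Stands in for RunGeneralPredicates: chains the resource, host name,
// host port, and node selector checks (remaining checks elided).
func runGeneralPredicatesSketch(pod *Pod, nodeInfo *NodeInfo, node *Node) (bool, error) {
	if fit, err := podFitsResourcesSketch(pod, nodeInfo, node); !fit {
		return false, err
	}
	return true, nil
}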