Merge pull request #42370 from janetkuo/ds-e2e-ignore-no-schedule-taint

Automatic merge from submit-queue (batch tested with PRs 42456, 42457, 42414, 42480, 42370)

In DaemonSet e2e test, don't check nodes with NoSchedule taints

Fixes #42345 

For example, the master node has an `ismaster:NoSchedule` taint. We don't expect pods to be created there without a toleration.
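For context (not part of this PR), a DaemonSet whose pods should land on such a node anyway needs a matching toleration in its pod template. A minimal Go sketch using today's `k8s.io/api/core/v1` types; the `ismaster` key and the container image are purely illustrative:

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Pod template spec for a DaemonSet that must also run on the tainted node.
	// Without the toleration below, the PodToleratesNodeTaints predicate fails
	// for that node and no daemon pod is created there.
	podSpec := v1.PodSpec{
		Containers: []v1.Container{{Name: "agent", Image: "example/agent:latest"}},
		Tolerations: []v1.Toleration{{
			Key:      "ismaster",               // example taint key from the description above
			Operator: v1.TolerationOpExists,    // tolerate the taint regardless of its value
			Effect:   v1.TaintEffectNoSchedule, // only the NoSchedule effect matters here
		}},
	}
	fmt.Printf("tolerations: %+v\n", podSpec.Tolerations)
}
```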

cc @marun @lukaszo @kargakis @yujuhong @Random-Liu @davidopp @kubernetes/sig-apps-pr-reviews
Kubernetes Submit Queue, 2017-03-04 00:17:47 -08:00, committed by GitHub
4 changed files with 66 additions and 24 deletions


@@ -788,9 +788,7 @@ func (dsc *DaemonSetsController) syncDaemonSet(key string) error {
 // Returns true when a daemonset should continue running on a node if a daemonset pod is already
 // running on that node.
 func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *extensions.DaemonSet) (wantToRun, shouldSchedule, shouldContinueRunning bool, err error) {
-	newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
-	newPod.Namespace = ds.Namespace
-	newPod.Spec.NodeName = node.Name
+	newPod := NewPod(ds, node.Name)
 	critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) && kubelettypes.IsCriticalPod(newPod)
 	// Because these bools require an && of all their required conditions, we start
@@ -867,14 +865,14 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 	nodeInfo := schedulercache.NewNodeInfo(pods...)
 	nodeInfo.SetNode(node)
-	_, reasons, err := daemonSetPredicates(newPod, nodeInfo)
+	_, reasons, err := Predicates(newPod, nodeInfo)
 	if err != nil {
-		glog.Warningf("daemonSetPredicates failed on ds '%s/%s' due to unexpected error: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
+		glog.Warningf("DaemonSet Predicates failed on node %s for ds '%s/%s' due to unexpected error: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, err)
 		return false, false, false, err
 	}
 	for _, r := range reasons {
-		glog.V(4).Infof("daemonSetPredicates failed on ds '%s/%s' for reason: %v", ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
+		glog.V(4).Infof("DaemonSet Predicates failed on node %s for ds '%s/%s' for reason: %v", node.Name, ds.ObjectMeta.Namespace, ds.ObjectMeta.Name, r.GetReason())
 		switch reason := r.(type) {
 		case *predicates.InsufficientResourceError:
 			dsc.eventRecorder.Eventf(ds, v1.EventTypeNormal, FailedPlacementReason, "failed to place pod on %q: %s", node.ObjectMeta.Name, reason.Error())
@@ -911,7 +909,7 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 			predicates.ErrPodAffinityNotMatch,
 			predicates.ErrServiceAffinityViolated:
 			glog.Warningf("unexpected predicate failure reason: %s", reason.GetReason())
-			return false, false, false, fmt.Errorf("unexpected reason: daemonSetPredicates should not return reason %s", reason.GetReason())
+			return false, false, false, fmt.Errorf("unexpected reason: DaemonSet Predicates should not return reason %s", reason.GetReason())
 		default:
 			glog.V(4).Infof("unknown predicate failure reason: %s", reason.GetReason())
 			wantToRun, shouldSchedule, shouldContinueRunning = false, false, false
@@ -925,9 +923,16 @@ func (dsc *DaemonSetsController) nodeShouldRunDaemonPod(node *v1.Node, ds *exten
 	return
 }
 
-// daemonSetPredicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
+func NewPod(ds *extensions.DaemonSet, nodeName string) *v1.Pod {
+	newPod := &v1.Pod{Spec: ds.Spec.Template.Spec, ObjectMeta: ds.Spec.Template.ObjectMeta}
+	newPod.Namespace = ds.Namespace
+	newPod.Spec.NodeName = nodeName
+	return newPod
+}
+
+// Predicates checks if a DaemonSet's pod can be scheduled on a node using GeneralPredicates
 // and PodToleratesNodeTaints predicate
-func daemonSetPredicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func Predicates(pod *v1.Pod, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	var predicateFails []algorithm.PredicateFailureReason
 	critical := utilfeature.DefaultFeatureGate.Enabled(features.ExperimentalCriticalPodAnnotation) && kubelettypes.IsCriticalPod(pod)
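Since `NewPod` and `Predicates` are now exported from the controller package, callers outside it can reuse the controller's own placement logic instead of re-implementing it. The sketch below is illustrative only and not code from this PR: it assumes the controller package is imported under the alias `daemon`, omits the import block (the paths would mirror the ones already used in the file above), and shows how a test could decide to skip nodes whose NoSchedule taints a DaemonSet's pods do not tolerate.

```go
// expectDaemonPodOnNode is a hypothetical helper (not part of this PR) showing
// how a caller such as the DaemonSet e2e test could reuse the newly exported
// NewPod and Predicates to decide whether a daemon pod should be expected on a
// given node.
func expectDaemonPodOnNode(ds *extensions.DaemonSet, node *v1.Node, podsOnNode []*v1.Pod) (bool, error) {
	newPod := daemon.NewPod(ds, node.Name)

	nodeInfo := schedulercache.NewNodeInfo(podsOnNode...)
	nodeInfo.SetNode(node)

	fit, reasons, err := daemon.Predicates(newPod, nodeInfo)
	if err != nil {
		return false, err
	}
	if !fit {
		// Typically PodToleratesNodeTaints: the pod does not tolerate one of the
		// node's NoSchedule taints, so no daemon pod should be created there.
		glog.V(4).Infof("not expecting DaemonSet pod on node %s (%d predicate reasons)", node.Name, len(reasons))
	}
	return fit, nil
}
```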