remove OutOfDisk from controllers

David Ashpole
2017-08-28 11:17:38 -07:00
parent 870406bec5
commit 97ab99aaa0
8 changed files with 7 additions and 514 deletions
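The hunks below repeatedly delete fixtures built around the same node shape: a node whose status reports the OutOfDisk condition as True. For orientation, a minimal sketch of that shape (the helper name outOfDiskNode is mine, and the package clause and import paths are assumptions matching what this test file appears to use):

package daemon // assumed package of the test file patched below

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// outOfDiskNode builds the kind of node the removed tests construct:
// its only status condition is OutOfDisk=True.
func outOfDiskNode(name string) *v1.Node {
	return &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Status: v1.NodeStatus{
			Conditions: []v1.NodeCondition{
				{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
			},
		},
	}
}

The removed tests build the equivalent node inline via newNode("not-enough-disk", nil) and then overwrite its Status.Conditions.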


@@ -47,8 +47,6 @@ import (
"k8s.io/kubernetes/pkg/securitycontext"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)
var (
@@ -503,20 +501,6 @@ func TestNotReadNodeDaemonDoesNotLaunchPod(t *testing.T) {
	}
}
// DaemonSets should not place onto OutOfDisk nodes
func TestOutOfDiskNodeDaemonDoesNotLaunchPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("foo")
		ds.Spec.UpdateStrategy = *strategy
		manager, podControl, _ := newTestController(ds)
		node := newNode("not-enough-disk", nil)
		node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
		manager.nodeStore.Add(node)
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
	}
}
func resourcePodSpec(nodeName, memory, cpu string) v1.PodSpec {
	return v1.PodSpec{
		NodeName: nodeName,
@@ -1267,30 +1251,8 @@ func setDaemonSetToleration(ds *extensions.DaemonSet, tolerations []v1.Toleratio
	ds.Spec.Template.Spec.Tolerations = tolerations
}
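setDaemonSetToleration above simply copies a toleration slice into the DaemonSet's pod template, which is what the taint-based test further down (TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod) relies on. A hedged sketch of such a call; the taint key string is illustrative only and does not come from this commit:

// Illustrative only: a DaemonSet that tolerates an out-of-disk NoSchedule taint.
// The key "node.kubernetes.io/out-of-disk" is an assumption, not defined in this diff.
ds := newDaemonSet("critical")
setDaemonSetToleration(ds, []v1.Toleration{{
	Key:      "node.kubernetes.io/out-of-disk",
	Operator: v1.TolerationOpExists,
	Effect:   v1.TaintEffectNoSchedule,
}})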
// DaemonSet should launch a critical pod even when the node is OutOfDisk.
func TestOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("critical")
		ds.Spec.UpdateStrategy = *strategy
		setDaemonSetCritical(ds)
		manager, podControl, _ := newTestController(ds)
		node := newNode("not-enough-disk", nil)
		node.Status.Conditions = []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}
		manager.nodeStore.Add(node)
		// Without enabling critical pod annotation feature gate, we shouldn't create critical pod
		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=False")
		manager.dsStore.Add(ds)
		syncAndValidateDaemonSets(t, manager, ds, podControl, 0, 0, 0)
		// Enabling critical pod annotation feature gate should create critical pod
		utilfeature.DefaultFeatureGate.Set("ExperimentalCriticalPodAnnotation=True")
		syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0)
	}
}
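The test removed above toggles the global ExperimentalCriticalPodAnnotation gate and leaves it set to True for whatever test runs afterwards. A sketch of a small set-and-restore wrapper; the helper name withFeatureGate is an assumption and it builds only on the DefaultFeatureGate.Set call the removed test already uses:

// withFeatureGate applies one "Gate=Value" setting for the duration of fn,
// then applies the restore setting so later tests are unaffected.
// Sketch only: this helper does not exist in the file being patched.
func withFeatureGate(t *testing.T, apply, restore string, fn func()) {
	if err := utilfeature.DefaultFeatureGate.Set(apply); err != nil {
		t.Fatalf("failed to set feature gate %q: %v", apply, err)
	}
	defer func() {
		if err := utilfeature.DefaultFeatureGate.Set(restore); err != nil {
			t.Fatalf("failed to restore feature gate %q: %v", restore, err)
		}
	}()
	fn()
}

Usage would look like withFeatureGate(t, "ExperimentalCriticalPodAnnotation=True", "ExperimentalCriticalPodAnnotation=False", func() { syncAndValidateDaemonSets(t, manager, ds, podControl, 1, 0, 0) }).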
// DaemonSet should launch a critical pod even when the node has OutOfDisk taints.
// TODO(#48843) OutOfDisk taints will be removed in 1.10
func TestTaintOutOfDiskNodeDaemonLaunchesCriticalPod(t *testing.T) {
	for _, strategy := range updateStrategies() {
		ds := newDaemonSet("critical")
@@ -1454,23 +1416,6 @@ func TestNodeShouldRunDaemonPod(t *testing.T) {
			shouldSchedule: true,
			shouldContinueRunning: true,
		},
		{
			ds: &extensions.DaemonSet{
				Spec: extensions.DaemonSetSpec{
					Selector: &metav1.LabelSelector{MatchLabels: simpleDaemonSetLabel},
					Template: v1.PodTemplateSpec{
						ObjectMeta: metav1.ObjectMeta{
							Labels: simpleDaemonSetLabel,
						},
						Spec: resourcePodSpec("", "50M", "0.5"),
					},
				},
			},
			nodeCondition: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}},
			wantToRun: true,
			shouldSchedule: false,
			shouldContinueRunning: true,
		},
		{
			ds: &extensions.DaemonSet{
				Spec: extensions.DaemonSetSpec{
@@ -1612,41 +1557,6 @@ func TestUpdateNode(t *testing.T) {
			ds: newDaemonSet("ds"),
			shouldEnqueue: true,
		},
		{
			test: "Node conditions changed",
			oldNode: func() *v1.Node {
				node := newNode("node1", nil)
				node.Status.Conditions = []v1.NodeCondition{
					{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
				}
				return node
			}(),
			newNode: newNode("node1", nil),
			ds: newDaemonSet("ds"),
			shouldEnqueue: true,
		},
		{
			test: "Node conditions not changed",
			oldNode: func() *v1.Node {
				node := newNode("node1", nil)
				node.Status.Conditions = []v1.NodeCondition{
					{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
					{Type: v1.NodeMemoryPressure, Status: v1.ConditionFalse},
					{Type: v1.NodeDiskPressure, Status: v1.ConditionFalse},
					{Type: v1.NodeNetworkUnavailable, Status: v1.ConditionFalse},
				}
				return node
			}(),
			newNode: func() *v1.Node {
				node := newNode("node1", nil)
				node.Status.Conditions = []v1.NodeCondition{
					{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
				}
				return node
			}(),
			ds: newDaemonSet("ds"),
			shouldEnqueue: false,
		},
	}
	for _, c := range cases {
		for _, strategy := range updateStrategies() {
@@ -2205,57 +2115,3 @@ func getQueuedKeys(queue workqueue.RateLimitingInterface) []string {
	sort.Strings(keys)
	return keys
}
func TestPredicates(t *testing.T) {
	type args struct {
		pod *v1.Pod
		node *v1.Node
	}
	tests := []struct {
		name string
		args args
		want bool
		wantRes []algorithm.PredicateFailureReason
		wantErr bool
	}{
		{
			name: "return OutOfDiskErr if outOfDisk",
			args: args{
				pod: newPod("pod1-", "node-0", nil, nil),
				node: &v1.Node{
					ObjectMeta: metav1.ObjectMeta{
						Name: "node-0",
					},
					Status: v1.NodeStatus{
						Conditions: []v1.NodeCondition{
							{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue},
						},
						Allocatable: v1.ResourceList{
							v1.ResourcePods: resource.MustParse("100"),
						},
					},
				},
			},
			want: false,
			wantRes: []algorithm.PredicateFailureReason{predicates.ErrNodeOutOfDisk},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			nodeInfo := schedulercache.NewNodeInfo(tt.args.pod)
			nodeInfo.SetNode(tt.args.node)
			got, res, err := Predicates(tt.args.pod, nodeInfo)
			if (err != nil) != tt.wantErr {
				t.Errorf("%s (error): error = %v, wantErr %v", tt.name, err, tt.wantErr)
				return
			}
			if got != tt.want {
				t.Errorf("%s (fit): got = %v, want %v", tt.name, got, tt.want)
			}
			if !reflect.DeepEqual(res, tt.wantRes) {
				t.Errorf("%s (reasons): got = %v, want %v", tt.name, res, tt.wantRes)
			}
		})
	}
}