[migration phase 1] PodFitsResources as framework plugin
@@ -66,7 +66,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
   ]
 }`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsPorts",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
@@ -80,6 +79,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 			wantPlugins: map[string][]kubeschedulerconfig.Plugin{
 				"FilterPlugin": {
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 				},
 			},
@@ -110,7 +110,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		}`,
 			wantPredicates: sets.NewString(
 				"PodFitsHostPorts",
-				"PodFitsResources",
 				"TestServiceAffinity",
 				"TestLabelsPresence",
 			),
@@ -126,6 +125,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 				},
 			},
@@ -161,7 +161,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  ]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"MaxEBSVolumeCount",
@@ -184,6 +183,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 				},
 			},
@@ -223,7 +223,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  ]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -248,6 +247,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
@@ -292,7 +292,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  ]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -320,6 +319,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
@@ -373,7 +373,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -401,6 +400,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
@@ -466,7 +466,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -495,6 +494,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 				},
@@ -561,7 +561,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -590,6 +589,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
@@ -661,7 +661,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -691,6 +690,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
@@ -774,7 +774,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -805,6 +804,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
@@ -889,7 +889,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -921,6 +920,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
@@ -1004,7 +1004,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -1037,6 +1036,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
@@ -1124,7 +1124,6 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		  }]
 		}`,
 			wantPredicates: sets.NewString(
-				"PodFitsResources",
 				"PodFitsHostPorts",
 				"NoVolumeZoneConflict",
 				"CheckNodeMemoryPressure",
@@ -1157,6 +1156,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 				"FilterPlugin": {
 					{Name: "NodeName"},
 					{Name: "NodeAffinity"},
+					{Name: "NodeResources"},
 					{Name: "VolumeRestrictions"},
 					{Name: "TaintToleration"},
 					{Name: "VolumeBinding"},
@@ -1186,6 +1186,7 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 	filterToPredicateMap := map[string]string{
 		"TaintToleration":    "PodToleratesNodeTaints",
 		"NodeName":           "HostName",
+		"NodeResources":      "PodFitsResources",
 		"NodeAffinity":       "MatchNodeSelector",
 		"VolumeBinding":      "CheckVolumeBinding",
 		"VolumeRestrictions": "NoDiskConflict",

@@ -12,6 +12,7 @@ go_library(
         "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/plugins/nodeaffinity:go_default_library",
         "//pkg/scheduler/framework/plugins/nodename:go_default_library",
+        "//pkg/scheduler/framework/plugins/noderesources:go_default_library",
         "//pkg/scheduler/framework/plugins/tainttoleration:go_default_library",
         "//pkg/scheduler/framework/plugins/volumebinding:go_default_library",
         "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
@@ -39,6 +40,7 @@ filegroup(
         "//pkg/scheduler/framework/plugins/migration:all-srcs",
         "//pkg/scheduler/framework/plugins/nodeaffinity:all-srcs",
         "//pkg/scheduler/framework/plugins/nodename:all-srcs",
+        "//pkg/scheduler/framework/plugins/noderesources:all-srcs",
         "//pkg/scheduler/framework/plugins/tainttoleration:all-srcs",
         "//pkg/scheduler/framework/plugins/volumebinding:all-srcs",
         "//pkg/scheduler/framework/plugins/volumerestrictions:all-srcs",

@@ -28,6 +28,7 @@ import (
 	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeaffinity"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodename"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/tainttoleration"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumebinding"
 	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
@@ -56,6 +57,7 @@ type RegistryArgs struct {
 func NewDefaultRegistry(args *RegistryArgs) framework.Registry {
 	return framework.Registry{
 		tainttoleration.Name: tainttoleration.New,
+		noderesources.Name:   noderesources.New,
 		nodename.Name:        nodename.New,
 		nodeaffinity.Name:    nodeaffinity.New,
 		volumebinding.Name: func(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
@@ -94,6 +96,11 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
 			plugins.Filter = appendToPluginSet(plugins.Filter, tainttoleration.Name, nil)
 			return
 		})
+	registry.RegisterPredicate(predicates.PodFitsResourcesPred,
+		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
+			plugins.Filter = appendToPluginSet(plugins.Filter, noderesources.Name, nil)
+			return
+		})
 	registry.RegisterPredicate(predicates.HostNamePred,
 		func(_ ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
 			plugins.Filter = appendToPluginSet(plugins.Filter, nodename.Name, nil)

@@ -104,3 +104,18 @@ func PriorityMetadata(state *framework.CycleState) interface{} {
 	}
 	return meta
 }
+
+// PredicateMetadata returns predicate metadata stored in CycleState.
+func PredicateMetadata(state *framework.CycleState) interface{} {
+	if state == nil {
+		return nil
+	}
+
+	var meta interface{}
+	if s, err := state.Read(PredicatesStateKey); err == nil {
+		meta = s.(*PredicatesStateData).Reference
+	} else {
+		klog.Errorf("reading key %q from CycleState, continuing without metadata: %v", PredicatesStateKey, err)
+	}
+	return meta
+}

pkg/scheduler/framework/plugins/noderesources/BUILD (new file, 49 lines)
@@ -0,0 +1,49 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["node_resources.go"],
+    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/framework/plugins/migration:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)
+
+go_test(
+    name = "go_default_test",
+    srcs = ["node_resources_test.go"],
+    embed = [":go_default_library"],
+    deps = [
+        "//pkg/apis/core/v1/helper:go_default_library",
+        "//pkg/features:go_default_library",
+        "//pkg/scheduler/algorithm/predicates:go_default_library",
+        "//pkg/scheduler/framework/plugins/migration:go_default_library",
+        "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/nodeinfo:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
+        "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
+        "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
+    ],
+)

pkg/scheduler/framework/plugins/noderesources/node_resources.go (new file, 56 lines; filename per the srcs entry in the BUILD file above)
@@ -0,0 +1,56 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package noderesources
+
+import (
+	"fmt"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+// NodeResources is a plugin that checks if a node has sufficient resources.
+type NodeResources struct{}
+
+var _ = framework.FilterPlugin(&NodeResources{})
+
+// Name is the name of the plugin used in the plugin registry and configurations.
+const Name = "NodeResources"
+
+// Name returns name of the plugin. It is used in logs, etc.
+func (pl *NodeResources) Name() string {
+	return Name
+}
+
+// Filter invoked at the filter extension point.
+func (pl *NodeResources) Filter(cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+	meta, ok := migration.PredicateMetadata(cycleState).(predicates.PredicateMetadata)
+	if !ok {
+		return migration.ErrorToFrameworkStatus(fmt.Errorf("%+v convert to predicates.PredicateMetadata error", cycleState))
+	}
+	_, reasons, err := predicates.PodFitsResources(pod, meta, nodeInfo)
+	return migration.PredicateResultToFrameworkStatus(reasons, err)
+}
+
+// New initializes a new plugin and returns it.
+func New(_ *runtime.Unknown, _ framework.FrameworkHandle) (framework.Plugin, error) {
+	return &NodeResources{}, nil
+}
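
End to end, the plugin is exercised like any other filter: construct it through New, prime a CycleState with predicate metadata, and call Filter against a NodeInfo. A condensed sketch of the flow the tests below follow (pod and nodeInfo are assumed inputs):

// Condensed from the tests that follow.
p, _ := noderesources.New(nil, nil)
state := framework.NewCycleState()
state.Write(migration.PredicatesStateKey,
	&migration.PredicatesStateData{Reference: predicates.GetPredicateMetadata(pod, nil)})
status := p.(framework.FilterPlugin).Filter(state, pod, nodeInfo)
if !status.IsSuccess() {
	// The pod does not fit; status carries the Unschedulable reason(s).
}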

pkg/scheduler/framework/plugins/noderesources/node_resources_test.go (new file, 464 lines; filename per the go_test srcs entry in the BUILD file above)
@@ -0,0 +1,464 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package noderesources
+
+import (
+	"reflect"
+	"testing"
+
+	"k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	"k8s.io/apimachinery/pkg/util/sets"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
+	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
+	"k8s.io/kubernetes/pkg/features"
+	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
+	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+)
+
+var (
+	extendedResourceA     = v1.ResourceName("example.com/aaa")
+	extendedResourceB     = v1.ResourceName("example.com/bbb")
+	kubernetesIOResourceA = v1.ResourceName("kubernetes.io/something")
+	kubernetesIOResourceB = v1.ResourceName("subdomain.kubernetes.io/something")
+	hugePageResourceA     = v1helper.HugePageResourceName(resource.MustParse("2Mi"))
+)
+
+func makeResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.NodeResources {
+	return v1.NodeResources{
+		Capacity: v1.ResourceList{
+			v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+			v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
+			v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
+			extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
+			v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
+			hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
+		},
+	}
+}
+
+func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePageA int64) v1.ResourceList {
+	return v1.ResourceList{
+		v1.ResourceCPU:              *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
+		v1.ResourceMemory:           *resource.NewQuantity(memory, resource.BinarySI),
+		v1.ResourcePods:             *resource.NewQuantity(pods, resource.DecimalSI),
+		extendedResourceA:           *resource.NewQuantity(extendedA, resource.DecimalSI),
+		v1.ResourceEphemeralStorage: *resource.NewQuantity(storage, resource.BinarySI),
+		hugePageResourceA:           *resource.NewQuantity(hugePageA, resource.BinarySI),
+	}
+}
+
+func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
+	containers := []v1.Container{}
+	for _, req := range usage {
+		containers = append(containers, v1.Container{
+			Resources: v1.ResourceRequirements{Requests: req.ResourceList()},
+		})
+	}
+	return &v1.Pod{
+		Spec: v1.PodSpec{
+			Containers: containers,
+		},
+	}
+}
+
+func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Pod {
+	pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
+	return pod
+}
+
+func newResourceOverheadPod(pod *v1.Pod, overhead v1.ResourceList) *v1.Pod {
+	pod.Spec.Overhead = overhead
+	return pod
+}
+func TestNodeResources(t *testing.T) {
+	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.PodOverhead, true)()
+
+	enoughPodsTests := []struct {
+		pod                      *v1.Pod
+		nodeInfo                 *schedulernodeinfo.NodeInfo
+		name                     string
+		ignoredExtendedResources sets.String
+		wantStatus               *framework.Status
+	}{
+		{
+			pod: &v1.Pod{},
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
+			name: "no resources requested always fits",
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
+			name:       "too many resources fails",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 2, 10, 10).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
+			name:       "too many resources fails due to init container cpu",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})),
+			name:       "too many resources fails due to highest init container cpu",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+			name:       "too many resources fails due to init container memory",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+			name:       "too many resources fails due to highest init container memory",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+			name: "init container fits because it's the max, not sum, of containers and init containers",
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})),
+			name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
+			name: "both resources fit",
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})),
+			name:       "one resource memory fits",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10).GetReason()),
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+			name:       "one resource cpu fits",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20).GetReason()),
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+			name: "equal edge case",
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 4, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+			name: "equal edge case for init container",
+		},
+		{
+			pod:      newResourcePod(schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
+			name:     "extended resource fits",
+		},
+		{
+			pod:      newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})),
+			name:     "extended resource fits for init container",
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
+			name:       "extended resource capacity enforced",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 10, 0, 5).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
+			name:       "extended resource capacity enforced for init container",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 10, 0, 5).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
+			name:       "extended resource allocatable enforced",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 1, 5, 5).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
+			name:       "extended resource allocatable enforced for init container",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 1, 5, 5).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
+			name:       "extended resource allocatable enforced for multiple containers",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 6, 2, 5).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
+			name: "extended resource allocatable admits multiple init containers",
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
+			name:       "extended resource allocatable enforced for multiple init containers",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceA, 6, 2, 5).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
+			name:       "extended resource allocatable enforced for unknown resource",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceB, 1, 0, 0).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
+			name:       "extended resource allocatable enforced for unknown resource for init container",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceB, 1, 0, 0).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
+			name:       "kubernetes.io resource capacity enforced",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(kubernetesIOResourceA, 10, 0, 0).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
+			name:       "kubernetes.io resource capacity enforced for init container",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(kubernetesIOResourceB, 10, 0, 0).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+			name:       "hugepages resource capacity enforced",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(hugePageResourceA, 10, 0, 5).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}),
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
+			name:       "hugepages resource capacity enforced for init container",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(hugePageResourceA, 10, 0, 5).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
+			name:       "hugepages resource allocatable enforced for multiple containers",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(hugePageResourceA, 6, 2, 5).GetReason()),
+		},
+		{
+			pod: newResourcePod(
+				schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})),
+			ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
+			name:                     "skip checking ignored extended resource",
+			wantStatus:               framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(extendedResourceB, 2, 10, 10).GetReason()),
+		},
+		{
+			pod: newResourceOverheadPod(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+				v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
+			),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
+			ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
+			name:                     "resources + pod overhead fits",
+		},
+		{
+			pod: newResourceOverheadPod(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+				v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
+			),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
+			ignoredExtendedResources: sets.NewString(string(extendedResourceB)),
+			name:                     "requests + overhead does not fit for memory",
+			wantStatus:               framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceMemory, 16, 5, 20).GetReason()),
+		},
+	}
+
+	for _, test := range enoughPodsTests {
+		t.Run(test.name, func(t *testing.T) {
+			meta := predicates.GetPredicateMetadata(test.pod, nil)
+			state := framework.NewCycleState()
+			state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
+
+			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
+			test.nodeInfo.SetNode(&node)
+
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(state, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+
+	notEnoughPodsTests := []struct {
+		pod        *v1.Pod
+		nodeInfo   *schedulernodeinfo.NodeInfo
+		fits       bool
+		name       string
+		wantStatus *framework.Status
+	}{
+		{
+			pod: &v1.Pod{},
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})),
+			name:       "even without specified resources predicate fails when there's no space for additional pod",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})),
+			name:       "even if both resources fit predicate fails when there's no space for additional pod",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+			name:       "even for equal edge case predicate fails when there's no space for additional pod",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
+		},
+		{
+			pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})),
+			name:       "even for equal edge case predicate fails when there's no space for additional pod due to init container",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1).GetReason()),
+		},
+	}
+	for _, test := range notEnoughPodsTests {
+		t.Run(test.name, func(t *testing.T) {
+			meta := predicates.GetPredicateMetadata(test.pod, nil)
+			state := framework.NewCycleState()
+			state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
+
+			node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 1, 0, 0, 0)}}
+			test.nodeInfo.SetNode(&node)
+
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(state, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+
+	storagePodsTests := []struct {
+		pod        *v1.Pod
+		nodeInfo   *schedulernodeinfo.NodeInfo
+		name       string
+		wantStatus *framework.Status
+	}{
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})),
+			name:       "due to container scratch disk",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10).GetReason()),
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 10})),
+			name: "pod fit",
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 25}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
+			name:       "storage ephemeral local storage request exceeds allocatable",
+			wantStatus: framework.NewStatus(framework.Unschedulable, predicates.NewInsufficientResourceError(v1.ResourceEphemeralStorage, 25, 0, 20).GetReason()),
+		},
+		{
+			pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}),
+			nodeInfo: schedulernodeinfo.NewNodeInfo(
+				newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})),
+			name: "pod fits",
+		},
+	}
+
+	for _, test := range storagePodsTests {
+		t.Run(test.name, func(t *testing.T) {
+			meta := predicates.GetPredicateMetadata(test.pod, nil)
+			state := framework.NewCycleState()
+			state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
+
+			node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 32, 5, 20, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 32, 5, 20, 5)}}
+			test.nodeInfo.SetNode(&node)
+
+			p, _ := New(nil, nil)
+			gotStatus := p.(framework.FilterPlugin).Filter(state, test.pod, test.nodeInfo)
+			if !reflect.DeepEqual(gotStatus, test.wantStatus) {
+				t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
+			}
+		})
+	}
+
+}
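
The new package's tests run under either toolchain; assuming a standard checkout, invocations would look like:

go test ./pkg/scheduler/framework/plugins/noderesources/...
bazel test //pkg/scheduler/framework/plugins/noderesources:go_default_test

The bazel target matches the go_default_test rule declared in the new BUILD file above.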