From a5d81727151fb7c6ec1baafe7191f634b7eaa1f7 Mon Sep 17 00:00:00 2001
From: Abdullah Gharaibeh
Date: Mon, 6 Apr 2020 21:40:15 -0400
Subject: [PATCH] move nodeinfo type to framework pkg

---
 pkg/scheduler/BUILD | 4 -
 pkg/scheduler/core/BUILD | 6 +-
 pkg/scheduler/core/extender.go | 11 +-
 pkg/scheduler/core/extender_test.go | 6 +-
 pkg/scheduler/core/generic_scheduler.go | 16 +-
 pkg/scheduler/core/generic_scheduler_test.go | 27 +-
 pkg/scheduler/factory_test.go | 6 +-
 .../plugins/defaultpodtopologyspread/BUILD | 1 -
 .../default_pod_topology_spread.go | 3 +-
 .../framework/plugins/imagelocality/BUILD | 1 -
 .../plugins/imagelocality/image_locality.go | 5 +-
 .../framework/plugins/interpodaffinity/BUILD | 4 -
 .../plugins/interpodaffinity/filtering.go | 24 +-
 .../interpodaffinity/filtering_test.go | 5 +-
 .../plugins/interpodaffinity/plugin.go | 3 +-
 .../plugins/interpodaffinity/scoring.go | 3 +-
 .../framework/plugins/nodeaffinity/BUILD | 2 -
 .../plugins/nodeaffinity/node_affinity.go | 3 +-
 .../nodeaffinity/node_affinity_test.go | 3 +-
 .../framework/plugins/nodelabel/BUILD | 2 -
 .../framework/plugins/nodelabel/node_label.go | 3 +-
 .../plugins/nodelabel/node_label_test.go | 5 +-
 .../framework/plugins/nodename/BUILD | 2 -
 .../framework/plugins/nodename/node_name.go | 5 +-
 .../plugins/nodename/node_name_test.go | 3 +-
 .../framework/plugins/nodeports/BUILD | 2 -
 .../framework/plugins/nodeports/node_ports.go | 7 +-
 .../plugins/nodeports/node_ports_test.go | 31 ++-
 .../framework/plugins/noderesources/BUILD | 2 -
 .../framework/plugins/noderesources/fit.go | 11 +-
 .../plugins/noderesources/fit_test.go | 253 +++++++++---------
 .../noderesources/resource_allocation.go | 5 +-
 .../plugins/noderesources/resource_limits.go | 11 +-
 .../framework/plugins/nodeunschedulable/BUILD | 2 -
 .../nodeunschedulable/node_unschedulable.go | 3 +-
 .../node_unschedulable_test.go | 3 +-
 .../framework/plugins/nodevolumelimits/BUILD | 4 +-
 .../framework/plugins/nodevolumelimits/csi.go | 5 +-
 .../plugins/nodevolumelimits/csi_test.go | 25 +-
 .../plugins/nodevolumelimits/non_csi.go | 3 +-
 .../plugins/nodevolumelimits/non_csi_test.go | 10 +-
 .../framework/plugins/podtopologyspread/BUILD | 3 -
 .../plugins/podtopologyspread/filtering.go | 7 +-
 .../podtopologyspread/filtering_test.go | 3 +-
 .../plugins/podtopologyspread/plugin.go | 3 +-
 .../framework/plugins/serviceaffinity/BUILD | 5 +-
 .../serviceaffinity/service_affinity.go | 12 +-
 .../serviceaffinity/service_affinity_test.go | 13 +-
 .../framework/plugins/tainttoleration/BUILD | 2 -
 .../tainttoleration/taint_toleration.go | 3 +-
 .../tainttoleration/taint_toleration_test.go | 3 +-
 .../framework/plugins/volumebinding/BUILD | 2 -
 .../plugins/volumebinding/volume_binding.go | 3 +-
 .../volumebinding/volume_binding_test.go | 3 +-
 .../plugins/volumerestrictions/BUILD | 2 -
 .../volumerestrictions/volume_restrictions.go | 3 +-
 .../volume_restrictions_test.go | 41 ++-
 .../framework/plugins/volumezone/BUILD | 4 +-
 .../plugins/volumezone/volume_zone.go | 3 +-
 .../plugins/volumezone/volume_zone_test.go | 23 +-
 pkg/scheduler/framework/v1alpha1/BUILD | 19 +-
 .../v1alpha1}/fake/BUILD | 5 +-
 .../v1alpha1}/fake/listers.go | 23 +-
 pkg/scheduler/framework/v1alpha1/framework.go | 22 +-
 .../framework/v1alpha1/framework_test.go | 11 +-
 pkg/scheduler/framework/v1alpha1/interface.go | 16 +-
 .../v1alpha1}/listers.go | 9 +-
 .../v1alpha1/types.go} | 116 +++++++-
 .../v1alpha1/types_test.go} | 212 ++++++++++++++-
 pkg/scheduler/internal/cache/BUILD | 5 +-
 pkg/scheduler/internal/cache/cache.go | 47 ++--
 pkg/scheduler/internal/cache/cache_test.go | 156 +++++------
 pkg/scheduler/internal/cache/debugger/BUILD | 4 +-
 .../internal/cache/debugger/comparer.go | 6 +-
 .../internal/cache/debugger/comparer_test.go | 10 +-
 .../internal/cache/debugger/dumper.go | 4 +-
 pkg/scheduler/internal/cache/fake/BUILD | 2 +-
 .../internal/cache/fake/fake_cache.go | 4 +-
 pkg/scheduler/internal/cache/interface.go | 7 +-
 pkg/scheduler/internal/cache/snapshot.go | 45 ++--
 pkg/scheduler/internal/cache/snapshot_test.go | 8 +-
 pkg/scheduler/listers/BUILD | 31 ---
 pkg/scheduler/nodeinfo/BUILD | 2 +-
 pkg/scheduler/nodeinfo/node_info.go | 10 +-
 pkg/scheduler/scheduler_test.go | 3 +-
 pkg/scheduler/types/BUILD | 49 ----
 pkg/scheduler/types/host_ports.go | 135 ----------
 pkg/scheduler/types/host_ports_test.go | 231 ----------------
 test/e2e/apps/BUILD | 2 +-
 test/e2e/apps/daemon_set.go | 4 +-
 test/e2e/framework/.import-restrictions | 1 -
 test/e2e/framework/node/BUILD | 2 +-
 test/e2e/framework/node/resource.go | 4 +-
 test/integration/framework/BUILD | 2 +-
 test/integration/framework/util.go | 4 +-
 test/integration/scheduler/BUILD | 1 -
 test/integration/scheduler/framework_test.go | 3 +-
 test/integration/scheduler/preemption_test.go | 7 +-
 98 files changed, 837 insertions(+), 1053 deletions(-)
 rename pkg/scheduler/{listers => framework/v1alpha1}/fake/BUILD (86%)
 rename pkg/scheduler/{listers => framework/v1alpha1}/fake/listers.go (92%)
 rename pkg/scheduler/{listers => framework/v1alpha1}/listers.go (91%)
 rename pkg/scheduler/{types/node_info.go => framework/v1alpha1/types.go} (89%)
 rename pkg/scheduler/{types/node_info_test.go => framework/v1alpha1/types_test.go} (83%)
 delete mode 100644 pkg/scheduler/listers/BUILD
 delete mode 100644 pkg/scheduler/types/BUILD
 delete mode 100644 pkg/scheduler/types/host_ports.go
 delete mode 100644 pkg/scheduler/types/host_ports_test.go

diff --git a/pkg/scheduler/BUILD b/pkg/scheduler/BUILD
index 8377f3d48d9..c202cb145aa 100644
--- a/pkg/scheduler/BUILD
+++ b/pkg/scheduler/BUILD
@@ -80,10 +80,8 @@ go_test(
         "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/internal/cache/fake:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
-        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/profile:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
-        "//pkg/scheduler/types:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/events/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -127,12 +125,10 @@ filegroup(
         "//pkg/scheduler/internal/heap:all-srcs",
         "//pkg/scheduler/internal/parallelize:all-srcs",
         "//pkg/scheduler/internal/queue:all-srcs",
-        "//pkg/scheduler/listers:all-srcs",
         "//pkg/scheduler/metrics:all-srcs",
         "//pkg/scheduler/nodeinfo:all-srcs",
         "//pkg/scheduler/profile:all-srcs",
         "//pkg/scheduler/testing:all-srcs",
-        "//pkg/scheduler/types:all-srcs",
         "//pkg/scheduler/util:all-srcs",
     ],
     tags = ["automanaged"],
diff --git a/pkg/scheduler/core/BUILD b/pkg/scheduler/core/BUILD
index f375b967249..5c3331d4a65 100644
--- a/pkg/scheduler/core/BUILD
+++ b/pkg/scheduler/core/BUILD
@@ -15,10 +15,8 @@ go_library(
        "//pkg/scheduler/internal/cache:go_default_library",
        "//pkg/scheduler/internal/parallelize:go_default_library",
        "//pkg/scheduler/internal/queue:go_default_library",
-       "//pkg/scheduler/listers:go_default_library",
        "//pkg/scheduler/metrics:go_default_library",
        "//pkg/scheduler/profile:go_default_library",
-
"//pkg/scheduler/types:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", @@ -60,13 +58,11 @@ go_test( "//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library", "//pkg/scheduler/framework/plugins/volumezone:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/framework/v1alpha1/fake:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", - "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/profile:go_default_library", "//pkg/scheduler/testing:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/policy/v1beta1:go_default_library", diff --git a/pkg/scheduler/core/extender.go b/pkg/scheduler/core/extender.go index 6dd46bc275b..420c5561ee9 100644 --- a/pkg/scheduler/core/extender.go +++ b/pkg/scheduler/core/extender.go @@ -30,8 +30,7 @@ import ( restclient "k8s.io/client-go/rest" extenderv1 "k8s.io/kube-scheduler/extender/v1" schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/kubernetes/pkg/scheduler/listers" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) const ( @@ -77,7 +76,7 @@ type SchedulerExtender interface { ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*extenderv1.Victims, - nodeInfos listers.NodeInfoLister) (map[*v1.Node]*extenderv1.Victims, error) + nodeInfos framework.NodeInfoLister) (map[*v1.Node]*extenderv1.Victims, error) // SupportsPreemption returns if the scheduler extender support preemption or not. SupportsPreemption() bool @@ -214,7 +213,7 @@ func (h *HTTPExtender) SupportsPreemption() bool { func (h *HTTPExtender) ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*extenderv1.Victims, - nodeInfos listers.NodeInfoLister, + nodeInfos framework.NodeInfoLister, ) (map[*v1.Node]*extenderv1.Victims, error) { var ( result extenderv1.ExtenderPreemptionResult @@ -258,7 +257,7 @@ func (h *HTTPExtender) ProcessPreemption( // such as UIDs and names, to object pointers. func (h *HTTPExtender) convertToNodeToVictims( nodeNameToMetaVictims map[string]*extenderv1.MetaVictims, - nodeInfos listers.NodeInfoLister, + nodeInfos framework.NodeInfoLister, ) (map[*v1.Node]*extenderv1.Victims, error) { nodeToVictims := map[*v1.Node]*extenderv1.Victims{} for nodeName, metaVictims := range nodeNameToMetaVictims { @@ -287,7 +286,7 @@ func (h *HTTPExtender) convertToNodeToVictims( // and extender, i.e. when the pod is not found in nodeInfo.Pods. 
func (h *HTTPExtender) convertPodUIDToPod( metaPod *extenderv1.MetaPod, - nodeInfo *schedulertypes.NodeInfo) (*v1.Pod, error) { + nodeInfo *framework.NodeInfo) (*v1.Pod, error) { for _, pod := range nodeInfo.Pods() { if string(pod.UID) == metaPod.UID { return pod, nil diff --git a/pkg/scheduler/core/extender_test.go b/pkg/scheduler/core/extender_test.go index 67cd9f9cc40..6af3d532fe2 100644 --- a/pkg/scheduler/core/extender_test.go +++ b/pkg/scheduler/core/extender_test.go @@ -40,10 +40,8 @@ import ( framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/profile" st "k8s.io/kubernetes/pkg/scheduler/testing" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -143,7 +141,7 @@ type FakeExtender struct { ignorable bool // Cached node information for fake extender - cachedNodeNameToInfo map[string]*schedulertypes.NodeInfo + cachedNodeNameToInfo map[string]*framework.NodeInfo } func (f *FakeExtender) Name() string { @@ -162,7 +160,7 @@ func (f *FakeExtender) SupportsPreemption() bool { func (f *FakeExtender) ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*extenderv1.Victims, - nodeInfos listers.NodeInfoLister, + nodeInfos framework.NodeInfoLister, ) (map[*v1.Node]*extenderv1.Victims, error) { nodeToVictimsCopy := map[*v1.Node]*extenderv1.Victims{} // We don't want to change the original nodeToVictims diff --git a/pkg/scheduler/core/generic_scheduler.go b/pkg/scheduler/core/generic_scheduler.go index 527af3dc277..3a1acde5358 100644 --- a/pkg/scheduler/core/generic_scheduler.go +++ b/pkg/scheduler/core/generic_scheduler.go @@ -41,10 +41,8 @@ import ( internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/metrics" "k8s.io/kubernetes/pkg/scheduler/profile" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" "k8s.io/kubernetes/pkg/scheduler/util" utiltrace "k8s.io/utils/trace" ) @@ -524,7 +522,7 @@ func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v // addNominatedPods adds pods with equal or greater priority which are nominated // to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState, // 3) augmented nodeInfo. -func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *schedulertypes.NodeInfo) (bool, *framework.CycleState, *schedulertypes.NodeInfo, error) { +func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *framework.NodeInfo) (bool, *framework.CycleState, *framework.NodeInfo, error) { if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil { // This may happen only in tests. 
return false, state, nodeInfo, nil @@ -564,7 +562,7 @@ func (g *genericScheduler) podPassesFiltersOnNode( prof *profile.Profile, state *framework.CycleState, pod *v1.Pod, - info *schedulertypes.NodeInfo, + info *framework.NodeInfo, ) (bool, *framework.Status, error) { var status *framework.Status @@ -856,7 +854,7 @@ func (g *genericScheduler) selectNodesForPreemption( prof *profile.Profile, state *framework.CycleState, pod *v1.Pod, - potentialNodes []*schedulertypes.NodeInfo, + potentialNodes []*framework.NodeInfo, pdbs []*policy.PodDisruptionBudget, ) (map[*v1.Node]*extenderv1.Victims, error) { nodeToVictims := map[*v1.Node]*extenderv1.Victims{} @@ -946,7 +944,7 @@ func (g *genericScheduler) selectVictimsOnNode( prof *profile.Profile, state *framework.CycleState, pod *v1.Pod, - nodeInfo *schedulertypes.NodeInfo, + nodeInfo *framework.NodeInfo, pdbs []*policy.PodDisruptionBudget, ) ([]*v1.Pod, int, bool) { var potentialVictims []*v1.Pod @@ -1034,8 +1032,8 @@ func (g *genericScheduler) selectVictimsOnNode( // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates // that may be satisfied by removing pods from the node. -func nodesWherePreemptionMightHelp(nodes []*schedulertypes.NodeInfo, fitErr *FitError) []*schedulertypes.NodeInfo { - var potentialNodes []*schedulertypes.NodeInfo +func nodesWherePreemptionMightHelp(nodes []*framework.NodeInfo, fitErr *FitError) []*framework.NodeInfo { + var potentialNodes []*framework.NodeInfo for _, node := range nodes { name := node.Node().Name // We reply on the status by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable' @@ -1055,7 +1053,7 @@ func nodesWherePreemptionMightHelp(nodes []*schedulertypes.NodeInfo, fitErr *Fit // considered for preemption. // We look at the node that is nominated for this pod and as long as there are // terminating pods on the node, we don't consider this for preempting more pods. -func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos listers.NodeInfoLister, enableNonPreempting bool) bool { +func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister, enableNonPreempting bool) bool { if enableNonPreempting && pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever { klog.V(5).Infof("Pod %v/%v is not eligible for preemption because it has a preemptionPolicy of %v", pod.Namespace, pod.Name, v1.PreemptNever) return false diff --git a/pkg/scheduler/core/generic_scheduler_test.go b/pkg/scheduler/core/generic_scheduler_test.go index 859a2f9da15..12cff0672b8 100644 --- a/pkg/scheduler/core/generic_scheduler_test.go +++ b/pkg/scheduler/core/generic_scheduler_test.go @@ -54,12 +54,11 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" "k8s.io/kubernetes/pkg/scheduler/profile" st "k8s.io/kubernetes/pkg/scheduler/testing" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -77,7 +76,7 @@ func (pl *trueFilterPlugin) Name() string { } // Filter invoked at the filter extension point. 
-func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { return nil } @@ -94,7 +93,7 @@ func (pl *falseFilterPlugin) Name() string { } // Filter invoked at the filter extension point. -func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { return framework.NewStatus(framework.Unschedulable, ErrReasonFake) } @@ -111,7 +110,7 @@ func (pl *matchFilterPlugin) Name() string { } // Filter invoked at the filter extension point. -func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() if node == nil { return framework.NewStatus(framework.Error, "node not found") @@ -135,7 +134,7 @@ func (pl *noPodsFilterPlugin) Name() string { } // Filter invoked at the filter extension point. -func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if len(nodeInfo.Pods()) == 0 { return nil } @@ -160,7 +159,7 @@ func (pl *fakeFilterPlugin) Name() string { } // Filter invoked at the filter extension point. -func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { atomic.AddInt32(&pl.numFilterCalled, 1) if returnCode, ok := pl.failedNodeReturnCodeMap[nodeInfo.Node().Name]; ok { @@ -808,7 +807,7 @@ func TestGenericScheduler(t *testing.T) { var pvcs []v1.PersistentVolumeClaim pvcs = append(pvcs, test.pvcs...) - pvcLister := fakelisters.PersistentVolumeClaimLister(pvcs) + pvcLister := fakeframework.PersistentVolumeClaimLister(pvcs) scheduler := NewGenericScheduler( cache, @@ -2028,9 +2027,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) { fitErr := FitError{ FilteredNodesStatuses: test.nodesStatuses, } - var nodeInfos []*schedulertypes.NodeInfo + var nodeInfos []*framework.NodeInfo for _, n := range makeNodeList(nodeNames) { - ni := schedulertypes.NewNodeInfo() + ni := framework.NewNodeInfo() ni.SetNode(n) nodeInfos = append(nodeInfos, ni) } @@ -2371,7 +2370,7 @@ func TestPreempt(t *testing.T) { for _, pod := range test.pods { cache.AddPod(pod) } - cachedNodeInfoMap := map[string]*schedulertypes.NodeInfo{} + cachedNodeInfoMap := map[string]*framework.NodeInfo{} nodeNames := defaultNodeNames if len(test.nodeNames) != 0 { nodeNames = test.nodeNames @@ -2391,7 +2390,7 @@ func TestPreempt(t *testing.T) { nodeNames[i] = node.Name // Set nodeInfo to extenders to mock extenders' cache for preemption. 
- cachedNodeInfo := schedulertypes.NewNodeInfo() + cachedNodeInfo := framework.NewNodeInfo() cachedNodeInfo.SetNode(node) cachedNodeInfoMap[node.Name] = cachedNodeInfo } @@ -2570,8 +2569,8 @@ func TestFairEvaluationForNodes(t *testing.T) { } } -func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulertypes.NodeInfo, error) { - var nodeInfos []*schedulertypes.NodeInfo +func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*framework.NodeInfo, error) { + var nodeInfos []*framework.NodeInfo for _, n := range nodes { nodeInfo, err := snapshot.NodeInfos().Get(n.Name) if err != nil { diff --git a/pkg/scheduler/factory_test.go b/pkg/scheduler/factory_test.go index ae1db9a95f8..cd1eb70c0f0 100644 --- a/pkg/scheduler/factory_test.go +++ b/pkg/scheduler/factory_test.go @@ -53,9 +53,7 @@ import ( framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/profile" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) const ( @@ -538,7 +536,7 @@ func (f *fakeExtender) IsIgnorable() bool { func (f *fakeExtender) ProcessPreemption( pod *v1.Pod, nodeToVictims map[*v1.Node]*extenderv1.Victims, - nodeInfos listers.NodeInfoLister, + nodeInfos framework.NodeInfoLister, ) (map[*v1.Node]*extenderv1.Victims, error) { return nil, nil } @@ -593,6 +591,6 @@ func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions { return nil } -func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { return nil } diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD index 82da0677a02..6b40217b153 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", diff --git a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go index 48ddb306b68..d0019114123 100644 --- a/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go +++ b/pkg/scheduler/framework/plugins/defaultpodtopologyspread/default_pod_topology_spread.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" utilnode "k8s.io/kubernetes/pkg/util/node" ) @@ -196,7 +195,7 @@ func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin } // countMatchingPods counts pods based on namespace and matching all selectors -func countMatchingPods(namespace string, selector labels.Selector, nodeInfo 
*schedulertypes.NodeInfo) int { +func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *framework.NodeInfo) int { if len(nodeInfo.Pods()) == 0 || selector.Empty() { return 0 } diff --git a/pkg/scheduler/framework/plugins/imagelocality/BUILD b/pkg/scheduler/framework/plugins/imagelocality/BUILD index 1e1fc7687b1..e941ec0df0f 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/BUILD +++ b/pkg/scheduler/framework/plugins/imagelocality/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/util/parsers:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/framework/plugins/imagelocality/image_locality.go b/pkg/scheduler/framework/plugins/imagelocality/image_locality.go index 8d73cfadcc0..2da5119bab9 100644 --- a/pkg/scheduler/framework/plugins/imagelocality/image_locality.go +++ b/pkg/scheduler/framework/plugins/imagelocality/image_locality.go @@ -24,7 +24,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" "k8s.io/kubernetes/pkg/util/parsers" ) @@ -94,7 +93,7 @@ func calculatePriority(sumScores int64) int64 { // sumImageScores returns the sum of image scores of all the containers that are already on the node. // Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate // the final score. Note that the init containers are not considered for it's rare for users to deploy huge init containers. -func sumImageScores(nodeInfo *schedulertypes.NodeInfo, containers []v1.Container, totalNumNodes int) int64 { +func sumImageScores(nodeInfo *framework.NodeInfo, containers []v1.Container, totalNumNodes int) int64 { var sum int64 imageStates := nodeInfo.ImageStates() @@ -111,7 +110,7 @@ func sumImageScores(nodeInfo *schedulertypes.NodeInfo, containers []v1.Container // The size of the image is used as the base score, scaled by a factor which considers how much nodes the image has "spread" to. // This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or // a few nodes due to image locality. 
-func scaledImageScore(imageState *schedulertypes.ImageStateSummary, totalNumNodes int) int64 { +func scaledImageScore(imageState *framework.ImageStateSummary, totalNumNodes int) int64 { spread := float64(imageState.NumNodes) / float64(totalNumNodes) return int64(float64(imageState.Size) * spread) } diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/BUILD b/pkg/scheduler/framework/plugins/interpodaffinity/BUILD index 8bea46d3872..7aa3ff0a393 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/interpodaffinity/BUILD @@ -12,9 +12,6 @@ go_library( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/parallelize:go_default_library", - "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/nodeinfo:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", @@ -37,7 +34,6 @@ go_test( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go b/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go index c71dde56287..5ebcde1d773 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/filtering.go @@ -28,8 +28,6 @@ import ( "k8s.io/klog" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" - "k8s.io/kubernetes/pkg/scheduler/nodeinfo" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -210,7 +208,7 @@ func podMatchesAllAffinityTerms(pod *v1.Pod, terms []*affinityTerm) bool { // getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node: // (1) Whether it has PodAntiAffinity // (2) Whether any AffinityTerm matches the incoming pod -func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.NodeInfo) (topologyToMatchedTermCount, error) { +func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*framework.NodeInfo) (topologyToMatchedTermCount, error) { errCh := parallelize.NewErrorChannel() var lock sync.Mutex topologyMap := make(topologyToMatchedTermCount) @@ -254,7 +252,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.Node // It returns a topologyToMatchedTermCount that are checked later by the affinity // predicate. With this topologyToMatchedTermCount available, the affinity predicate does not // need to check all the pods in the cluster. 
-func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*schedulertypes.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount, error) { +func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*framework.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount, error) { topologyPairsAffinityPodsMap := make(topologyToMatchedTermCount) topologyToMatchedExistingAntiAffinityTerms := make(topologyToMatchedTermCount) affinity := pod.Spec.Affinity @@ -329,8 +327,8 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool { // PreFilter invoked at the prefilter extension point. func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status { - var allNodes []*schedulertypes.NodeInfo - var havePodsWithAffinityNodes []*schedulertypes.NodeInfo + var allNodes []*framework.NodeInfo + var havePodsWithAffinityNodes []*framework.NodeInfo var err error if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil { return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos: %v", err)) @@ -367,7 +365,7 @@ func (pl *InterPodAffinity) PreFilterExtensions() framework.PreFilterExtensions } // AddPod from pre-computed data in cycleState. -func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { state, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -377,7 +375,7 @@ func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.Cy } // RemovePod from pre-computed data in cycleState. -func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { state, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -402,7 +400,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error // Checks if scheduling the pod onto this node would break any anti-affinity // terms indicated by the existing pods. -func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *schedulertypes.NodeInfo) (bool, error) { +func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *framework.NodeInfo) (bool, error) { node := nodeInfo.Node() topologyMap := state.topologyToMatchedExistingAntiAffinityTerms @@ -418,7 +416,7 @@ func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state } // nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches topology of all the "terms" for the given "pod". 
-func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *schedulertypes.NodeInfo, terms []v1.PodAffinityTerm) bool { +func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *framework.NodeInfo, terms []v1.PodAffinityTerm) bool { node := nodeInfo.Node() for _, term := range terms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { @@ -435,7 +433,7 @@ func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTer // nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches // topology of any "term" for the given "pod". -func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *schedulertypes.NodeInfo, terms []v1.PodAffinityTerm) bool { +func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *framework.NodeInfo, terms []v1.PodAffinityTerm) bool { node := nodeInfo.Node() for _, term := range terms { if topologyValue, ok := node.Labels[term.TopologyKey]; ok { @@ -478,7 +476,7 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P // This function returns two boolean flags. The first boolean flag indicates whether the pod matches affinity rules // or not. The second boolean flag indicates if the pod matches anti-affinity rules. func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, - state *preFilterState, nodeInfo *schedulertypes.NodeInfo, + state *preFilterState, nodeInfo *framework.NodeInfo, affinity *v1.Affinity) (bool, bool, error) { node := nodeInfo.Node() if node == nil { @@ -514,7 +512,7 @@ func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, // Filter invoked at the filter extension point. // It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration. 
-func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { state, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go b/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go index cd2d03ca408..0e346457c0a 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/filtering_test.go @@ -25,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) var ( @@ -1636,7 +1635,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) { func TestPreFilterDisabled(t *testing.T) { pod := &v1.Pod{} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) p := &InterPodAffinity{} @@ -2211,7 +2210,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) { } } -func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *schedulertypes.NodeInfo { +func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *framework.NodeInfo { t.Helper() nodeInfo, err := snapshot.NodeInfos().Get(name) if err != nil { diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go b/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go index f9fd6ec1c29..ff8cb7f7a62 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/plugin.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/validation/field" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/utils/pointer" ) @@ -54,7 +53,7 @@ var _ framework.ScorePlugin = &InterPodAffinity{} // InterPodAffinity is a plugin that checks inter pod affinity type InterPodAffinity struct { Args - sharedLister schedulerlisters.SharedLister + sharedLister framework.SharedLister sync.Mutex } diff --git a/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go b/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go index a211a18ccbb..8578fb44dc0 100644 --- a/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go +++ b/pkg/scheduler/framework/plugins/interpodaffinity/scoring.go @@ -25,7 +25,6 @@ import ( "k8s.io/klog" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -118,7 +117,7 @@ func (m scoreMap) append(other scoreMap) { } } -func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *schedulertypes.NodeInfo, incomingPod *v1.Pod, topoScore scoreMap) error { +func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *framework.NodeInfo, incomingPod *v1.Pod, topoScore scoreMap) error { existingPodAffinity := existingPod.Spec.Affinity 
existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/BUILD b/pkg/scheduler/framework/plugins/nodeaffinity/BUILD index bebdcf729fa..986561ca45e 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/nodeaffinity/BUILD @@ -9,7 +9,6 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -38,7 +37,6 @@ go_test( "//pkg/apis/core:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go index 915cd51102e..7c575c48985 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go +++ b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity.go @@ -26,7 +26,6 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // NodeAffinity is a plugin that checks if a pod node selector matches the node label. @@ -51,7 +50,7 @@ func (pl *NodeAffinity) Name() string { } // Filter invoked at the filter extension point. -func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() if node == nil { return framework.NewStatus(framework.Error, "node not found") diff --git a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go index a2d77dde9bc..fdefc85479c 100644 --- a/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go +++ b/pkg/scheduler/framework/plugins/nodeaffinity/node_affinity_test.go @@ -26,7 +26,6 @@ import ( api "k8s.io/kubernetes/pkg/apis/core" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented. 
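To make the pattern in the plugin hunks above concrete: after this patch a Filter plugin needs only the framework/v1alpha1 package for both the plugin interfaces and NodeInfo, instead of importing pkg/scheduler/types separately. The sketch below is illustrative and is not part of the patch; the ExamplePlugin name and its "reject non-empty nodes" rule are invented for the example, while the signatures and helpers (Filter, CycleState, NodeInfo, NewStatus, Node(), Pods()) are the ones shown in the hunks.

// Illustrative only; not part of this patch.
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// ExamplePlugin is a hypothetical Filter plugin using the relocated types.
type ExamplePlugin struct{}

var _ framework.FilterPlugin = &ExamplePlugin{}

// Name returns the plugin name.
func (pl *ExamplePlugin) Name() string { return "Example" }

// Filter rejects any node that already runs pods, mirroring the test plugins
// in generic_scheduler_test.go; NodeInfo now comes from the framework package.
func (pl *ExamplePlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
	if nodeInfo.Node() == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}
	if len(nodeInfo.Pods()) > 0 {
		return framework.NewStatus(framework.Unschedulable, "node is not empty")
	}
	return nil
}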
@@ -694,7 +693,7 @@ func TestNodeAffinity(t *testing.T) { Name: test.nodeName, Labels: test.labels, }} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(&node) p, _ := New(nil, nil) diff --git a/pkg/scheduler/framework/plugins/nodelabel/BUILD b/pkg/scheduler/framework/plugins/nodelabel/BUILD index 696e214c62f..02c13c001b8 100644 --- a/pkg/scheduler/framework/plugins/nodelabel/BUILD +++ b/pkg/scheduler/framework/plugins/nodelabel/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -21,7 +20,6 @@ go_test( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", diff --git a/pkg/scheduler/framework/plugins/nodelabel/node_label.go b/pkg/scheduler/framework/plugins/nodelabel/node_label.go index 4cc1c5c23d3..1b6701d49ba 100644 --- a/pkg/scheduler/framework/plugins/nodelabel/node_label.go +++ b/pkg/scheduler/framework/plugins/nodelabel/node_label.go @@ -24,7 +24,6 @@ import ( "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // Name of this plugin. @@ -102,7 +101,7 @@ func (pl *NodeLabel) Name() string { // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful // A node may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this node. 
-func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() if node == nil { return framework.NewStatus(framework.Error, "node not found") diff --git a/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go b/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go index 769319e4cdc..1b52e68a7c2 100644 --- a/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go +++ b/pkg/scheduler/framework/plugins/nodelabel/node_label_test.go @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func TestValidateNodeLabelArgs(t *testing.T) { @@ -133,7 +132,7 @@ func TestNodeLabelFilter(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(&node) args := &runtime.Unknown{Raw: []byte(test.rawArgs)} @@ -248,7 +247,7 @@ func TestNodeLabelScore(t *testing.T) { func TestNodeLabelFilterWithoutNode(t *testing.T) { var pod *v1.Pod t.Run("node does not exist", func(t *testing.T) { - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() p, err := New(nil, nil) if err != nil { t.Fatalf("Failed to create plugin: %v", err) diff --git a/pkg/scheduler/framework/plugins/nodename/BUILD b/pkg/scheduler/framework/plugins/nodename/BUILD index 75d2149367f..7d7f4dbe599 100644 --- a/pkg/scheduler/framework/plugins/nodename/BUILD +++ b/pkg/scheduler/framework/plugins/nodename/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], @@ -33,7 +32,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/nodename/node_name.go b/pkg/scheduler/framework/plugins/nodename/node_name.go index ec034c182ef..2d01280c7ca 100644 --- a/pkg/scheduler/framework/plugins/nodename/node_name.go +++ b/pkg/scheduler/framework/plugins/nodename/node_name.go @@ -22,7 +22,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // NodeName is a plugin that checks if a pod spec node name matches the current node. @@ -44,7 +43,7 @@ func (pl *NodeName) Name() string { } // Filter invoked at the filter extension point. 
-func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if nodeInfo.Node() == nil { return framework.NewStatus(framework.Error, "node not found") } @@ -55,7 +54,7 @@ func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1 } // Fits actually checks if the pod fits the node. -func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) bool { +func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name } diff --git a/pkg/scheduler/framework/plugins/nodename/node_name_test.go b/pkg/scheduler/framework/plugins/nodename/node_name_test.go index da234fe57f0..735cec0eab7 100644 --- a/pkg/scheduler/framework/plugins/nodename/node_name_test.go +++ b/pkg/scheduler/framework/plugins/nodename/node_name_test.go @@ -24,7 +24,6 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func TestNodeName(t *testing.T) { @@ -70,7 +69,7 @@ func TestNodeName(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(test.node) p, _ := New(nil, nil) diff --git a/pkg/scheduler/framework/plugins/nodeports/BUILD b/pkg/scheduler/framework/plugins/nodeports/BUILD index ae5584f5e35..ea6ae77fbce 100644 --- a/pkg/scheduler/framework/plugins/nodeports/BUILD +++ b/pkg/scheduler/framework/plugins/nodeports/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], @@ -33,7 +32,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/nodeports/node_ports.go b/pkg/scheduler/framework/plugins/nodeports/node_ports.go index 8190bcde69f..44ab9806a93 100644 --- a/pkg/scheduler/framework/plugins/nodeports/node_ports.go +++ b/pkg/scheduler/framework/plugins/nodeports/node_ports.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // NodePorts is a plugin that checks if a node has free ports for the requested pod ports. @@ -98,7 +97,7 @@ func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error) } // Filter invoked at the filter extension point. 
-func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { wantPorts, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -113,11 +112,11 @@ func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleStat } // Fits checks if the pod fits the node. -func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) bool { +func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool { return fitsPorts(getContainerPorts(pod), nodeInfo) } -func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *schedulertypes.NodeInfo) bool { +func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *framework.NodeInfo) bool { // try to see whether existingPorts and wantPorts will conflict or not existingPorts := nodeInfo.UsedPorts() for _, cp := range wantPorts { diff --git a/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go b/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go index 0aafc457a7a..15e0da03ebd 100644 --- a/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go +++ b/pkg/scheduler/framework/plugins/nodeports/node_ports_test.go @@ -26,7 +26,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/util/diff" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func newPod(host string, hostPortInfos ...string) *v1.Pod { @@ -56,91 +55,91 @@ func newPod(host string, hostPortInfos ...string) *v1.Pod { func TestNodePorts(t *testing.T) { tests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo name string wantStatus *framework.Status }{ { pod: &v1.Pod{}, - nodeInfo: schedulertypes.NewNodeInfo(), + nodeInfo: framework.NewNodeInfo(), name: "nothing running", }, { pod: newPod("m1", "UDP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "UDP/127.0.0.1/9090")), name: "other port", }, { pod: newPod("m1", "UDP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "UDP/127.0.0.1/8080")), name: "same udp port", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "TCP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8080")), name: "same tcp port", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "TCP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/127.0.0.2/8080")), name: "different host ip", }, { pod: newPod("m1", "UDP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8080")), name: "different protocol", }, { pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "UDP/127.0.0.1/8080")), name: "second udp port conflict", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")), 
name: "first tcp port conflict", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "TCP/0.0.0.0/8001"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8001")), name: "first tcp port conflict due to 0.0.0.0 hostIP", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/127.0.0.1/8001")), name: "TCP hostPort conflict due to 0.0.0.0 hostIP", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "TCP/127.0.0.1/8001"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/0.0.0.0/8001")), name: "second tcp port conflict to 0.0.0.0 hostIP", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), }, { pod: newPod("m1", "UDP/127.0.0.1/8001"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/0.0.0.0/8001")), name: "second different protocol", }, { pod: newPod("m1", "UDP/127.0.0.1/8001"), - nodeInfo: schedulertypes.NewNodeInfo( + nodeInfo: framework.NewNodeInfo( newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")), name: "UDP hostPort conflict due to 0.0.0.0 hostIP", wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), @@ -165,7 +164,7 @@ func TestNodePorts(t *testing.T) { func TestPreFilterDisabled(t *testing.T) { pod := &v1.Pod{} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) p, _ := New(nil, nil) diff --git a/pkg/scheduler/framework/plugins/noderesources/BUILD b/pkg/scheduler/framework/plugins/noderesources/BUILD index 2bd866ea64f..be939f5da06 100644 --- a/pkg/scheduler/framework/plugins/noderesources/BUILD +++ b/pkg/scheduler/framework/plugins/noderesources/BUILD @@ -19,7 +19,6 @@ go_library( "//pkg/features:go_default_library", "//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", @@ -61,7 +60,6 @@ go_test( "//pkg/features:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/noderesources/fit.go b/pkg/scheduler/framework/plugins/noderesources/fit.go index 9438d79d534..22215e6521c 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit.go @@ -27,7 +27,6 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) var _ framework.PreFilterPlugin = &Fit{} @@ -56,7 +55,7 @@ type FitArgs struct { // preFilterState computed at PreFilter and used at Filter. type preFilterState struct { - schedulertypes.Resource + framework.Resource } // Clone the prefilter state. 
@@ -69,7 +68,7 @@ func (f *Fit) Name() string { return FitName } -// computePodResourceRequest returns a schedulertypes.Resource that covers the largest +// computePodResourceRequest returns a framework.Resource that covers the largest // width in each resource dimension. Because init-containers run sequentially, we collect // the max in each dimension iteratively. In contrast, we sum the resource vectors for // regular containers since they run simultaneously. @@ -143,7 +142,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error // Filter invoked at the filter extension point. // Checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. // It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod. -func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -174,11 +173,11 @@ type InsufficientResource struct { } // Fits checks if node have enough resources to host the pod. -func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { +func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { return fitsRequest(computePodResourceRequest(pod), nodeInfo, ignoredExtendedResources) } -func fitsRequest(podRequest *preFilterState, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { +func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { insufficientResources := make([]InsufficientResource, 0, 4) allowedPodNumber := nodeInfo.AllowedPodNumber() diff --git a/pkg/scheduler/framework/plugins/noderesources/fit_test.go b/pkg/scheduler/framework/plugins/noderesources/fit_test.go index 35ef0954db1..ca698f429d6 100644 --- a/pkg/scheduler/framework/plugins/noderesources/fit_test.go +++ b/pkg/scheduler/framework/plugins/noderesources/fit_test.go @@ -27,7 +27,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) var ( @@ -62,7 +61,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa } } -func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod { +func newResourcePod(usage ...framework.Resource) *v1.Pod { containers := []v1.Container{} for _, req := range usage { containers = append(containers, v1.Container{ @@ -76,7 +75,7 @@ func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod { } } -func newResourceInitPod(pod *v1.Pod, usage ...schedulertypes.Resource) *v1.Pod { +func newResourceInitPod(pod *v1.Pod, usage ...framework.Resource) *v1.Pod { pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers return pod } @@ -93,7 +92,7 @@ func getErrReason(rn v1.ResourceName) string { func TestEnoughRequests(t *testing.T) { enoughPodsTests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo name string ignoredResources []byte 
wantInsufficientResources []InsufficientResource @@ -101,266 +100,266 @@ func TestEnoughRequests(t *testing.T) { }{ { pod: &v1.Pod{}, - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})), name: "no resources requested always fits", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})), + pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})), name: "too many resources fails", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)), wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})), name: "too many resources fails due to init container cpu", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}, schedulertypes.Resource{MilliCPU: 2, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}, framework.Resource{MilliCPU: 2, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})), name: "too many resources fails due to highest init container cpu", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})), name: "too many resources fails due to init container memory", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, 
Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}, schedulertypes.Resource{MilliCPU: 1, Memory: 2}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}, framework.Resource{MilliCPU: 1, Memory: 2}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})), name: "too many resources fails due to highest init container memory", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})), name: "init container fits because it's the max, not sum, of containers and init containers", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}, schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}, framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})), name: "multiple init containers fit because it's the max, not sum, of containers and init containers", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})), + pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})), name: "both resources fit", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 5})), + pod: newResourcePod(framework.Resource{MilliCPU: 2, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 9, Memory: 5})), name: "one resource memory fits", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}}, }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 2}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 2}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})), name: "one resource cpu fits", wantStatus: 
framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}}, }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})), name: "equal edge case", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 4, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 4, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})), name: "equal edge case for init container", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourcePod(schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})), + pod: newResourcePod(framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})), name: "extended resource fits", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})), name: "extended resource fits for init container", wantInsufficientResources: []InsufficientResource{}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), name: "extended resource capacity enforced", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + 
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), name: "extended resource capacity enforced for init container", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), name: "extended resource allocatable enforced", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), name: "extended resource allocatable enforced for init container", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), name: "extended resource allocatable enforced for multiple containers", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - 
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), name: "extended resource allocatable admits multiple init containers", wantInsufficientResources: []InsufficientResource{}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}}, - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}}, + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), name: "extended resource allocatable enforced for multiple init containers", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})), name: "extended resource allocatable enforced for unknown resource", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)), wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})), name: "extended resource allocatable 
enforced for unknown resource for init container", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)), wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})), name: "kubernetes.io resource capacity enforced", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)), wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})), name: "kubernetes.io resource capacity enforced for init container", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)), wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), name: "hugepages resource capacity enforced", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)), wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}}, }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), + pod: newResourceInitPod(newResourcePod(framework.Resource{}), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), name: "hugepages resource capacity enforced for init container", wantStatus: framework.NewStatus(framework.Unschedulable, 
getErrReason(hugePageResourceA)), wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}, - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}, + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), name: "hugepages resource allocatable enforced for multiple containers", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)), wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}}, }, { pod: newResourcePod( - schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})), + framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})), ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`), name: "skip checking ignored extended resource", wantInsufficientResources: []InsufficientResource{}, }, { pod: newResourceOverheadPod( - newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), + newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")}, ), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})), name: "resources + pod overhead fits", wantInsufficientResources: []InsufficientResource{}, }, { pod: newResourceOverheadPod( - newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), + newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")}, ), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})), name: "requests + overhead does not fit for memory", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}}, @@ -395,7 +394,7 @@ func TestEnoughRequests(t *testing.T) { func TestPreFilterDisabled(t *testing.T) { pod := &v1.Pod{} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) p, _ := NewFit(nil, nil) @@ -410,32 +409,32 @@ func TestPreFilterDisabled(t *testing.T) { func TestNotEnoughRequests(t *testing.T) { 
notEnoughPodsTests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo fits bool name string wantStatus *framework.Status }{ { pod: &v1.Pod{}, - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})), name: "even without specified resources predicate fails when there's no space for additional pod", wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})), + pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})), name: "even if both resources fit predicate fails when there's no space for additional pod", wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})), name: "even for equal edge case predicate fails when there's no space for additional pod", wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), }, { - pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})), + pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}), + nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})), name: "even for equal edge case predicate fails when there's no space for additional pod due to init container", wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), }, @@ -464,34 +463,34 @@ func TestNotEnoughRequests(t *testing.T) { func TestStorageRequests(t *testing.T) { storagePodsTests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo name string wantStatus *framework.Status }{ { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 10})), + pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 10, Memory: 10})), name: "due to container scratch disk", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), }, { - pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 10})), + pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 2, Memory: 10})), name: "pod fit", }, { - pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 25}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})), + pod: 
newResourcePod(framework.Resource{EphemeralStorage: 25}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})), name: "storage ephemeral local storage request exceeds allocatable", wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)), }, { - pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 10}), - nodeInfo: schedulertypes.NewNodeInfo( - newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})), + pod: newResourcePod(framework.Resource{EphemeralStorage: 10}), + nodeInfo: framework.NewNodeInfo( + newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})), name: "pod fits", }, } diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go b/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go index 9e390224ff8..ec1fb22853e 100644 --- a/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go +++ b/pkg/scheduler/framework/plugins/noderesources/resource_allocation.go @@ -23,7 +23,6 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" "k8s.io/kubernetes/pkg/features" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) @@ -46,7 +45,7 @@ type resourceToValueMap map[v1.ResourceName]int64 // score will use `scorer` function to calculate the score. func (r *resourceAllocationScorer) score( pod *v1.Pod, - nodeInfo *schedulertypes.NodeInfo) (int64, *framework.Status) { + nodeInfo *framework.NodeInfo) (int64, *framework.Status) { node := nodeInfo.Node() if node == nil { return 0, framework.NewStatus(framework.Error, "node not found") @@ -90,7 +89,7 @@ func (r *resourceAllocationScorer) score( } // calculateResourceAllocatableRequest returns resources Allocatable and Requested values -func calculateResourceAllocatableRequest(nodeInfo *schedulertypes.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) { +func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) { allocatable := nodeInfo.AllocatableResource() requested := nodeInfo.RequestedResource() podRequest := calculatePodResourceRequest(pod, resource) diff --git a/pkg/scheduler/framework/plugins/noderesources/resource_limits.go b/pkg/scheduler/framework/plugins/noderesources/resource_limits.go index da2249e0943..73387c25679 100644 --- a/pkg/scheduler/framework/plugins/noderesources/resource_limits.go +++ b/pkg/scheduler/framework/plugins/noderesources/resource_limits.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // ResourceLimits is a score plugin that increases score of input node by 1 if the node satisfies @@ -46,7 +45,7 @@ const ( // preScoreState computed at PreScore and used at Score. type preScoreState struct { - podResourceRequest *schedulertypes.Resource + podResourceRequest *framework.Resource } // Clone the preScore state. 
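The computePodResourceRequest comment earlier in fit.go describes the combining rule behind these tests: regular containers run simultaneously, so their requests are summed; init containers run sequentially, so only the per-dimension maximum across them counts; the pod's effective request is the larger of the two. A self-contained sketch of that rule for the CPU dimension only (simplified, ignores pod overhead; not the scheduler's actual implementation):

package main

import "fmt"

// effectiveCPURequest mirrors the rule above: sum the regular containers,
// take the max over init containers, and keep whichever is larger.
func effectiveCPURequest(initMilliCPU, containerMilliCPU []int64) int64 {
	var sum, maxInit int64
	for _, c := range containerMilliCPU {
		sum += c // regular containers run at the same time
	}
	for _, c := range initMilliCPU {
		if c > maxInit {
			maxInit = c // init containers run one at a time
		}
	}
	if maxInit > sum {
		return maxInit
	}
	return sum
}

func main() {
	// Matches the "too many resources fails due to init container cpu" case:
	// the init container needs 3m while the app container needs 1m, so the
	// pod's effective CPU request is 3m.
	fmt.Println(effectiveCPURequest([]int64{3}, []int64{1})) // prints 3
}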
@@ -81,7 +80,7 @@ func (rl *ResourceLimits) PreScore( return nil } -func getPodResource(cycleState *framework.CycleState) (*schedulertypes.Resource, error) { +func getPodResource(cycleState *framework.CycleState) (*framework.Resource, error) { c, err := cycleState.Read(preScoreStateKey) if err != nil { return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err) @@ -136,9 +135,9 @@ func NewResourceLimits(_ *runtime.Unknown, h framework.FrameworkHandle) (framewo // getResourceLimits computes resource limits for input pod. // The reason to create this new function is to be consistent with other // priority functions because most or perhaps all priority functions work -// with schedulertypes.Resource. -func getResourceLimits(pod *v1.Pod) *schedulertypes.Resource { - result := &schedulertypes.Resource{} +// with framework.Resource. +func getResourceLimits(pod *v1.Pod) *framework.Resource { + result := &framework.Resource{} for _, container := range pod.Spec.Containers { result.Add(container.Resources.Limits) } diff --git a/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD b/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD index 3d115cbe35f..4b5b01cb82b 100644 --- a/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD +++ b/pkg/scheduler/framework/plugins/nodeunschedulable/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], @@ -20,7 +19,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", ], ) diff --git a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go index eaf27ad4b93..7a0689347ff 100644 --- a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go +++ b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // NodeUnschedulable is a plugin that priorities nodes according to the node annotation @@ -49,7 +48,7 @@ func (pl *NodeUnschedulable) Name() string { } // Filter invoked at the filter extension point. 
-func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if nodeInfo == nil || nodeInfo.Node() == nil { return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonUnknownCondition) } diff --git a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go index d1bb49b2dd0..eaa6ca35bd8 100644 --- a/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go +++ b/pkg/scheduler/framework/plugins/nodeunschedulable/node_unschedulable_test.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/core/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func TestNodeUnschedulable(t *testing.T) { @@ -73,7 +72,7 @@ func TestNodeUnschedulable(t *testing.T) { } for _, test := range testCases { - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(test.node) p, _ := New(nil, nil) diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD b/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD index acc9ce324a1..d8e9056f09f 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/BUILD @@ -13,7 +13,6 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/features:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", @@ -40,8 +39,7 @@ go_test( deps = [ "//pkg/features:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/listers/fake:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1/fake:go_default_library", "//pkg/volume/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go index c3f128abe9e..a10e8b499d7 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi.go @@ -29,7 +29,6 @@ import ( csitrans "k8s.io/csi-translation-lib" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" volumeutil "k8s.io/kubernetes/pkg/volume/util" "k8s.io/klog" @@ -68,7 +67,7 @@ func (pl *CSILimits) Name() string { } // Filter invoked at the filter extension point. 
-func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { // If the new pod doesn't have any volume attached to it, the predicate will always be true if len(pod.Spec.Volumes) == 0 { return nil @@ -285,7 +284,7 @@ func NewCSI(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plu }, nil } -func getVolumeLimits(nodeInfo *schedulertypes.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 { +func getVolumeLimits(nodeInfo *framework.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 { // TODO: stop getting values from Node object in v1.18 nodeVolumeLimits := nodeInfo.VolumeLimits() if csiNode != nil { diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go index 1d74da2eb55..18178e20746 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go @@ -35,8 +35,7 @@ import ( csilibplugins "k8s.io/csi-translation-lib/plugins" "k8s.io/kubernetes/pkg/features" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake" volumeutil "k8s.io/kubernetes/pkg/volume/util" utilpointer "k8s.io/utils/pointer" ) @@ -475,8 +474,8 @@ func TestCSILimits(t *testing.T) { } } -func getFakeCSIPVLister(volumeName string, driverNames ...string) fakelisters.PersistentVolumeLister { - pvLister := fakelisters.PersistentVolumeLister{} +func getFakeCSIPVLister(volumeName string, driverNames ...string) fakeframework.PersistentVolumeLister { + pvLister := fakeframework.PersistentVolumeLister{} for _, driver := range driverNames { for j := 0; j < 4; j++ { volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j) @@ -520,8 +519,8 @@ func getFakeCSIPVLister(volumeName string, driverNames ...string) fakelisters.Pe return pvLister } -func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakelisters.PersistentVolumeClaimLister { - pvcLister := fakelisters.PersistentVolumeClaimLister{} +func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakeframework.PersistentVolumeClaimLister { + pvcLister := fakeframework.PersistentVolumeClaimLister{} for _, driver := range driverNames { for j := 0; j < 4; j++ { v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j) @@ -563,8 +562,8 @@ func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) { csiNode.Annotations = nodeInfoAnnotations } -func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.StorageClassLister { - return fakelisters.StorageClassLister{ +func getFakeCSIStorageClassLister(scName, provisionerName string) fakeframework.StorageClassLister { + return fakeframework.StorageClassLister{ { ObjectMeta: metav1.ObjectMeta{Name: scName}, Provisioner: provisionerName, @@ -572,15 +571,15 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.St } } -func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister { +func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakeframework.CSINodeLister { if csiNode != nil { - return fakelisters.CSINodeLister(*csiNode) + 
return fakeframework.CSINodeLister(*csiNode) } - return fakelisters.CSINodeLister{} + return fakeframework.CSINodeLister{} } -func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulertypes.NodeInfo, *storagev1.CSINode) { - nodeInfo := schedulertypes.NewNodeInfo(pods...) +func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*framework.NodeInfo, *storagev1.CSINode) { + nodeInfo := framework.NewNodeInfo(pods...) node := &v1.Node{ ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"}, Status: v1.NodeStatus{ diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go index b124f60c8ca..14b74bfa072 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi.go @@ -36,7 +36,6 @@ import ( "k8s.io/kubernetes/pkg/features" kubefeatures "k8s.io/kubernetes/pkg/features" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" volumeutil "k8s.io/kubernetes/pkg/volume/util" ) @@ -196,7 +195,7 @@ func (pl *nonCSILimits) Name() string { } // Filter invoked at the filter extension point. -func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. 
if len(pod.Spec.Volumes) == 0 { diff --git a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go index c919567c960..3ad7eb514f4 100644 --- a/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go +++ b/pkg/scheduler/framework/plugins/nodevolumelimits/non_csi_test.go @@ -27,7 +27,7 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" csilibplugins "k8s.io/csi-translation-lib/plugins" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" + fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake" utilpointer "k8s.io/utils/pointer" ) @@ -1222,8 +1222,8 @@ func TestGetMaxVols(t *testing.T) { } } -func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister { - return fakelisters.PersistentVolumeClaimLister{ +func getFakePVCLister(filterName string) fakeframework.PersistentVolumeClaimLister { + return fakeframework.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, Spec: v1.PersistentVolumeClaimSpec{ @@ -1283,8 +1283,8 @@ func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister } } -func getFakePVLister(filterName string) fakelisters.PersistentVolumeLister { - return fakelisters.PersistentVolumeLister{ +func getFakePVLister(filterName string) fakeframework.PersistentVolumeLister { + return fakeframework.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"}, Spec: v1.PersistentVolumeSpec{ diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD index 6cd24164c15..69c4c98a042 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/BUILD +++ b/pkg/scheduler/framework/plugins/podtopologyspread/BUILD @@ -14,8 +14,6 @@ go_library( "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/parallelize:go_default_library", - "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library", @@ -43,7 +41,6 @@ go_test( "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/parallelize:go_default_library", "//pkg/scheduler/testing:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go index 0e3ff224591..bb8db6736e2 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) const preFilterStateKey = "PreFilter" + Name @@ -160,7 +159,7 @@ func (pl *PodTopologySpread) PreFilterExtensions() framework.PreFilterExtensions 
} // AddPod from pre-computed data in cycleState. -func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -171,7 +170,7 @@ func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.C } // RemovePod from pre-computed data in cycleState. -func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -275,7 +274,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er } // Filter invoked at the filter extension point. -func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() if node == nil { return framework.NewStatus(framework.Error, "node not found") diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go index b5a1f03425f..7aa73c2aa78 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/filtering_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" st "k8s.io/kubernetes/pkg/scheduler/testing" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" "k8s.io/utils/pointer" ) @@ -1619,7 +1618,7 @@ func TestMultipleConstraints(t *testing.T) { func TestPreFilterDisabled(t *testing.T) { pod := &v1.Pod{} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) p := &PodTopologySpread{} diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go b/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go index e68eacbca86..6c2f6609164 100644 --- a/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go +++ b/pkg/scheduler/framework/plugins/podtopologyspread/plugin.go @@ -28,7 +28,6 @@ import ( appslisters "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" ) const ( @@ -56,7 +55,7 @@ type Args struct { // PodTopologySpread is a plugin that ensures pod's topologySpreadConstraints is satisfied. 
type PodTopologySpread struct { Args - sharedLister schedulerlisters.SharedLister + sharedLister framework.SharedLister services corelisters.ServiceLister replicationCtrls corelisters.ReplicationControllerLister replicaSets appslisters.ReplicaSetLister diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/BUILD b/pkg/scheduler/framework/plugins/serviceaffinity/BUILD index 9dfaed2c4dc..64249e7e5d3 100644 --- a/pkg/scheduler/framework/plugins/serviceaffinity/BUILD +++ b/pkg/scheduler/framework/plugins/serviceaffinity/BUILD @@ -8,8 +8,6 @@ go_library( deps = [ "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -23,9 +21,8 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", + "//pkg/scheduler/framework/v1alpha1/fake:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/listers/fake:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go index cac4ee630b0..9bcf374adcb 100644 --- a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go +++ b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go @@ -26,8 +26,6 @@ import ( corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) const ( @@ -92,7 +90,7 @@ func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.P // ServiceAffinity is a plugin that checks service affinity. type ServiceAffinity struct { args Args - sharedLister schedulerlisters.SharedLister + sharedLister framework.SharedLister serviceLister corelisters.ServiceLister } @@ -146,7 +144,7 @@ func (pl *ServiceAffinity) PreFilterExtensions() framework.PreFilterExtensions { } // AddPod from pre-computed data in cycleState. -func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -167,7 +165,7 @@ func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.Cyc } // RemovePod from pre-computed data in cycleState. 
-func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { s, err := getPreFilterState(cycleState) if err != nil { return framework.NewStatus(framework.Error, err.Error()) @@ -230,7 +228,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error // - L is a label that the ServiceAffinity object needs as a matching constraint. // - L is not defined in the pod itself already. // - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value. -func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if len(pl.args.AffinityLabels) == 0 { return nil } @@ -332,7 +330,7 @@ func (pl *ServiceAffinity) NormalizeScore(ctx context.Context, _ *framework.Cycl // we need to modify the old priority to be able to handle multiple labels so that it can be mapped // to a single plugin. // TODO: This will be deprecated soon. -func (pl *ServiceAffinity) updateNodeScoresForLabel(sharedLister schedulerlisters.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error { +func (pl *ServiceAffinity) updateNodeScoresForLabel(sharedLister framework.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error { var numServicePods int64 var labelValue string podCounts := map[string]int64{} diff --git a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go index 9a0cd9e6ddf..97c06985b16 100644 --- a/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go +++ b/pkg/scheduler/framework/plugins/serviceaffinity/service_affinity_test.go @@ -25,9 +25,8 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" + fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake" "k8s.io/kubernetes/pkg/scheduler/internal/cache" - fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func TestServiceAffinity(t *testing.T) { @@ -164,7 +163,7 @@ func TestServiceAffinity(t *testing.T) { p := &ServiceAffinity{ sharedLister: snapshot, - serviceLister: fakelisters.ServiceLister(test.services), + serviceLister: fakeframework.ServiceLister(test.services), args: Args{ AffinityLabels: test.labels, }, @@ -384,7 +383,7 @@ func TestServiceAffinityScore(t *testing.T) { t.Run(test.name, func(t *testing.T) { nodes := makeLabeledNodeList(test.nodes) snapshot := cache.NewSnapshot(test.pods, nodes) - serviceLister := fakelisters.ServiceLister(test.services) + serviceLister := fakeframework.ServiceLister(test.services) p := &ServiceAffinity{ sharedLister: snapshot, @@ -499,7 +498,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) { p := &ServiceAffinity{ sharedLister: snapshot, - serviceLister: fakelisters.ServiceLister(test.services), + serviceLister: 
fakeframework.ServiceLister(test.services), } cycleState := framework.NewCycleState() preFilterStatus := p.PreFilter(context.Background(), cycleState, test.pendingPod) @@ -591,7 +590,7 @@ func sortNodeScoreList(out framework.NodeScoreList) { }) } -func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *schedulertypes.NodeInfo { +func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *framework.NodeInfo { t.Helper() nodeInfo, err := snapshot.NodeInfos().Get(name) if err != nil { @@ -602,7 +601,7 @@ func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *sched func TestPreFilterDisabled(t *testing.T) { pod := &v1.Pod{} - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() node := v1.Node{} nodeInfo.SetNode(&node) p := &ServiceAffinity{ diff --git a/pkg/scheduler/framework/plugins/tainttoleration/BUILD b/pkg/scheduler/framework/plugins/tainttoleration/BUILD index c076cbffa22..7fb3f837601 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/BUILD +++ b/pkg/scheduler/framework/plugins/tainttoleration/BUILD @@ -9,7 +9,6 @@ go_library( "//pkg/apis/core/v1/helper:go_default_library", "//pkg/scheduler/framework/plugins/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], @@ -36,7 +35,6 @@ go_test( deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", ], diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go index 29a2aaacc6c..73bfea7e5a9 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration.go @@ -25,7 +25,6 @@ import ( v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // TaintToleration is a plugin that checks if a pod tolerates a node's taints. @@ -52,7 +51,7 @@ func (pl *TaintToleration) Name() string { } // Filter invoked at the filter extension point. 
-func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if nodeInfo == nil || nodeInfo.Node() == nil { return framework.NewStatus(framework.Error, "invalid nodeInfo") } diff --git a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go index 461bb64ebcd..a3377b3308f 100644 --- a/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go +++ b/pkg/scheduler/framework/plugins/tainttoleration/taint_toleration_test.go @@ -25,7 +25,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/internal/cache" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node { @@ -330,7 +329,7 @@ func TestTaintTolerationFilter(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(test.node) p, _ := New(nil, nil) gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, nodeInfo) diff --git a/pkg/scheduler/framework/plugins/volumebinding/BUILD b/pkg/scheduler/framework/plugins/volumebinding/BUILD index 6b7d0a627da..33ed9d13323 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/BUILD +++ b/pkg/scheduler/framework/plugins/volumebinding/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/controller/volume/scheduling:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], @@ -35,7 +34,6 @@ go_test( deps = [ "//pkg/controller/volume/scheduling:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", ], ) diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go index 6d50bdd9bbf..bde2b62b1cb 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding.go @@ -23,7 +23,6 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/kubernetes/pkg/controller/volume/scheduling" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // VolumeBinding is a plugin that binds pod volumes in scheduling. @@ -62,7 +61,7 @@ func podHasPVCs(pod *v1.Pod) bool { // // The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound // PVCs can be matched with an available and node-compatible PV. 
-func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { node := nodeInfo.Node() if node == nil { return framework.NewStatus(framework.Error, "node not found") diff --git a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go index 1c7b7a46201..35cdef2a9ad 100644 --- a/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go +++ b/pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go @@ -25,7 +25,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/kubernetes/pkg/controller/volume/scheduling" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func TestVolumeBinding(t *testing.T) { @@ -99,7 +98,7 @@ func TestVolumeBinding(t *testing.T) { for _, item := range table { t.Run(item.name, func(t *testing.T) { - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(item.node) fakeVolumeBinder := scheduling.NewFakeVolumeBinder(item.volumeBinderConfig) p := &VolumeBinding{ diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/BUILD b/pkg/scheduler/framework/plugins/volumerestrictions/BUILD index 6330037c028..997d00086db 100644 --- a/pkg/scheduler/framework/plugins/volumerestrictions/BUILD +++ b/pkg/scheduler/framework/plugins/volumerestrictions/BUILD @@ -7,7 +7,6 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", ], @@ -33,7 +32,6 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", ], ) diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go index e4de9aadf2c..680d5c0e401 100644 --- a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go +++ b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions.go @@ -22,7 +22,6 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // VolumeRestrictions is a plugin that checks volume restrictions. 
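Aside (not part of the patch): all of these test updates follow the same construction pattern, now against framework.NodeInfo — build the node view, attach the node object, and call Filter through the framework.FilterPlugin interface. A minimal sketch, assuming the same imports the test files in this patch already use; the fixtures and test name are made up:

    // Hypothetical test sketch; the pod/node fixtures are illustrative and the
    // constructor call mirrors the taint-toleration test above.
    func TestFilterSketch(t *testing.T) {
        podToSchedule := &v1.Pod{}
        running := &v1.Pod{Spec: v1.PodSpec{NodeName: "node-1"}}

        nodeInfo := framework.NewNodeInfo(running) // pods assumed to already sit on the node
        nodeInfo.SetNode(&v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}})

        p, _ := New(nil, nil) // plugin constructor, as in the taint-toleration test
        // A nil CycleState is fine for plugins that do not read pre-filter state.
        status := p.(framework.FilterPlugin).Filter(context.Background(), nil, podToSchedule, nodeInfo)
        if !status.IsSuccess() && status.Code() != framework.Unschedulable {
            t.Errorf("unexpected status: %v", status)
        }
    }
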
@@ -118,7 +117,7 @@ func haveOverlap(a1, a2 []string) bool { // - AWS EBS forbids any two pods mounting the same volume ID // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only // - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only -func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { for _, v := range pod.Spec.Volumes { for _, ev := range nodeInfo.Pods() { if isVolumeConflict(v, ev) { diff --git a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go index bdc7d7d345f..a64fbbfc048 100644 --- a/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go +++ b/pkg/scheduler/framework/plugins/volumerestrictions/volume_restrictions_test.go @@ -23,7 +23,6 @@ import ( v1 "k8s.io/api/core/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) func TestGCEDiskConflicts(t *testing.T) { @@ -52,15 +51,15 @@ func TestGCEDiskConflicts(t *testing.T) { errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict) tests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo isOk bool name string wantStatus *framework.Status }{ - {&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil}, - {&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, - {&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, - {&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, + {&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil}, + {&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, + {&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, + {&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, } for _, test := range tests { @@ -100,15 +99,15 @@ func TestAWSDiskConflicts(t *testing.T) { errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict) tests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo isOk bool name string wantStatus *framework.Status }{ - {&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil}, - {&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, - {&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, - {&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, + {&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil}, + {&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, + {&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, + {&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, } for _, test := range tests { @@ -154,15 +153,15 @@ func TestRBDDiskConflicts(t *testing.T) { 
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict) tests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo isOk bool name string wantStatus *framework.Status }{ - {&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil}, - {&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, - {&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, - {&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, + {&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil}, + {&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, + {&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, + {&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, } for _, test := range tests { @@ -208,15 +207,15 @@ func TestISCSIDiskConflicts(t *testing.T) { errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict) tests := []struct { pod *v1.Pod - nodeInfo *schedulertypes.NodeInfo + nodeInfo *framework.NodeInfo isOk bool name string wantStatus *framework.Status }{ - {&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil}, - {&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, - {&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, - {&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, + {&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil}, + {&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil}, + {&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus}, + {&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil}, } for _, test := range tests { diff --git a/pkg/scheduler/framework/plugins/volumezone/BUILD b/pkg/scheduler/framework/plugins/volumezone/BUILD index af2c275e4ec..9b7949a9e92 100644 --- a/pkg/scheduler/framework/plugins/volumezone/BUILD +++ b/pkg/scheduler/framework/plugins/volumezone/BUILD @@ -8,7 +8,6 @@ go_library( deps = [ "//pkg/apis/core/v1/helper:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", @@ -26,8 +25,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/scheduler/framework/v1alpha1:go_default_library", - "//pkg/scheduler/listers/fake:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1/fake:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go index 2410ff4f2c3..d7f866ce5b9 100644 --- a/pkg/scheduler/framework/plugins/volumezone/volume_zone.go +++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone.go @@ -30,7 +30,6 @@ import ( "k8s.io/klog" v1helper 
"k8s.io/kubernetes/pkg/apis/core/v1/helper" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // VolumeZone is a plugin that checks volume zone. @@ -78,7 +77,7 @@ func (pl *VolumeZone) Name() string { // determining the zone of a volume during scheduling, and that is likely to // require calling out to the cloud provider. It seems that we are moving away // from inline volume declarations anyway. -func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { diff --git a/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go index e344d22b38d..2c6deaa70e5 100644 --- a/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go +++ b/pkg/scheduler/framework/plugins/volumezone/volume_zone_test.go @@ -25,8 +25,7 @@ import ( storagev1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake" ) func createPodWithVolume(pod, pv, pvc string) *v1.Pod { @@ -48,7 +47,7 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod { } func TestSingleZone(t *testing.T) { - pvLister := fakelisters.PersistentVolumeLister{ + pvLister := fakeframework.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, }, @@ -66,7 +65,7 @@ func TestSingleZone(t *testing.T) { }, } - pvcLister := fakelisters.PersistentVolumeClaimLister{ + pvcLister := fakeframework.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, @@ -208,7 +207,7 @@ func TestSingleZone(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - node := &schedulertypes.NodeInfo{} + node := &framework.NodeInfo{} node.SetNode(test.Node) p := &VolumeZone{ pvLister, @@ -224,7 +223,7 @@ func TestSingleZone(t *testing.T) { } func TestMultiZone(t *testing.T) { - pvLister := fakelisters.PersistentVolumeLister{ + pvLister := fakeframework.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, }, @@ -242,7 +241,7 @@ func TestMultiZone(t *testing.T) { }, } - pvcLister := fakelisters.PersistentVolumeClaimLister{ + pvcLister := fakeframework.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, @@ -330,7 +329,7 @@ func TestMultiZone(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - node := &schedulertypes.NodeInfo{} + node := &framework.NodeInfo{} node.SetNode(test.Node) p := &VolumeZone{ pvLister, @@ -354,7 +353,7 @@ func TestWithBinding(t *testing.T) { classImmediate = "Class_Immediate" ) - scLister := 
fakelisters.StorageClassLister{ + scLister := fakeframework.StorageClassLister{ { ObjectMeta: metav1.ObjectMeta{Name: classImmediate}, }, @@ -364,13 +363,13 @@ func TestWithBinding(t *testing.T) { }, } - pvLister := fakelisters.PersistentVolumeLister{ + pvLister := fakeframework.PersistentVolumeLister{ { ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}}, }, } - pvcLister := fakelisters.PersistentVolumeClaimLister{ + pvcLister := fakeframework.PersistentVolumeClaimLister{ { ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"}, Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"}, @@ -439,7 +438,7 @@ func TestWithBinding(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { - node := &schedulertypes.NodeInfo{} + node := &framework.NodeInfo{} node.SetNode(test.Node) p := &VolumeZone{ pvLister, diff --git a/pkg/scheduler/framework/v1alpha1/BUILD b/pkg/scheduler/framework/v1alpha1/BUILD index 515b298ca62..e231f9f9679 100644 --- a/pkg/scheduler/framework/v1alpha1/BUILD +++ b/pkg/scheduler/framework/v1alpha1/BUILD @@ -6,26 +6,33 @@ go_library( "cycle_state.go", "framework.go", "interface.go", + "listers.go", "metrics_recorder.go", "registry.go", + "types.go", "waiting_pods_map.go", ], importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1", visibility = ["//visibility:public"], deps = [ + "//pkg/apis/core/v1/helper:go_default_library", "//pkg/controller/volume/scheduling:go_default_library", + "//pkg/features:go_default_library", "//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/internal/parallelize:go_default_library", - "//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/metrics:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes:go_default_library", + "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/component-base/metrics:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/sigs.k8s.io/yaml:go_default_library", @@ -41,7 +48,10 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/scheduler/framework/v1alpha1/fake:all-srcs", + ], tags = ["automanaged"], visibility = ["//visibility:public"], ) @@ -53,13 +63,14 @@ go_test( "framework_test.go", "interface_test.go", "registry_test.go", + "types_test.go", ], embed = [":go_default_library"], deps = [ "//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/metrics:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", diff --git a/pkg/scheduler/listers/fake/BUILD b/pkg/scheduler/framework/v1alpha1/fake/BUILD similarity index 86% rename from pkg/scheduler/listers/fake/BUILD rename to pkg/scheduler/framework/v1alpha1/fake/BUILD index 2a28e070c2e..c9167c7e5e0 100644 --- a/pkg/scheduler/listers/fake/BUILD +++ b/pkg/scheduler/framework/v1alpha1/fake/BUILD @@ -3,11 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library") go_library( name = "go_default_library", srcs = ["listers.go"], - importpath = "k8s.io/kubernetes/pkg/scheduler/listers/fake", + importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake", visibility = ["//visibility:public"], deps = [ - "//pkg/scheduler/listers:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library", diff --git a/pkg/scheduler/listers/fake/listers.go b/pkg/scheduler/framework/v1alpha1/fake/listers.go similarity index 92% rename from pkg/scheduler/listers/fake/listers.go rename to pkg/scheduler/framework/v1alpha1/fake/listers.go index 8735b8737ec..785a19f5dad 100644 --- a/pkg/scheduler/listers/fake/listers.go +++ b/pkg/scheduler/framework/v1alpha1/fake/listers.go @@ -27,11 +27,10 @@ import ( appslisters "k8s.io/client-go/listers/apps/v1" corelisters "k8s.io/client-go/listers/core/v1" storagelisters "k8s.io/client-go/listers/storage/v1" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) -var _ schedulerlisters.PodLister = &PodLister{} +var _ framework.PodLister = &PodLister{} // PodLister implements PodLister on an []v1.Pods for test purposes. type PodLister []*v1.Pod @@ -47,7 +46,7 @@ func (f PodLister) List(s labels.Selector) (selected []*v1.Pod, err error) { } // FilteredList returns pods matching a pod filter and a label selector. -func (f PodLister) FilteredList(podFilter schedulerlisters.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) { +func (f PodLister) FilteredList(podFilter framework.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) { for _, pod := range f { if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) { selected = append(selected, pod) @@ -247,11 +246,11 @@ func (pvcs PersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) } } -// NodeInfoLister declares a schedulertypes.NodeInfo type for testing. -type NodeInfoLister []*schedulertypes.NodeInfo +// NodeInfoLister declares a framework.NodeInfo type for testing. +type NodeInfoLister []*framework.NodeInfo // Get returns a fake node object in the fake nodes. -func (nodes NodeInfoLister) Get(nodeName string) (*schedulertypes.NodeInfo, error) { +func (nodes NodeInfoLister) Get(nodeName string) (*framework.NodeInfo, error) { for _, node := range nodes { if node != nil && node.Node().Name == nodeName { return node, nil @@ -261,21 +260,21 @@ func (nodes NodeInfoLister) Get(nodeName string) (*schedulertypes.NodeInfo, erro } // List lists all nodes. 
-func (nodes NodeInfoLister) List() ([]*schedulertypes.NodeInfo, error) { +func (nodes NodeInfoLister) List() ([]*framework.NodeInfo, error) { return nodes, nil } // HavePodsWithAffinityList is supposed to list nodes with at least one pod with affinity. For the fake lister // we just return everything. -func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) { +func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) { return nodes, nil } // NewNodeInfoLister create a new fake NodeInfoLister from a slice of v1.Nodes. -func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister { - nodeInfoList := make([]*schedulertypes.NodeInfo, len(nodes)) +func NewNodeInfoLister(nodes []*v1.Node) framework.NodeInfoLister { + nodeInfoList := make([]*framework.NodeInfo, len(nodes)) for _, node := range nodes { - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := framework.NewNodeInfo() nodeInfo.SetNode(node) nodeInfoList = append(nodeInfoList, nodeInfo) } diff --git a/pkg/scheduler/framework/v1alpha1/framework.go b/pkg/scheduler/framework/v1alpha1/framework.go index 4a85f83d668..320860709f5 100644 --- a/pkg/scheduler/framework/v1alpha1/framework.go +++ b/pkg/scheduler/framework/v1alpha1/framework.go @@ -32,9 +32,7 @@ import ( "k8s.io/kubernetes/pkg/controller/volume/scheduling" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/internal/parallelize" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" "k8s.io/kubernetes/pkg/scheduler/metrics" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) const ( @@ -60,7 +58,7 @@ const ( // plugins. type framework struct { registry Registry - snapshotSharedLister schedulerlisters.SharedLister + snapshotSharedLister SharedLister waitingPods *waitingPodsMap pluginNameToWeightMap map[string]int queueSortPlugins []QueueSortPlugin @@ -116,7 +114,7 @@ func (f *framework) getExtensionPoints(plugins *config.Plugins) []extensionPoint type frameworkOptions struct { clientSet clientset.Interface informerFactory informers.SharedInformerFactory - snapshotSharedLister schedulerlisters.SharedLister + snapshotSharedLister SharedLister metricsRecorder *metricsRecorder volumeBinder scheduling.SchedulerVolumeBinder runAllFilters bool @@ -140,7 +138,7 @@ func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option } // WithSnapshotSharedLister sets the SharedLister of the snapshot. 
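Aside (not part of the patch): for tests, the fake listers keep their shapes; only the import path (pkg/scheduler/framework/v1alpha1/fake) and the NodeInfo-related return types change. A minimal sketch of using them inside a test function, assuming the same imports as the test files in this patch; object names are arbitrary:

    // Plain slices double as listers in the fake package.
    services := []*v1.Service{
        {ObjectMeta: metav1.ObjectMeta{Name: "svc", Namespace: "default"}},
    }
    serviceLister := fakeframework.ServiceLister(services)
    _ = serviceLister // would normally be injected into the plugin under test

    // NewNodeInfoLister wraps v1.Node objects; Get now hands back *framework.NodeInfo.
    nodes := []*v1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node-1"}}}
    nodeInfoLister := fakeframework.NewNodeInfoLister(nodes)
    nodeInfo, err := nodeInfoLister.Get("node-1")
    if err != nil {
        t.Fatal(err)
    }
    if nodeInfo.Node().Name != "node-1" {
        t.Errorf("got node %q, want node-1", nodeInfo.Node().Name)
    }
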
-func WithSnapshotSharedLister(snapshotSharedLister schedulerlisters.SharedLister) Option { +func WithSnapshotSharedLister(snapshotSharedLister SharedLister) Option { return func(o *frameworkOptions) { o.snapshotSharedLister = snapshotSharedLister } @@ -352,7 +350,7 @@ func (f *framework) RunPreFilterExtensionAddPod( state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, - nodeInfo *schedulertypes.NodeInfo, + nodeInfo *NodeInfo, ) (status *Status) { for _, pl := range f.preFilterPlugins { if pl.PreFilterExtensions() == nil { @@ -370,7 +368,7 @@ func (f *framework) RunPreFilterExtensionAddPod( return nil } -func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { +func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status { if !state.ShouldRecordPluginMetrics() { return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podToAdd, nodeInfo) } @@ -388,7 +386,7 @@ func (f *framework) RunPreFilterExtensionRemovePod( state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, - nodeInfo *schedulertypes.NodeInfo, + nodeInfo *NodeInfo, ) (status *Status) { for _, pl := range f.preFilterPlugins { if pl.PreFilterExtensions() == nil { @@ -406,7 +404,7 @@ func (f *framework) RunPreFilterExtensionRemovePod( return nil } -func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { +func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status { if !state.ShouldRecordPluginMetrics() { return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podToAdd, nodeInfo) } @@ -424,7 +422,7 @@ func (f *framework) RunFilterPlugins( ctx context.Context, state *CycleState, pod *v1.Pod, - nodeInfo *schedulertypes.NodeInfo, + nodeInfo *NodeInfo, ) PluginToStatus { var firstFailedStatus *Status statuses := make(PluginToStatus) @@ -451,7 +449,7 @@ func (f *framework) RunFilterPlugins( return statuses } -func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { +func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status { if !state.ShouldRecordPluginMetrics() { return pl.Filter(ctx, state, pod, nodeInfo) } @@ -817,7 +815,7 @@ func (f *framework) WaitOnPermit(ctx context.Context, pod *v1.Pod) (status *Stat // snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains // unchanged until a pod finishes "Reserve". There is no guarantee that the information // remains unchanged after "Reserve". 
-func (f *framework) SnapshotSharedLister() schedulerlisters.SharedLister { +func (f *framework) SnapshotSharedLister() SharedLister { return f.snapshotSharedLister } diff --git a/pkg/scheduler/framework/v1alpha1/framework_test.go b/pkg/scheduler/framework/v1alpha1/framework_test.go index cfc04f10430..2eb4496d402 100644 --- a/pkg/scheduler/framework/v1alpha1/framework_test.go +++ b/pkg/scheduler/framework/v1alpha1/framework_test.go @@ -32,7 +32,6 @@ import ( "k8s.io/apimachinery/pkg/types" "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/metrics" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) const ( @@ -138,10 +137,10 @@ type TestPluginPreFilterExtension struct { inj injectedResult } -func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { +func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status { return NewStatus(Code(e.inj.PreFilterAddPodStatus), "injected status") } -func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { +func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status { return NewStatus(Code(e.inj.PreFilterRemovePodStatus), "injected status") } @@ -165,7 +164,7 @@ func (pl *TestPlugin) PreFilterExtensions() PreFilterExtensions { return &TestPluginPreFilterExtension{inj: pl.inj} } -func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { +func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status { return NewStatus(Code(pl.inj.FilterStatus), "injected filter status") } @@ -228,13 +227,13 @@ func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, stat } func (pl *TestPreFilterWithExtensionsPlugin) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, - podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { + podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status { pl.AddCalled++ return nil } func (pl *TestPreFilterWithExtensionsPlugin) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, - podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status { + podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status { pl.RemoveCalled++ return nil } diff --git a/pkg/scheduler/framework/v1alpha1/interface.go b/pkg/scheduler/framework/v1alpha1/interface.go index fb66bb6607b..8bb61c47bcc 100644 --- a/pkg/scheduler/framework/v1alpha1/interface.go +++ b/pkg/scheduler/framework/v1alpha1/interface.go @@ -31,8 +31,6 @@ import ( clientset "k8s.io/client-go/kubernetes" "k8s.io/kubernetes/pkg/controller/volume/scheduling" "k8s.io/kubernetes/pkg/scheduler/apis/config" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // NodeScoreList declares a list of nodes and their scores. @@ -256,10 +254,10 @@ type QueueSortPlugin interface { type PreFilterExtensions interface { // AddPod is called by the framework while trying to evaluate the impact // of adding podToAdd to the node while scheduling podToSchedule. 
- AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status + AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status // RemovePod is called by the framework while trying to evaluate the impact // of removing podToRemove from the node while scheduling podToSchedule. - RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status + RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status } // PreFilterPlugin is an interface that must be implemented by "prefilter" plugins. @@ -299,7 +297,7 @@ type FilterPlugin interface { // For example, during preemption, we may pass a copy of the original // nodeInfo object that has some pods removed from it to evaluate the // possibility of preempting them to schedule the target pod. - Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status + Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status } // PreScorePlugin is an interface for Pre-score plugin. Pre-score is an @@ -425,17 +423,17 @@ type Framework interface { // preemption, we may pass a copy of the original nodeInfo object that has some pods // removed from it to evaluate the possibility of preempting them to // schedule the target pod. - RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) PluginToStatus + RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) PluginToStatus // RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured // PreFilter plugins. It returns directly if any of the plugins return any // status other than Success. - RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status + RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status // RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured // PreFilter plugins. It returns directly if any of the plugins return any // status other than Success. - RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status + RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status // RunPreScorePlugins runs the set of configured pre-score plugins. If any // of these plugins returns any status other than "Success", the given pod is rejected. @@ -504,7 +502,7 @@ type FrameworkHandle interface { // cycle (pre-bind/bind/post-bind/un-reserve plugin) should not use it, // otherwise a concurrent read/write error might occur, they should use scheduler // cache instead. - SnapshotSharedLister() schedulerlisters.SharedLister + SnapshotSharedLister() SharedLister // IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map. 
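Aside (not part of the patch): with NodeInfo and SharedLister folded into this package, a plugin needs only the single framework/v1alpha1 import for both the extension-point interfaces and the node data types. A minimal sketch of a hypothetical filter plugin written against the updated interface — the package, plugin name, pod-count rule, and the elided factory are all illustrative, not part of this patch:

    package podcountlimit // hypothetical plugin, for illustration only

    import (
        "context"
        "fmt"

        v1 "k8s.io/api/core/v1"
        framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    )

    // PodCountLimit rejects nodes that already run maxPods pods. Only the
    // framework types and signatures below come from this patch.
    type PodCountLimit struct {
        handle  framework.FrameworkHandle // injected by the plugin factory (elided)
        maxPods int
    }

    var _ framework.FilterPlugin = &PodCountLimit{}

    func (pl *PodCountLimit) Name() string { return "PodCountLimit" }

    // Filter receives *framework.NodeInfo directly; no second import is needed
    // for the node data types any more.
    func (pl *PodCountLimit) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
        if nodeInfo.Node() == nil {
            return framework.NewStatus(framework.Error, "node not found")
        }
        if len(nodeInfo.Pods()) >= pl.maxPods {
            return framework.NewStatus(framework.Unschedulable, fmt.Sprintf("node already runs %d pods", len(nodeInfo.Pods())))
        }
        return nil
    }

    // The snapshot lister exposed through the handle is now framework.SharedLister,
    // so node views come back as []*framework.NodeInfo.
    func (pl *PodCountLimit) nodesInSnapshot() ([]*framework.NodeInfo, error) {
        return pl.handle.SnapshotSharedLister().NodeInfos().List()
    }
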
IterateOverWaitingPods(callback func(WaitingPod)) diff --git a/pkg/scheduler/listers/listers.go b/pkg/scheduler/framework/v1alpha1/listers.go similarity index 91% rename from pkg/scheduler/listers/listers.go rename to pkg/scheduler/framework/v1alpha1/listers.go index 96df2f86422..cefec74ecab 100644 --- a/pkg/scheduler/listers/listers.go +++ b/pkg/scheduler/framework/v1alpha1/listers.go @@ -14,13 +14,12 @@ See the License for the specific language governing permissions and limitations under the License. */ -package listers +package v1alpha1 import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" v1listers "k8s.io/client-go/listers/core/v1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // PodFilter is a function to filter a pod. If pod passed return true else return false. @@ -38,11 +37,11 @@ type PodLister interface { // NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name. type NodeInfoLister interface { // Returns the list of NodeInfos. - List() ([]*schedulertypes.NodeInfo, error) + List() ([]*NodeInfo, error) // Returns the list of NodeInfos of nodes with pods with affinity terms. - HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) + HavePodsWithAffinityList() ([]*NodeInfo, error) // Returns the NodeInfo of the given node name. - Get(nodeName string) (*schedulertypes.NodeInfo, error) + Get(nodeName string) (*NodeInfo, error) } // SharedLister groups scheduler-specific listers. diff --git a/pkg/scheduler/types/node_info.go b/pkg/scheduler/framework/v1alpha1/types.go similarity index 89% rename from pkg/scheduler/types/node_info.go rename to pkg/scheduler/framework/v1alpha1/types.go index a288ff7298b..2326b2b0b52 100644 --- a/pkg/scheduler/types/node_info.go +++ b/pkg/scheduler/framework/v1alpha1/types.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +package v1alpha1 import ( "errors" @@ -689,3 +689,117 @@ func (n *NodeInfo) Filter(pod *v1.Pod) bool { } return false } + +// DefaultBindAllHostIP defines the default ip address used to bind to all host. +const DefaultBindAllHostIP = "0.0.0.0" + +// ProtocolPort represents a protocol port pair, e.g. tcp:80. +type ProtocolPort struct { + Protocol string + Port int32 +} + +// NewProtocolPort creates a ProtocolPort instance. 
+func NewProtocolPort(protocol string, port int32) *ProtocolPort { + pp := &ProtocolPort{ + Protocol: protocol, + Port: port, + } + + if len(pp.Protocol) == 0 { + pp.Protocol = string(v1.ProtocolTCP) + } + + return pp +} + +// HostPortInfo stores mapping from ip to a set of ProtocolPort +type HostPortInfo map[string]map[ProtocolPort]struct{} + +// Add adds (ip, protocol, port) to HostPortInfo +func (h HostPortInfo) Add(ip, protocol string, port int32) { + if port <= 0 { + return + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + if _, ok := h[ip]; !ok { + h[ip] = map[ProtocolPort]struct{}{ + *pp: {}, + } + return + } + + h[ip][*pp] = struct{}{} +} + +// Remove removes (ip, protocol, port) from HostPortInfo +func (h HostPortInfo) Remove(ip, protocol string, port int32) { + if port <= 0 { + return + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + if m, ok := h[ip]; ok { + delete(m, *pp) + if len(h[ip]) == 0 { + delete(h, ip) + } + } +} + +// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo +func (h HostPortInfo) Len() int { + length := 0 + for _, m := range h { + length += len(m) + } + return length +} + +// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing +// ones in HostPortInfo. +func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { + if port <= 0 { + return false + } + + h.sanitize(&ip, &protocol) + + pp := NewProtocolPort(protocol, port) + + // If ip is 0.0.0.0 check all IP's (protocol, port) pair + if ip == DefaultBindAllHostIP { + for _, m := range h { + if _, ok := m[*pp]; ok { + return true + } + } + return false + } + + // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair + for _, key := range []string{DefaultBindAllHostIP, ip} { + if m, ok := h[key]; ok { + if _, ok2 := m[*pp]; ok2 { + return true + } + } + } + + return false +} + +// sanitize the parameters +func (h HostPortInfo) sanitize(ip, protocol *string) { + if len(*ip) == 0 { + *ip = DefaultBindAllHostIP + } + if len(*protocol) == 0 { + *protocol = string(v1.ProtocolTCP) + } +} diff --git a/pkg/scheduler/types/node_info_test.go b/pkg/scheduler/framework/v1alpha1/types_test.go similarity index 83% rename from pkg/scheduler/types/node_info_test.go rename to pkg/scheduler/framework/v1alpha1/types_test.go index aa0cac9c7bf..8b3d0abbdaa 100644 --- a/pkg/scheduler/types/node_info_test.go +++ b/pkg/scheduler/framework/v1alpha1/types_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package types +package v1alpha1 import ( "fmt" @@ -967,3 +967,213 @@ func fakeNodeInfo(pods ...*v1.Pod) *NodeInfo { }) return ni } + +type hostPortInfoParam struct { + protocol, ip string + port int32 +} + +func TestHostPortInfo_AddRemove(t *testing.T) { + tests := []struct { + desc string + added []hostPortInfoParam + removed []hostPortInfoParam + length int + }{ + { + desc: "normal add case", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + // this might not make sense in real case, but the struct doesn't forbid it. 
+ {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + {"TCP", "0.0.0.0", 0}, + {"TCP", "0.0.0.0", -1}, + }, + length: 8, + }, + { + desc: "empty ip and protocol add should work", + added: []hostPortInfoParam{ + {"", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"", "127.0.0.1", 81}, + {"", "127.0.0.1", 82}, + {"", "", 79}, + {"UDP", "", 80}, + {"", "", 81}, + {"", "", 82}, + {"", "", 0}, + {"", "", -1}, + }, + length: 8, + }, + { + desc: "normal remove case", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + removed: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + length: 0, + }, + { + desc: "empty ip and protocol remove should work", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + removed: []hostPortInfoParam{ + {"", "127.0.0.1", 79}, + {"", "127.0.0.1", 81}, + {"", "127.0.0.1", 82}, + {"UDP", "127.0.0.1", 80}, + {"", "", 79}, + {"", "", 81}, + {"", "", 82}, + {"UDP", "", 80}, + }, + length: 0, + }, + } + + for _, test := range tests { + hp := make(HostPortInfo) + for _, param := range test.added { + hp.Add(param.ip, param.protocol, param.port) + } + for _, param := range test.removed { + hp.Remove(param.ip, param.protocol, param.port) + } + if hp.Len() != test.length { + t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len()) + t.Error(hp) + } + } +} + +func TestHostPortInfo_Check(t *testing.T) { + tests := []struct { + desc string + added []hostPortInfoParam + check hostPortInfoParam + expect bool + }{ + { + desc: "empty check should check 0.0.0.0 and TCP", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"", "", 81}, + expect: false, + }, + { + desc: "empty check should check 0.0.0.0 and TCP (conflicted)", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"", "", 80}, + expect: true, + }, + { + desc: "empty port check should pass", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"", "", 0}, + expect: false, + }, + { + desc: "0.0.0.0 should check all registered IPs", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, + expect: true, + }, + { + desc: "0.0.0.0 with different protocol should be allowed", + added: []hostPortInfoParam{ + {"UDP", "127.0.0.1", 80}, + }, + check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, + expect: false, + }, + { + desc: "0.0.0.0 with different port should be allowed", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + }, + check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, + expect: false, + }, + { + desc: "normal ip should check all registered 0.0.0.0", + added: []hostPortInfoParam{ + {"TCP", "0.0.0.0", 80}, + }, + check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, + expect: true, + }, + { + desc: "normal ip with different port/protocol should be 
allowed (0.0.0.0)", + added: []hostPortInfoParam{ + {"TCP", "0.0.0.0", 79}, + {"UDP", "0.0.0.0", 80}, + {"TCP", "0.0.0.0", 81}, + {"TCP", "0.0.0.0", 82}, + }, + check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, + expect: false, + }, + { + desc: "normal ip with different port/protocol should be allowed", + added: []hostPortInfoParam{ + {"TCP", "127.0.0.1", 79}, + {"UDP", "127.0.0.1", 80}, + {"TCP", "127.0.0.1", 81}, + {"TCP", "127.0.0.1", 82}, + }, + check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, + expect: false, + }, + } + + for _, test := range tests { + hp := make(HostPortInfo) + for _, param := range test.added { + hp.Add(param.ip, param.protocol, param.port) + } + if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect { + t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect) + } + } +} diff --git a/pkg/scheduler/internal/cache/BUILD b/pkg/scheduler/internal/cache/BUILD index 51f098e4d33..57a2696627e 100644 --- a/pkg/scheduler/internal/cache/BUILD +++ b/pkg/scheduler/internal/cache/BUILD @@ -12,9 +12,8 @@ go_library( visibility = ["//pkg/scheduler:__subpackages__"], deps = [ "//pkg/features:go_default_library", - "//pkg/scheduler/listers:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/metrics:go_default_library", - "//pkg/scheduler/types:go_default_library", "//pkg/util/node:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", @@ -35,7 +34,7 @@ go_test( embed = [":go_default_library"], deps = [ "//pkg/features:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/util:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/pkg/scheduler/internal/cache/cache.go b/pkg/scheduler/internal/cache/cache.go index 013b888f7b9..fc1ec3b0208 100644 --- a/pkg/scheduler/internal/cache/cache.go +++ b/pkg/scheduler/internal/cache/cache.go @@ -28,9 +28,8 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/klog" "k8s.io/kubernetes/pkg/features" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/pkg/scheduler/metrics" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) var ( @@ -51,7 +50,7 @@ func New(ttl time.Duration, stop <-chan struct{}) Cache { // linked list. When a NodeInfo is updated, it goes to the head of the list. // The items closer to the head are the most recently updated items. type nodeInfoListItem struct { - info *schedulertypes.NodeInfo + info *framework.NodeInfo next *nodeInfoListItem prev *nodeInfoListItem } @@ -93,8 +92,8 @@ type imageState struct { } // createImageStateSummary returns a summarizing snapshot of the given image's state. -func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulertypes.ImageStateSummary { - return &schedulertypes.ImageStateSummary{ +func (cache *schedulerCache) createImageStateSummary(state *imageState) *framework.ImageStateSummary { + return &framework.ImageStateSummary{ Size: state.size, NumNodes: len(state.nodes), } @@ -115,7 +114,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul } // newNodeInfoListItem initializes a new nodeInfoListItem. 
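Aside (not part of the patch): the HostPortInfo and ProtocolPort helpers folded in from the deleted pkg/scheduler/types/host_ports.go keep their semantics — an empty IP sanitizes to 0.0.0.0, an empty protocol to TCP, and CheckConflict treats 0.0.0.0 as a wildcard in both directions, which is what the tests above encode. A short sketch from an external caller's view (addresses and ports are arbitrary):

    hpi := make(framework.HostPortInfo)
    hpi.Add("127.0.0.1", "TCP", 8080)
    hpi.Add("", "", 443) // sanitized to 0.0.0.0/TCP

    hpi.CheckConflict("0.0.0.0", "TCP", 8080)   // true: wildcard IP matches every registered IP
    hpi.CheckConflict("10.0.0.7", "TCP", 443)   // true: a registered 0.0.0.0 port conflicts with any IP
    hpi.CheckConflict("127.0.0.1", "UDP", 8080) // false: different protocol
    hpi.Len()                                   // 2

    hpi.Remove("", "", 443) // the same sanitization applies on removal
    hpi.Len()               // 1
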
-func newNodeInfoListItem(ni *schedulertypes.NodeInfo) *nodeInfoListItem { +func newNodeInfoListItem(ni *framework.NodeInfo) *nodeInfoListItem { return &nodeInfoListItem{ info: ni, } @@ -180,7 +179,7 @@ func (cache *schedulerCache) Dump() *Dump { cache.mu.RLock() defer cache.mu.RUnlock() - nodes := make(map[string]*schedulertypes.NodeInfo, len(cache.nodes)) + nodes := make(map[string]*framework.NodeInfo, len(cache.nodes)) for k, v := range cache.nodes { nodes[k] = v.info.Clone() } @@ -231,7 +230,7 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error { existing, ok := nodeSnapshot.nodeInfoMap[np.Name] if !ok { updateAllLists = true - existing = &schedulertypes.NodeInfo{} + existing = &framework.NodeInfo{} nodeSnapshot.nodeInfoMap[np.Name] = existing } clone := node.info.Clone() @@ -277,10 +276,10 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error { } func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) { - snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes) + snapshot.havePodsWithAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) if updateAll { // Take a snapshot of the nodes order in the tree - snapshot.nodeInfoList = make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes) + snapshot.nodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) for i := 0; i < cache.nodeTree.numNodes; i++ { nodeName := cache.nodeTree.next() if n := snapshot.nodeInfoMap[nodeName]; n != nil { @@ -320,7 +319,7 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) { return cache.FilteredList(alwaysTrue, selector) } -func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { +func (cache *schedulerCache) FilteredList(podFilter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { cache.mu.RLock() defer cache.mu.RUnlock() // podFilter is expected to return true for most or all of the pods. 
We @@ -342,7 +341,7 @@ func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter, } func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return err } @@ -368,7 +367,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error { // finishBinding exists to make tests determinitistic by injecting now as an argument func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return err } @@ -387,7 +386,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { } func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return err } @@ -419,7 +418,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { func (cache *schedulerCache) addPod(pod *v1.Pod) { n, ok := cache.nodes[pod.Spec.NodeName] if !ok { - n = newNodeInfoListItem(schedulertypes.NewNodeInfo()) + n = newNodeInfoListItem(framework.NewNodeInfo()) cache.nodes[pod.Spec.NodeName] = n } n.info.AddPod(pod) @@ -452,7 +451,7 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error { } func (cache *schedulerCache) AddPod(pod *v1.Pod) error { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return err } @@ -489,7 +488,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error { } func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { - key, err := schedulertypes.GetPodKey(oldPod) + key, err := framework.GetPodKey(oldPod) if err != nil { return err } @@ -517,7 +516,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { } func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return err } @@ -546,7 +545,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { } func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return false, err } @@ -564,7 +563,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { // GetPod might return a pod for which its node has already been deleted from // the main cache. This is useful to properly process pod update events. 
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { - key, err := schedulertypes.GetPodKey(pod) + key, err := framework.GetPodKey(pod) if err != nil { return nil, err } @@ -586,7 +585,7 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error { n, ok := cache.nodes[node.Name] if !ok { - n = newNodeInfoListItem(schedulertypes.NewNodeInfo()) + n = newNodeInfoListItem(framework.NewNodeInfo()) cache.nodes[node.Name] = n } else { cache.removeNodeImageStates(n.info.Node()) @@ -604,7 +603,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error { n, ok := cache.nodes[newNode.Name] if !ok { - n = newNodeInfoListItem(schedulertypes.NewNodeInfo()) + n = newNodeInfoListItem(framework.NewNodeInfo()) cache.nodes[newNode.Name] = n cache.nodeTree.addNode(newNode) } else { @@ -641,8 +640,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error { // addNodeImageStates adds states of the images on given node to the given nodeInfo and update the imageStates in // scheduler cache. This function assumes the lock to scheduler cache has been acquired. -func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulertypes.NodeInfo) { - newSum := make(map[string]*schedulertypes.ImageStateSummary) +func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *framework.NodeInfo) { + newSum := make(map[string]*framework.ImageStateSummary) for _, image := range node.Status.Images { for _, name := range image.Names { diff --git a/pkg/scheduler/internal/cache/cache_test.go b/pkg/scheduler/internal/cache/cache_test.go index 831d881980a..8fe04df8545 100644 --- a/pkg/scheduler/internal/cache/cache_test.go +++ b/pkg/scheduler/internal/cache/cache_test.go @@ -31,11 +31,11 @@ import ( utilfeature "k8s.io/apiserver/pkg/util/feature" featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/kubernetes/pkg/features" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" schedutil "k8s.io/kubernetes/pkg/scheduler/util" ) -func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulertypes.NodeInfo) error { +func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *framework.NodeInfo) error { if (actual == nil) != (expected == nil) { return errors.New("one of the actual or expected is nil and the other is not") } @@ -70,21 +70,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo return b } -func (b *hostPortInfoBuilder) build() schedulertypes.HostPortInfo { - res := make(schedulertypes.HostPortInfo) +func (b *hostPortInfoBuilder) build() framework.HostPortInfo { + res := make(framework.HostPortInfo) for _, param := range b.inputs { res.Add(param.ip, param.protocol, param.port) } return res } -func newNodeInfo(requestedResource *schedulertypes.Resource, - nonzeroRequest *schedulertypes.Resource, +func newNodeInfo(requestedResource *framework.Resource, + nonzeroRequest *framework.Resource, pods []*v1.Pod, - usedPorts schedulertypes.HostPortInfo, - imageStates map[string]*schedulertypes.ImageStateSummary, -) *schedulertypes.NodeInfo { - nodeInfo := schedulertypes.NewNodeInfo(pods...) + usedPorts framework.HostPortInfo, + imageStates map[string]*framework.ImageStateSummary, +) *framework.NodeInfo { + nodeInfo := framework.NewNodeInfo(pods...) 
nodeInfo.SetRequestedResource(requestedResource) nodeInfo.SetNonZeroRequest(nonzeroRequest) nodeInfo.SetUsedPorts(usedPorts) @@ -112,98 +112,98 @@ func TestAssumePodScheduled(t *testing.T) { tests := []struct { pods []*v1.Pod - wNodeInfo *schedulertypes.NodeInfo + wNodeInfo *framework.NodeInfo }{{ pods: []*v1.Pod{testPods[0]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[1], testPods[2]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 300, Memory: 1524, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 300, Memory: 1524, }, []*v1.Pod{testPods[1], testPods[2]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, { // test non-zero request pods: []*v1.Pod{testPods[3]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 0, Memory: 0, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: schedutil.DefaultMilliCPURequest, Memory: schedutil.DefaultMemoryRequest, }, []*v1.Pod{testPods[3]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[4]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3}, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[4]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[4], testPods[5]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 300, Memory: 1524, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8}, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 300, Memory: 1524, }, []*v1.Pod{testPods[4], testPods[5]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, { pods: []*v1.Pod{testPods[6]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[6]}, newHostPortInfoBuilder().build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, } @@ -263,13 +263,13 @@ func TestExpirePod(t *testing.T) { pods []*testExpirePodStruct cleanupTime time.Time - wNodeInfo *schedulertypes.NodeInfo + wNodeInfo *framework.NodeInfo }{{ // assumed pod would expires pods: []*testExpirePodStruct{ {pod: testPods[0], finishBind: true, assumedTime: now}, }, cleanupTime: now.Add(2 * ttl), - wNodeInfo: schedulertypes.NewNodeInfo(), + wNodeInfo: framework.NewNodeInfo(), }, { // first one would expire, second and third would 
not. pods: []*testExpirePodStruct{ {pod: testPods[0], finishBind: true, assumedTime: now}, @@ -278,18 +278,18 @@ func TestExpirePod(t *testing.T) { }, cleanupTime: now.Add(2 * ttl), wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 400, Memory: 2048, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 400, Memory: 2048, }, // Order gets altered when removing pods. []*v1.Pod{testPods[2], testPods[1]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }} @@ -336,22 +336,22 @@ func TestAddPodWillConfirm(t *testing.T) { podsToAssume []*v1.Pod podsToAdd []*v1.Pod - wNodeInfo *schedulertypes.NodeInfo + wNodeInfo *framework.NodeInfo }{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed. podsToAssume: []*v1.Pod{testPods[0], testPods[1]}, podsToAdd: []*v1.Pod{testPods[0]}, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }} @@ -438,25 +438,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) { podsToAdd []*v1.Pod podsToUpdate [][]*v1.Pod - wNodeInfo map[string]*schedulertypes.NodeInfo + wNodeInfo map[string]*framework.NodeInfo }{{ podsToAssume: []*v1.Pod{assumedPod.DeepCopy()}, podsToAdd: []*v1.Pod{addedPod.DeepCopy()}, podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}}, - wNodeInfo: map[string]*schedulertypes.NodeInfo{ + wNodeInfo: map[string]*framework.NodeInfo{ "assumed-node": nil, "actual-node": newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 200, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 200, Memory: 500, }, []*v1.Pod{updatedPod.DeepCopy()}, newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }, }} @@ -499,21 +499,21 @@ func TestAddPodAfterExpiration(t *testing.T) { tests := []struct { pod *v1.Pod - wNodeInfo *schedulertypes.NodeInfo + wNodeInfo *framework.NodeInfo }{{ pod: basePod, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{basePod}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }} @@ -555,34 +555,34 @@ func TestUpdatePod(t *testing.T) { podsToAdd []*v1.Pod podsToUpdate []*v1.Pod - wNodeInfo []*schedulertypes.NodeInfo + wNodeInfo []*framework.NodeInfo }{{ // add a pod and then update it twice podsToAdd: []*v1.Pod{testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, - wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo( - &schedulertypes.Resource{ + wNodeInfo: []*framework.NodeInfo{newNodeInfo( + &framework.Resource{ MilliCPU: 200, Memory: 1024, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 200, Memory: 1024, }, []*v1.Pod{testPods[1]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulertypes.ImageStateSummary), + 
make(map[string]*framework.ImageStateSummary), ), newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), )}, }} @@ -686,35 +686,35 @@ func TestExpireAddUpdatePod(t *testing.T) { podsToAdd []*v1.Pod podsToUpdate []*v1.Pod - wNodeInfo []*schedulertypes.NodeInfo + wNodeInfo []*framework.NodeInfo }{{ // Pod is assumed, expired, and added. Then it would be updated twice. podsToAssume: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, - wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo( - &schedulertypes.Resource{ + wNodeInfo: []*framework.NodeInfo{newNodeInfo( + &framework.Resource{ MilliCPU: 200, Memory: 1024, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 200, Memory: 1024, }, []*v1.Pod{testPods[1]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{testPods[0]}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), )}, }} @@ -780,21 +780,21 @@ func TestEphemeralStorageResource(t *testing.T) { podE := makePodWithEphemeralStorage(nodeName, "500") tests := []struct { pod *v1.Pod - wNodeInfo *schedulertypes.NodeInfo + wNodeInfo *framework.NodeInfo }{ { pod: podE, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ EphemeralStorage: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: schedutil.DefaultMilliCPURequest, Memory: schedutil.DefaultMemoryRequest, }, []*v1.Pod{podE}, - schedulertypes.HostPortInfo{}, - make(map[string]*schedulertypes.ImageStateSummary), + framework.HostPortInfo{}, + make(map[string]*framework.ImageStateSummary), ), }, } @@ -827,7 +827,7 @@ func TestRemovePod(t *testing.T) { tests := []struct { nodes []*v1.Node pod *v1.Pod - wNodeInfo *schedulertypes.NodeInfo + wNodeInfo *framework.NodeInfo }{{ nodes: []*v1.Node{ { @@ -839,17 +839,17 @@ func TestRemovePod(t *testing.T) { }, pod: basePod, wNodeInfo: newNodeInfo( - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, - &schedulertypes.Resource{ + &framework.Resource{ MilliCPU: 100, Memory: 500, }, []*v1.Pod{basePod}, newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), - make(map[string]*schedulertypes.ImageStateSummary), + make(map[string]*framework.ImageStateSummary), ), }} @@ -930,7 +930,7 @@ func TestForgetPod(t *testing.T) { // getResourceRequest returns the resource request of all containers in Pods; // excluding initContainers. func getResourceRequest(pod *v1.Pod) v1.ResourceList { - result := &schedulertypes.Resource{} + result := &framework.Resource{} for _, container := range pod.Spec.Containers { result.Add(container.Resources.Requests) } @@ -939,13 +939,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList { } // buildNodeInfo creates a NodeInfo by simulating node operations in cache. 
-func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulertypes.NodeInfo { - expected := schedulertypes.NewNodeInfo() +func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo { + expected := framework.NewNodeInfo() // Simulate SetNode. expected.SetNode(node) - expected.SetAllocatableResource(schedulertypes.NewResource(node.Status.Allocatable)) + expected.SetAllocatableResource(framework.NewResource(node.Status.Allocatable)) expected.SetTaints(node.Spec.Taints) expected.SetGeneration(expected.GetGeneration() + 1) @@ -1533,8 +1533,8 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot) return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList)) } - expectedNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes) - expectedHavePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes) + expectedNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) + expectedHavePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes) for i := 0; i < cache.nodeTree.numNodes; i++ { nodeName := cache.nodeTree.next() if n := snapshot.nodeInfoMap[nodeName]; n != nil { diff --git a/pkg/scheduler/internal/cache/debugger/BUILD b/pkg/scheduler/internal/cache/debugger/BUILD index 62baa63e3d7..2eb744a6e00 100644 --- a/pkg/scheduler/internal/cache/debugger/BUILD +++ b/pkg/scheduler/internal/cache/debugger/BUILD @@ -12,9 +12,9 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger", visibility = ["//pkg/scheduler:__subpackages__"], deps = [ + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/queue:go_default_library", - "//pkg/scheduler/types:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", @@ -27,7 +27,7 @@ go_test( srcs = ["comparer_test.go"], embed = [":go_default_library"], deps = [ - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", ], diff --git a/pkg/scheduler/internal/cache/debugger/comparer.go b/pkg/scheduler/internal/cache/debugger/comparer.go index af8efc46064..897066301ae 100644 --- a/pkg/scheduler/internal/cache/debugger/comparer.go +++ b/pkg/scheduler/internal/cache/debugger/comparer.go @@ -24,9 +24,9 @@ import ( "k8s.io/apimachinery/pkg/labels" corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/klog" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // CacheComparer is an implementation of the Scheduler's cache comparer. @@ -68,7 +68,7 @@ func (c *CacheComparer) Compare() error { } // CompareNodes compares actual nodes with cached nodes. 
-func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulertypes.NodeInfo) (missed, redundant []string) { +func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) { actual := []string{} for _, node := range nodes { actual = append(actual, node.Name) @@ -83,7 +83,7 @@ func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sch } // ComparePods compares actual pods with cached pods. -func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulertypes.NodeInfo) (missed, redundant []string) { +func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) { actual := []string{} for _, pod := range pods { actual = append(actual, string(pod.UID)) diff --git a/pkg/scheduler/internal/cache/debugger/comparer_test.go b/pkg/scheduler/internal/cache/debugger/comparer_test.go index c9c253123cb..e0348e81525 100644 --- a/pkg/scheduler/internal/cache/debugger/comparer_test.go +++ b/pkg/scheduler/internal/cache/debugger/comparer_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/types" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) func TestCompareNodes(t *testing.T) { @@ -72,9 +72,9 @@ func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T) nodes = append(nodes, node) } - nodeInfo := make(map[string]*schedulertypes.NodeInfo) + nodeInfo := make(map[string]*framework.NodeInfo) for _, nodeName := range cached { - nodeInfo[nodeName] = &schedulertypes.NodeInfo{} + nodeInfo[nodeName] = &framework.NodeInfo{} } m, r := compare.CompareNodes(nodes, nodeInfo) @@ -170,14 +170,14 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes queuedPods = append(queuedPods, pod) } - nodeInfo := make(map[string]*schedulertypes.NodeInfo) + nodeInfo := make(map[string]*framework.NodeInfo) for _, uid := range cached { pod := &v1.Pod{} pod.UID = types.UID(uid) pod.Namespace = "ns" pod.Name = uid - nodeInfo[uid] = schedulertypes.NewNodeInfo(pod) + nodeInfo[uid] = framework.NewNodeInfo(pod) } m, r := compare.ComparePods(pods, queuedPods, nodeInfo) diff --git a/pkg/scheduler/internal/cache/debugger/dumper.go b/pkg/scheduler/internal/cache/debugger/dumper.go index 3c28852df20..276b628aa7d 100644 --- a/pkg/scheduler/internal/cache/debugger/dumper.go +++ b/pkg/scheduler/internal/cache/debugger/dumper.go @@ -23,9 +23,9 @@ import ( "k8s.io/klog" "k8s.io/api/core/v1" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" "k8s.io/kubernetes/pkg/scheduler/internal/queue" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) // CacheDumper writes some information from the scheduler cache and the scheduling queue to the @@ -61,7 +61,7 @@ func (d *CacheDumper) dumpSchedulingQueue() { } // printNodeInfo writes parts of NodeInfo to a string. 
-func (d *CacheDumper) printNodeInfo(n *schedulertypes.NodeInfo) string { +func (d *CacheDumper) printNodeInfo(n *framework.NodeInfo) string { var nodeData strings.Builder nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n", n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods()))) diff --git a/pkg/scheduler/internal/cache/fake/BUILD b/pkg/scheduler/internal/cache/fake/BUILD index 4eb6c41e533..ee657f33c0d 100644 --- a/pkg/scheduler/internal/cache/fake/BUILD +++ b/pkg/scheduler/internal/cache/fake/BUILD @@ -6,8 +6,8 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake", visibility = ["//pkg/scheduler:__subpackages__"], deps = [ + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/internal/cache:go_default_library", - "//pkg/scheduler/listers:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", ], diff --git a/pkg/scheduler/internal/cache/fake/fake_cache.go b/pkg/scheduler/internal/cache/fake/fake_cache.go index 40010dbc793..c1d47988fa9 100644 --- a/pkg/scheduler/internal/cache/fake/fake_cache.go +++ b/pkg/scheduler/internal/cache/fake/fake_cache.go @@ -19,8 +19,8 @@ package fake import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" ) // Cache is used for testing @@ -83,7 +83,7 @@ func (c *Cache) UpdateSnapshot(snapshot *internalcache.Snapshot) error { func (c *Cache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil } // FilteredList is a fake method for testing. -func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { +func (c *Cache) FilteredList(filter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { return nil, nil } diff --git a/pkg/scheduler/internal/cache/interface.go b/pkg/scheduler/internal/cache/interface.go index 17eebbebd67..a851de26c3f 100644 --- a/pkg/scheduler/internal/cache/interface.go +++ b/pkg/scheduler/internal/cache/interface.go @@ -18,8 +18,7 @@ package cache import ( v1 "k8s.io/api/core/v1" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) // Cache collects pods' information and provides node-level aggregated information. @@ -57,7 +56,7 @@ import ( // - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue, // a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache. type Cache interface { - schedulerlisters.PodLister + framework.PodLister // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). @@ -108,5 +107,5 @@ type Cache interface { // Dump is a dump of the cache state. 
type Dump struct { AssumedPods map[string]bool - Nodes map[string]*schedulertypes.NodeInfo + Nodes map[string]*framework.NodeInfo } diff --git a/pkg/scheduler/internal/cache/snapshot.go b/pkg/scheduler/internal/cache/snapshot.go index 1950f046f64..50527bc072b 100644 --- a/pkg/scheduler/internal/cache/snapshot.go +++ b/pkg/scheduler/internal/cache/snapshot.go @@ -22,36 +22,35 @@ import ( v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/sets" - schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) // Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a // snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle. type Snapshot struct { // nodeInfoMap a map of node name to a snapshot of its NodeInfo. - nodeInfoMap map[string]*schedulertypes.NodeInfo + nodeInfoMap map[string]*framework.NodeInfo // nodeInfoList is the list of nodes as ordered in the cache's nodeTree. - nodeInfoList []*schedulertypes.NodeInfo + nodeInfoList []*framework.NodeInfo // havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. - havePodsWithAffinityNodeInfoList []*schedulertypes.NodeInfo + havePodsWithAffinityNodeInfoList []*framework.NodeInfo generation int64 } -var _ schedulerlisters.SharedLister = &Snapshot{} +var _ framework.SharedLister = &Snapshot{} // NewEmptySnapshot initializes a Snapshot struct and returns it. func NewEmptySnapshot() *Snapshot { return &Snapshot{ - nodeInfoMap: make(map[string]*schedulertypes.NodeInfo), + nodeInfoMap: make(map[string]*framework.NodeInfo), } } // NewSnapshot initializes a Snapshot struct and returns it. func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot { nodeInfoMap := createNodeInfoMap(pods, nodes) - nodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodeInfoMap)) - havePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodeInfoMap)) + nodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap)) + havePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap)) for _, v := range nodeInfoMap { nodeInfoList = append(nodeInfoList, v) if len(v.PodsWithAffinity()) > 0 { @@ -70,12 +69,12 @@ func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot { // createNodeInfoMap obtains a list of pods and pivots that list into a map // where the keys are node names and the values are the aggregated information // for that node. 
-func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulertypes.NodeInfo { - nodeNameToInfo := make(map[string]*schedulertypes.NodeInfo) +func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*framework.NodeInfo { + nodeNameToInfo := make(map[string]*framework.NodeInfo) for _, pod := range pods { nodeName := pod.Spec.NodeName if _, ok := nodeNameToInfo[nodeName]; !ok { - nodeNameToInfo[nodeName] = schedulertypes.NewNodeInfo() + nodeNameToInfo[nodeName] = framework.NewNodeInfo() } nodeNameToInfo[nodeName].AddPod(pod) } @@ -83,7 +82,7 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulerty for _, node := range nodes { if _, ok := nodeNameToInfo[node.Name]; !ok { - nodeNameToInfo[node.Name] = schedulertypes.NewNodeInfo() + nodeNameToInfo[node.Name] = framework.NewNodeInfo() } nodeInfo := nodeNameToInfo[node.Name] nodeInfo.SetNode(node) @@ -93,12 +92,12 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulerty } // getNodeImageStates returns the given node's image states based on the given imageExistence map. -func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*schedulertypes.ImageStateSummary { - imageStates := make(map[string]*schedulertypes.ImageStateSummary) +func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*framework.ImageStateSummary { + imageStates := make(map[string]*framework.ImageStateSummary) for _, image := range node.Status.Images { for _, name := range image.Names { - imageStates[name] = &schedulertypes.ImageStateSummary{ + imageStates[name] = &framework.ImageStateSummary{ Size: image.SizeBytes, NumNodes: len(imageExistenceMap[name]), } @@ -125,12 +124,12 @@ func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String { } // Pods returns a PodLister -func (s *Snapshot) Pods() schedulerlisters.PodLister { +func (s *Snapshot) Pods() framework.PodLister { return podLister(s.nodeInfoList) } // NodeInfos returns a NodeInfoLister. -func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister { +func (s *Snapshot) NodeInfos() framework.NodeInfoLister { return s } @@ -139,7 +138,7 @@ func (s *Snapshot) NumNodes() int { return len(s.nodeInfoList) } -type podLister []*schedulertypes.NodeInfo +type podLister []*framework.NodeInfo // List returns the list of pods in the snapshot. func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) { @@ -148,7 +147,7 @@ func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) { } // FilteredList returns a filtered list of pods in the snapshot. -func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { +func (p podLister) FilteredList(filter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) { // podFilter is expected to return true for most or all of the pods. We // can avoid expensive array growth without wasting too much memory by // pre-allocating capacity. @@ -168,17 +167,17 @@ func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labe } // List returns the list of nodes in the snapshot. 
-func (s *Snapshot) List() ([]*schedulertypes.NodeInfo, error) { +func (s *Snapshot) List() ([]*framework.NodeInfo, error) { return s.nodeInfoList, nil } // HavePodsWithAffinityList returns the list of nodes with at least one pods with inter-pod affinity -func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) { +func (s *Snapshot) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) { return s.havePodsWithAffinityNodeInfoList, nil } // Get returns the NodeInfo of the given node name. -func (s *Snapshot) Get(nodeName string) (*schedulertypes.NodeInfo, error) { +func (s *Snapshot) Get(nodeName string) (*framework.NodeInfo, error) { if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil { return v, nil } diff --git a/pkg/scheduler/internal/cache/snapshot_test.go b/pkg/scheduler/internal/cache/snapshot_test.go index 50ed79270ef..0aa927c2bbd 100644 --- a/pkg/scheduler/internal/cache/snapshot_test.go +++ b/pkg/scheduler/internal/cache/snapshot_test.go @@ -23,7 +23,7 @@ import ( "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) const mb int64 = 1024 * 1024 @@ -32,7 +32,7 @@ func TestGetNodeImageStates(t *testing.T) { tests := []struct { node *v1.Node imageExistenceMap map[string]sets.String - expected map[string]*schedulertypes.ImageStateSummary + expected map[string]*framework.ImageStateSummary }{ { node: &v1.Node{ @@ -58,7 +58,7 @@ func TestGetNodeImageStates(t *testing.T) { "gcr.io/10:v1": sets.NewString("node-0", "node-1"), "gcr.io/200:v1": sets.NewString("node-0"), }, - expected: map[string]*schedulertypes.ImageStateSummary{ + expected: map[string]*framework.ImageStateSummary{ "gcr.io/10:v1": { Size: int64(10 * mb), NumNodes: 2, @@ -78,7 +78,7 @@ func TestGetNodeImageStates(t *testing.T) { "gcr.io/10:v1": sets.NewString("node-1"), "gcr.io/200:v1": sets.NewString(), }, - expected: map[string]*schedulertypes.ImageStateSummary{}, + expected: map[string]*framework.ImageStateSummary{}, }, } diff --git a/pkg/scheduler/listers/BUILD b/pkg/scheduler/listers/BUILD deleted file mode 100644 index 98ed92b7d83..00000000000 --- a/pkg/scheduler/listers/BUILD +++ /dev/null @@ -1,31 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library") - -go_library( - name = "go_default_library", - srcs = ["listers.go"], - importpath = "k8s.io/kubernetes/pkg/scheduler/listers", - visibility = ["//visibility:public"], - deps = [ - "//pkg/scheduler/types:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", - "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [ - ":package-srcs", - "//pkg/scheduler/listers/fake:all-srcs", - ], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/pkg/scheduler/nodeinfo/BUILD b/pkg/scheduler/nodeinfo/BUILD index 214f7e79bd2..818dd307299 100644 --- a/pkg/scheduler/nodeinfo/BUILD +++ b/pkg/scheduler/nodeinfo/BUILD @@ -6,7 +6,7 @@ go_library( importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo", visibility = ["//visibility:public"], deps = [ - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", 
"//staging/src/k8s.io/api/core/v1:go_default_library", ], ) diff --git a/pkg/scheduler/nodeinfo/node_info.go b/pkg/scheduler/nodeinfo/node_info.go index dc5f17e31d8..42cd5031158 100644 --- a/pkg/scheduler/nodeinfo/node_info.go +++ b/pkg/scheduler/nodeinfo/node_info.go @@ -18,26 +18,26 @@ package nodeinfo import ( v1 "k8s.io/api/core/v1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" ) // TODO(#89528): This file defines temporary aliases of types used by kubelet. // Those will be removed and the underlying types defined in scheduler/types will be used directly. // NodeInfo is node level aggregated information. -type NodeInfo = schedulertypes.NodeInfo +type NodeInfo = framework.NodeInfo // Resource is a collection of compute resource. -type Resource = schedulertypes.Resource +type Resource = framework.Resource // NewResource creates a Resource from ResourceList func NewResource(rl v1.ResourceList) *Resource { - return schedulertypes.NewResource(rl) + return framework.NewResource(rl) } // NewNodeInfo returns a ready to use empty NodeInfo object. // If any pods are given in arguments, their information will be aggregated in // the returned object. func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { - return schedulertypes.NewNodeInfo(pods...) + return framework.NewNodeInfo(pods...) } diff --git a/pkg/scheduler/scheduler_test.go b/pkg/scheduler/scheduler_test.go index 68888051220..d0379568084 100644 --- a/pkg/scheduler/scheduler_test.go +++ b/pkg/scheduler/scheduler_test.go @@ -60,7 +60,6 @@ import ( internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/profile" st "k8s.io/kubernetes/pkg/scheduler/testing" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" ) type fakePodConditionUpdater struct{} @@ -401,7 +400,7 @@ func (s *fakeNodeSelector) Name() string { return "FakeNodeSelector" } -func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { if nodeInfo.Node().Name != s.NodeName { return framework.NewStatus(framework.UnschedulableAndUnresolvable) } diff --git a/pkg/scheduler/types/BUILD b/pkg/scheduler/types/BUILD deleted file mode 100644 index 8d69c5de56b..00000000000 --- a/pkg/scheduler/types/BUILD +++ /dev/null @@ -1,49 +0,0 @@ -load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "go_default_library", - srcs = [ - "host_ports.go", - "node_info.go", - ], - importpath = "k8s.io/kubernetes/pkg/scheduler/types", - visibility = ["//visibility:public"], - deps = [ - "//pkg/apis/core/v1/helper:go_default_library", - "//pkg/features:go_default_library", - "//pkg/scheduler/util:go_default_library", - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", - "//vendor/k8s.io/klog:go_default_library", - ], -) - -go_test( - name = "go_default_test", - srcs = [ - "host_ports_test.go", - "node_info_test.go", - ], - embed = [":go_default_library"], - deps = [ - "//staging/src/k8s.io/api/core/v1:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", - 
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", - ], -) - -filegroup( - name = "package-srcs", - srcs = glob(["**"]), - tags = ["automanaged"], - visibility = ["//visibility:private"], -) - -filegroup( - name = "all-srcs", - srcs = [":package-srcs"], - tags = ["automanaged"], - visibility = ["//visibility:public"], -) diff --git a/pkg/scheduler/types/host_ports.go b/pkg/scheduler/types/host_ports.go deleted file mode 100644 index 731f1df965b..00000000000 --- a/pkg/scheduler/types/host_ports.go +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "k8s.io/api/core/v1" -) - -// DefaultBindAllHostIP defines the default ip address used to bind to all host. -const DefaultBindAllHostIP = "0.0.0.0" - -// ProtocolPort represents a protocol port pair, e.g. tcp:80. -type ProtocolPort struct { - Protocol string - Port int32 -} - -// NewProtocolPort creates a ProtocolPort instance. -func NewProtocolPort(protocol string, port int32) *ProtocolPort { - pp := &ProtocolPort{ - Protocol: protocol, - Port: port, - } - - if len(pp.Protocol) == 0 { - pp.Protocol = string(v1.ProtocolTCP) - } - - return pp -} - -// HostPortInfo stores mapping from ip to a set of ProtocolPort -type HostPortInfo map[string]map[ProtocolPort]struct{} - -// Add adds (ip, protocol, port) to HostPortInfo -func (h HostPortInfo) Add(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if _, ok := h[ip]; !ok { - h[ip] = map[ProtocolPort]struct{}{ - *pp: {}, - } - return - } - - h[ip][*pp] = struct{}{} -} - -// Remove removes (ip, protocol, port) from HostPortInfo -func (h HostPortInfo) Remove(ip, protocol string, port int32) { - if port <= 0 { - return - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - if m, ok := h[ip]; ok { - delete(m, *pp) - if len(h[ip]) == 0 { - delete(h, ip) - } - } -} - -// Len returns the total number of (ip, protocol, port) tuple in HostPortInfo -func (h HostPortInfo) Len() int { - length := 0 - for _, m := range h { - length += len(m) - } - return length -} - -// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing -// ones in HostPortInfo. 
-func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool { - if port <= 0 { - return false - } - - h.sanitize(&ip, &protocol) - - pp := NewProtocolPort(protocol, port) - - // If ip is 0.0.0.0 check all IP's (protocol, port) pair - if ip == DefaultBindAllHostIP { - for _, m := range h { - if _, ok := m[*pp]; ok { - return true - } - } - return false - } - - // If ip isn't 0.0.0.0, only check IP and 0.0.0.0's (protocol, port) pair - for _, key := range []string{DefaultBindAllHostIP, ip} { - if m, ok := h[key]; ok { - if _, ok2 := m[*pp]; ok2 { - return true - } - } - } - - return false -} - -// sanitize the parameters -func (h HostPortInfo) sanitize(ip, protocol *string) { - if len(*ip) == 0 { - *ip = DefaultBindAllHostIP - } - if len(*protocol) == 0 { - *protocol = string(v1.ProtocolTCP) - } -} diff --git a/pkg/scheduler/types/host_ports_test.go b/pkg/scheduler/types/host_ports_test.go deleted file mode 100644 index 04507b94a84..00000000000 --- a/pkg/scheduler/types/host_ports_test.go +++ /dev/null @@ -1,231 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package types - -import ( - "testing" -) - -type hostPortInfoParam struct { - protocol, ip string - port int32 -} - -func TestHostPortInfo_AddRemove(t *testing.T) { - tests := []struct { - desc string - added []hostPortInfoParam - removed []hostPortInfoParam - length int - }{ - { - desc: "normal add case", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 79}, - {"UDP", "127.0.0.1", 80}, - {"TCP", "127.0.0.1", 81}, - {"TCP", "127.0.0.1", 82}, - // this might not make sense in real case, but the struct doesn't forbid it. 
- {"TCP", "0.0.0.0", 79}, - {"UDP", "0.0.0.0", 80}, - {"TCP", "0.0.0.0", 81}, - {"TCP", "0.0.0.0", 82}, - {"TCP", "0.0.0.0", 0}, - {"TCP", "0.0.0.0", -1}, - }, - length: 8, - }, - { - desc: "empty ip and protocol add should work", - added: []hostPortInfoParam{ - {"", "127.0.0.1", 79}, - {"UDP", "127.0.0.1", 80}, - {"", "127.0.0.1", 81}, - {"", "127.0.0.1", 82}, - {"", "", 79}, - {"UDP", "", 80}, - {"", "", 81}, - {"", "", 82}, - {"", "", 0}, - {"", "", -1}, - }, - length: 8, - }, - { - desc: "normal remove case", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 79}, - {"UDP", "127.0.0.1", 80}, - {"TCP", "127.0.0.1", 81}, - {"TCP", "127.0.0.1", 82}, - {"TCP", "0.0.0.0", 79}, - {"UDP", "0.0.0.0", 80}, - {"TCP", "0.0.0.0", 81}, - {"TCP", "0.0.0.0", 82}, - }, - removed: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 79}, - {"UDP", "127.0.0.1", 80}, - {"TCP", "127.0.0.1", 81}, - {"TCP", "127.0.0.1", 82}, - {"TCP", "0.0.0.0", 79}, - {"UDP", "0.0.0.0", 80}, - {"TCP", "0.0.0.0", 81}, - {"TCP", "0.0.0.0", 82}, - }, - length: 0, - }, - { - desc: "empty ip and protocol remove should work", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 79}, - {"UDP", "127.0.0.1", 80}, - {"TCP", "127.0.0.1", 81}, - {"TCP", "127.0.0.1", 82}, - {"TCP", "0.0.0.0", 79}, - {"UDP", "0.0.0.0", 80}, - {"TCP", "0.0.0.0", 81}, - {"TCP", "0.0.0.0", 82}, - }, - removed: []hostPortInfoParam{ - {"", "127.0.0.1", 79}, - {"", "127.0.0.1", 81}, - {"", "127.0.0.1", 82}, - {"UDP", "127.0.0.1", 80}, - {"", "", 79}, - {"", "", 81}, - {"", "", 82}, - {"UDP", "", 80}, - }, - length: 0, - }, - } - - for _, test := range tests { - hp := make(HostPortInfo) - for _, param := range test.added { - hp.Add(param.ip, param.protocol, param.port) - } - for _, param := range test.removed { - hp.Remove(param.ip, param.protocol, param.port) - } - if hp.Len() != test.length { - t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len()) - t.Error(hp) - } - } -} - -func TestHostPortInfo_Check(t *testing.T) { - tests := []struct { - desc string - added []hostPortInfoParam - check hostPortInfoParam - expect bool - }{ - { - desc: "empty check should check 0.0.0.0 and TCP", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 80}, - }, - check: hostPortInfoParam{"", "", 81}, - expect: false, - }, - { - desc: "empty check should check 0.0.0.0 and TCP (conflicted)", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 80}, - }, - check: hostPortInfoParam{"", "", 80}, - expect: true, - }, - { - desc: "empty port check should pass", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 80}, - }, - check: hostPortInfoParam{"", "", 0}, - expect: false, - }, - { - desc: "0.0.0.0 should check all registered IPs", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 80}, - }, - check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, - expect: true, - }, - { - desc: "0.0.0.0 with different protocol should be allowed", - added: []hostPortInfoParam{ - {"UDP", "127.0.0.1", 80}, - }, - check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, - expect: false, - }, - { - desc: "0.0.0.0 with different port should be allowed", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 79}, - {"TCP", "127.0.0.1", 81}, - {"TCP", "127.0.0.1", 82}, - }, - check: hostPortInfoParam{"TCP", "0.0.0.0", 80}, - expect: false, - }, - { - desc: "normal ip should check all registered 0.0.0.0", - added: []hostPortInfoParam{ - {"TCP", "0.0.0.0", 80}, - }, - check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, - expect: true, - }, - { - desc: "normal ip with different port/protocol should be 
allowed (0.0.0.0)", - added: []hostPortInfoParam{ - {"TCP", "0.0.0.0", 79}, - {"UDP", "0.0.0.0", 80}, - {"TCP", "0.0.0.0", 81}, - {"TCP", "0.0.0.0", 82}, - }, - check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, - expect: false, - }, - { - desc: "normal ip with different port/protocol should be allowed", - added: []hostPortInfoParam{ - {"TCP", "127.0.0.1", 79}, - {"UDP", "127.0.0.1", 80}, - {"TCP", "127.0.0.1", 81}, - {"TCP", "127.0.0.1", 82}, - }, - check: hostPortInfoParam{"TCP", "127.0.0.1", 80}, - expect: false, - }, - } - - for _, test := range tests { - hp := make(HostPortInfo) - for _, param := range test.added { - hp.Add(param.ip, param.protocol, param.port) - } - if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect { - t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect) - } - } -} diff --git a/test/e2e/apps/BUILD b/test/e2e/apps/BUILD index 251400af485..76d900de255 100644 --- a/test/e2e/apps/BUILD +++ b/test/e2e/apps/BUILD @@ -36,7 +36,7 @@ go_library( "//pkg/controller/replicaset:go_default_library", "//pkg/controller/replication:go_default_library", "//pkg/master/ports:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library", diff --git a/test/e2e/apps/daemon_set.go b/test/e2e/apps/daemon_set.go index 22e16fb6d22..123b2a763e2 100644 --- a/test/e2e/apps/daemon_set.go +++ b/test/e2e/apps/daemon_set.go @@ -36,7 +36,7 @@ import ( podutil "k8s.io/kubernetes/pkg/api/v1/pod" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" "k8s.io/kubernetes/pkg/controller/daemon" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" "k8s.io/kubernetes/test/e2e/framework" e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" @@ -688,7 +688,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool { newPod := daemon.NewPod(ds, node.Name) - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := schedfwk.NewNodeInfo() nodeInfo.SetNode(&node) taints, err := nodeInfo.Taints() if err != nil { diff --git a/test/e2e/framework/.import-restrictions b/test/e2e/framework/.import-restrictions index d9b2caf7643..7ecf913d6ec 100644 --- a/test/e2e/framework/.import-restrictions +++ b/test/e2e/framework/.import-restrictions @@ -191,7 +191,6 @@ "k8s.io/kubernetes/pkg/scheduler/listers", "k8s.io/kubernetes/pkg/scheduler/metrics", "k8s.io/kubernetes/pkg/scheduler/nodeinfo", - "k8s.io/kubernetes/pkg/scheduler/types", "k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/scheduler/volumebinder", "k8s.io/kubernetes/pkg/security/apparmor", diff --git a/test/e2e/framework/node/BUILD b/test/e2e/framework/node/BUILD index 3279bdc4145..a4a2d4cf032 100644 --- a/test/e2e/framework/node/BUILD +++ b/test/e2e/framework/node/BUILD @@ -10,7 +10,7 @@ go_library( importpath = "k8s.io/kubernetes/test/e2e/framework/node", visibility = ["//visibility:public"], deps = [ - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", 
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", diff --git a/test/e2e/framework/node/resource.go b/test/e2e/framework/node/resource.go index 52bcd60816b..d8f05ea7264 100644 --- a/test/e2e/framework/node/resource.go +++ b/test/e2e/framework/node/resource.go @@ -30,7 +30,7 @@ import ( "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/sets" clientset "k8s.io/client-go/kubernetes" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" e2elog "k8s.io/kubernetes/test/e2e/framework/log" "k8s.io/kubernetes/test/e2e/system" ) @@ -391,7 +391,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo }, } - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := schedfwk.NewNodeInfo() // Simple lookup for nonblocking taints based on comma-delimited list. nonblockingTaintsMap := map[string]struct{}{} diff --git a/test/integration/framework/BUILD b/test/integration/framework/BUILD index 367243d8967..eea9f1b2292 100644 --- a/test/integration/framework/BUILD +++ b/test/integration/framework/BUILD @@ -44,7 +44,7 @@ go_library( "//pkg/kubeapiserver:go_default_library", "//pkg/kubelet/client:go_default_library", "//pkg/master:go_default_library", - "//pkg/scheduler/types:go_default_library", + "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/util/env:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", diff --git a/test/integration/framework/util.go b/test/integration/framework/util.go index 79704e74c80..089c089ff34 100644 --- a/test/integration/framework/util.go +++ b/test/integration/framework/util.go @@ -34,7 +34,7 @@ import ( "k8s.io/klog" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" + schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" testutils "k8s.io/kubernetes/test/utils" ) @@ -250,7 +250,7 @@ func isNodeUntainted(node *v1.Node) bool { }, } - nodeInfo := schedulertypes.NewNodeInfo() + nodeInfo := schedfwk.NewNodeInfo() // Simple lookup for nonblocking taints based on comma-delimited list. 
nonblockingTaintsMap := map[string]struct{}{} diff --git a/test/integration/scheduler/BUILD b/test/integration/scheduler/BUILD index c788a3ff737..515a708bf01 100644 --- a/test/integration/scheduler/BUILD +++ b/test/integration/scheduler/BUILD @@ -33,7 +33,6 @@ go_test( "//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/profile:go_default_library", "//pkg/scheduler/testing:go_default_library", - "//pkg/scheduler/types:go_default_library", "//plugin/pkg/admission/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/priority:go_default_library", diff --git a/test/integration/scheduler/framework_test.go b/test/integration/scheduler/framework_test.go index 53b19a121fb..63f327a2c9b 100644 --- a/test/integration/scheduler/framework_test.go +++ b/test/integration/scheduler/framework_test.go @@ -32,7 +32,6 @@ import ( schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" testutils "k8s.io/kubernetes/test/integration/util" ) @@ -214,7 +213,7 @@ func (fp *FilterPlugin) reset() { // Filter is a test function that returns an error or nil, depending on the // value of "failFilter". -func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { +func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { fp.numFilterCalled++ if fp.failFilter { diff --git a/test/integration/scheduler/preemption_test.go b/test/integration/scheduler/preemption_test.go index c2c087f8b30..ec544e5cc48 100644 --- a/test/integration/scheduler/preemption_test.go +++ b/test/integration/scheduler/preemption_test.go @@ -43,7 +43,6 @@ import ( "k8s.io/kubernetes/pkg/scheduler" schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" - schedulertypes "k8s.io/kubernetes/pkg/scheduler/types" "k8s.io/kubernetes/plugin/pkg/admission/priority" testutils "k8s.io/kubernetes/test/integration/util" utils "k8s.io/kubernetes/test/utils" @@ -84,7 +83,7 @@ func (fp *tokenFilter) Name() string { } func (fp *tokenFilter) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, - nodeInfo *schedulertypes.NodeInfo) *framework.Status { + nodeInfo *framework.NodeInfo) *framework.Status { if fp.Tokens > 0 { fp.Tokens-- return nil @@ -101,13 +100,13 @@ func (fp *tokenFilter) PreFilter(ctx context.Context, state *framework.CycleStat } func (fp *tokenFilter) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, - podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { + podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { fp.Tokens-- return nil } func (fp *tokenFilter) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, - podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status { + podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status { fp.Tokens++ return nil }
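
A minimal usage sketch of the relocated API, assuming the tree at this commit: NodeInfo, HostPortInfo and GetPodKey are now imported from k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1 instead of pkg/scheduler/types. Everything below that is not in the patch (the package main wrapper, the "node-0"/"p0" objects, the printed fields) is illustrative only; the calls themselves are the ones this patch touches in the cache, the e2e node helpers and the moved host-port code.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func main() {
	// Hypothetical node and pod, used only to exercise the relocated types.
	node := &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: "node-0"}}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "p0", Namespace: "ns", UID: "p0-uid"},
		Spec:       v1.PodSpec{NodeName: "node-0"},
	}

	// framework.GetPodKey is the same helper the cache's GetPod now calls.
	if key, err := framework.GetPodKey(pod); err == nil {
		fmt.Println("cache key:", key)
	}

	// NewNodeInfo/SetNode/Taints mirror canScheduleOnNode and isNodeUntainted
	// as they appear in this patch.
	nodeInfo := framework.NewNodeInfo(pod)
	nodeInfo.SetNode(node)
	taints, err := nodeInfo.Taints()
	if err != nil {
		fmt.Println("reading taints:", err)
	}
	fmt.Printf("node %s: %d pod(s), %d taint(s), requested %+v\n",
		nodeInfo.Node().Name, len(nodeInfo.Pods()), len(taints), nodeInfo.RequestedResource())

	// HostPortInfo moved along with NodeInfo; the argument order is
	// (ip, protocol, port), as in the hostPortInfoBuilder in cache_test.go.
	used := make(framework.HostPortInfo)
	used.Add("127.0.0.1", "TCP", 80)
	used.Add("", "", 8080) // empty ip/protocol default to 0.0.0.0 and TCP

	fmt.Println("tuples tracked:", used.Len())
	// The wildcard address is checked against every registered IP, so this conflicts.
	fmt.Println("0.0.0.0/TCP/80 conflicts:", used.CheckConflict("0.0.0.0", "TCP", 80))
	// A different protocol on the same port is allowed.
	fmt.Println("127.0.0.1/UDP/80 conflicts:", used.CheckConflict("127.0.0.1", "UDP", 80))
}

Note that the patch keeps two aliases for the same package: scheduler-internal code imports it as framework (matching the existing framework.Status/CycleState call sites), while the e2e and integration helpers use schedfwk; either way the call sites such as Filter(..., nodeInfo *framework.NodeInfo) resolve to the one relocated type.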