Merge pull request #89912 from ahg-g/ahg-fwk

Move scheduler's NodeInfo and Listers types to framework pkg
commit 007f7ae7dc
Kubernetes Prow Robot authored 2020-04-07 12:49:57 -07:00; committed by GitHub
98 changed files with 837 additions and 1053 deletions
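
The refactor is mechanical for consumers: NodeInfo, Resource, ImageStateSummary, and the lister interfaces (NodeInfoLister, SharedLister) now live in pkg/scheduler/framework/v1alpha1, and the standalone listers and types packages are deleted. A minimal sketch of the migration for an out-of-tree plugin (the plugin itself is hypothetical, not part of this PR):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	// Previously these types came from "k8s.io/kubernetes/pkg/scheduler/types"
	// and "k8s.io/kubernetes/pkg/scheduler/listers"; both packages are removed.
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

type examplePlugin struct{}

func (p *examplePlugin) Name() string { return "Example" }

// Filter now receives *framework.NodeInfo (formerly *schedulertypes.NodeInfo).
func (p *examplePlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
	if nodeInfo.Node() == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}
	return nil // nil status means the pod fits
}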

View File

@ -80,10 +80,8 @@ go_test(
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/cache/fake:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/events/v1beta1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@ -127,12 +125,10 @@ filegroup(
"//pkg/scheduler/internal/heap:all-srcs",
"//pkg/scheduler/internal/parallelize:all-srcs",
"//pkg/scheduler/internal/queue:all-srcs",
"//pkg/scheduler/listers:all-srcs",
"//pkg/scheduler/metrics:all-srcs",
"//pkg/scheduler/nodeinfo:all-srcs",
"//pkg/scheduler/profile:all-srcs",
"//pkg/scheduler/testing:all-srcs",
"//pkg/scheduler/types:all-srcs",
"//pkg/scheduler/util:all-srcs",
],
tags = ["automanaged"],

View File

@ -15,10 +15,8 @@ go_library(
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
@ -60,13 +58,11 @@ go_test(
"//pkg/scheduler/framework/plugins/volumerestrictions:go_default_library",
"//pkg/scheduler/framework/plugins/volumezone:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/policy/v1beta1:go_default_library",

View File

@ -30,8 +30,7 @@ import (
restclient "k8s.io/client-go/rest"
extenderv1 "k8s.io/kube-scheduler/extender/v1"
schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/listers"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
const (
@ -77,7 +76,7 @@ type SchedulerExtender interface {
ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*extenderv1.Victims,
nodeInfos listers.NodeInfoLister) (map[*v1.Node]*extenderv1.Victims, error)
nodeInfos framework.NodeInfoLister) (map[*v1.Node]*extenderv1.Victims, error)
// SupportsPreemption returns whether the scheduler extender supports preemption.
SupportsPreemption() bool
@ -214,7 +213,7 @@ func (h *HTTPExtender) SupportsPreemption() bool {
func (h *HTTPExtender) ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*extenderv1.Victims,
nodeInfos listers.NodeInfoLister,
nodeInfos framework.NodeInfoLister,
) (map[*v1.Node]*extenderv1.Victims, error) {
var (
result extenderv1.ExtenderPreemptionResult
@ -258,7 +257,7 @@ func (h *HTTPExtender) ProcessPreemption(
// such as UIDs and names, to object pointers.
func (h *HTTPExtender) convertToNodeToVictims(
nodeNameToMetaVictims map[string]*extenderv1.MetaVictims,
nodeInfos listers.NodeInfoLister,
nodeInfos framework.NodeInfoLister,
) (map[*v1.Node]*extenderv1.Victims, error) {
nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
for nodeName, metaVictims := range nodeNameToMetaVictims {
@ -287,7 +286,7 @@ func (h *HTTPExtender) convertToNodeToVictims(
// and extender, i.e. when the pod is not found in nodeInfo.Pods.
func (h *HTTPExtender) convertPodUIDToPod(
metaPod *extenderv1.MetaPod,
nodeInfo *schedulertypes.NodeInfo) (*v1.Pod, error) {
nodeInfo *framework.NodeInfo) (*v1.Pod, error) {
for _, pod := range nodeInfo.Pods() {
if string(pod.UID) == metaPod.UID {
return pod, nil
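
The NodeInfoLister contract itself is unchanged by this move; only its import path is. A hedged sketch of the lookup pattern convertPodUIDToPod implements above (the helper name and error text are illustrative, and a fmt import is assumed):

// resolvePodByUID finds a pod by UID on a named node through the lister.
// Illustrative only; not part of this PR.
func resolvePodByUID(nodeInfos framework.NodeInfoLister, nodeName, uid string) (*v1.Pod, error) {
	nodeInfo, err := nodeInfos.Get(nodeName)
	if err != nil {
		return nil, err
	}
	for _, pod := range nodeInfo.Pods() {
		if string(pod.UID) == uid {
			return pod, nil
		}
	}
	return nil, fmt.Errorf("pod with UID %q not found on node %q", uid, nodeName)
}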

View File

@ -40,10 +40,8 @@ import (
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/pkg/scheduler/util"
)
@ -143,7 +141,7 @@ type FakeExtender struct {
ignorable bool
// Cached node information for fake extender
cachedNodeNameToInfo map[string]*schedulertypes.NodeInfo
cachedNodeNameToInfo map[string]*framework.NodeInfo
}
func (f *FakeExtender) Name() string {
@ -162,7 +160,7 @@ func (f *FakeExtender) SupportsPreemption() bool {
func (f *FakeExtender) ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*extenderv1.Victims,
nodeInfos listers.NodeInfoLister,
nodeInfos framework.NodeInfoLister,
) (map[*v1.Node]*extenderv1.Victims, error) {
nodeToVictimsCopy := map[*v1.Node]*extenderv1.Victims{}
// We don't want to change the original nodeToVictims

View File

@ -41,10 +41,8 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics"
"k8s.io/kubernetes/pkg/scheduler/profile"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/pkg/scheduler/util"
utiltrace "k8s.io/utils/trace"
)
@ -524,7 +522,7 @@ func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v
// addNominatedPods adds pods with equal or greater priority which are nominated
// to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState,
// 3) augmented nodeInfo.
func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *schedulertypes.NodeInfo) (bool, *framework.CycleState, *schedulertypes.NodeInfo, error) {
func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *framework.NodeInfo) (bool, *framework.CycleState, *framework.NodeInfo, error) {
if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
// This may happen only in tests.
return false, state, nodeInfo, nil
@ -564,7 +562,7 @@ func (g *genericScheduler) podPassesFiltersOnNode(
prof *profile.Profile,
state *framework.CycleState,
pod *v1.Pod,
info *schedulertypes.NodeInfo,
info *framework.NodeInfo,
) (bool, *framework.Status, error) {
var status *framework.Status
@ -856,7 +854,7 @@ func (g *genericScheduler) selectNodesForPreemption(
prof *profile.Profile,
state *framework.CycleState,
pod *v1.Pod,
potentialNodes []*schedulertypes.NodeInfo,
potentialNodes []*framework.NodeInfo,
pdbs []*policy.PodDisruptionBudget,
) (map[*v1.Node]*extenderv1.Victims, error) {
nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
@ -946,7 +944,7 @@ func (g *genericScheduler) selectVictimsOnNode(
prof *profile.Profile,
state *framework.CycleState,
pod *v1.Pod,
nodeInfo *schedulertypes.NodeInfo,
nodeInfo *framework.NodeInfo,
pdbs []*policy.PodDisruptionBudget,
) ([]*v1.Pod, int, bool) {
var potentialVictims []*v1.Pod
@ -1034,8 +1032,8 @@ func (g *genericScheduler) selectVictimsOnNode(
// nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
// that may be satisfied by removing pods from the node.
func nodesWherePreemptionMightHelp(nodes []*schedulertypes.NodeInfo, fitErr *FitError) []*schedulertypes.NodeInfo {
var potentialNodes []*schedulertypes.NodeInfo
func nodesWherePreemptionMightHelp(nodes []*framework.NodeInfo, fitErr *FitError) []*framework.NodeInfo {
var potentialNodes []*framework.NodeInfo
for _, node := range nodes {
name := node.Node().Name
// We rely on the status reported by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'
@ -1055,7 +1053,7 @@ func nodesWherePreemptionMightHelp(nodes []*schedulertypes.NodeInfo, fitErr *Fit
// considered for preemption.
// We look at the node that is nominated for this pod and as long as there are
// terminating pods on the node, we don't consider this for preempting more pods.
func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos listers.NodeInfoLister, enableNonPreempting bool) bool {
func podEligibleToPreemptOthers(pod *v1.Pod, nodeInfos framework.NodeInfoLister, enableNonPreempting bool) bool {
if enableNonPreempting && pod.Spec.PreemptionPolicy != nil && *pod.Spec.PreemptionPolicy == v1.PreemptNever {
klog.V(5).Infof("Pod %v/%v is not eligible for preemption because it has a preemptionPolicy of %v", pod.Namespace, pod.Name, v1.PreemptNever)
return false
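
As context for the PreemptNever branch above, the opt-out is carried entirely on the pod spec. A minimal sketch (assuming the NonPreemptingPriority feature gate is enabled; the pointer helper is from k8s.io/utils/pointer):

preemptNever := v1.PreemptNever
pod := &v1.Pod{
	Spec: v1.PodSpec{
		Priority:         pointer.Int32Ptr(1000), // high priority, but...
		PreemptionPolicy: &preemptNever,          // ...never preempts other pods
	},
}

podEligibleToPreemptOthers returns false for such a pod regardless of its priority.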

View File

@ -54,12 +54,11 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumerestrictions"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/volumezone"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
"k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@ -77,7 +76,7 @@ func (pl *trueFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return nil
}
@ -94,7 +93,7 @@ func (pl *falseFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
}
@ -111,7 +110,7 @@ func (pl *matchFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
@ -135,7 +134,7 @@ func (pl *noPodsFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if len(nodeInfo.Pods()) == 0 {
return nil
}
@ -160,7 +159,7 @@ func (pl *fakeFilterPlugin) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
atomic.AddInt32(&pl.numFilterCalled, 1)
if returnCode, ok := pl.failedNodeReturnCodeMap[nodeInfo.Node().Name]; ok {
@ -808,7 +807,7 @@ func TestGenericScheduler(t *testing.T) {
var pvcs []v1.PersistentVolumeClaim
pvcs = append(pvcs, test.pvcs...)
pvcLister := fakelisters.PersistentVolumeClaimLister(pvcs)
pvcLister := fakeframework.PersistentVolumeClaimLister(pvcs)
scheduler := NewGenericScheduler(
cache,
@ -2028,9 +2027,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
fitErr := FitError{
FilteredNodesStatuses: test.nodesStatuses,
}
var nodeInfos []*schedulertypes.NodeInfo
var nodeInfos []*framework.NodeInfo
for _, n := range makeNodeList(nodeNames) {
ni := schedulertypes.NewNodeInfo()
ni := framework.NewNodeInfo()
ni.SetNode(n)
nodeInfos = append(nodeInfos, ni)
}
@ -2371,7 +2370,7 @@ func TestPreempt(t *testing.T) {
for _, pod := range test.pods {
cache.AddPod(pod)
}
cachedNodeInfoMap := map[string]*schedulertypes.NodeInfo{}
cachedNodeInfoMap := map[string]*framework.NodeInfo{}
nodeNames := defaultNodeNames
if len(test.nodeNames) != 0 {
nodeNames = test.nodeNames
@ -2391,7 +2390,7 @@ func TestPreempt(t *testing.T) {
nodeNames[i] = node.Name
// Set nodeInfo to extenders to mock extenders' cache for preemption.
cachedNodeInfo := schedulertypes.NewNodeInfo()
cachedNodeInfo := framework.NewNodeInfo()
cachedNodeInfo.SetNode(node)
cachedNodeInfoMap[node.Name] = cachedNodeInfo
}
@ -2570,8 +2569,8 @@ func TestFairEvaluationForNodes(t *testing.T) {
}
}
func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulertypes.NodeInfo, error) {
var nodeInfos []*schedulertypes.NodeInfo
func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*framework.NodeInfo, error) {
var nodeInfos []*framework.NodeInfo
for _, n := range nodes {
nodeInfo, err := snapshot.NodeInfos().Get(n.Name)
if err != nil {

View File

@ -53,9 +53,7 @@ import (
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/profile"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const (
@ -538,7 +536,7 @@ func (f *fakeExtender) IsIgnorable() bool {
func (f *fakeExtender) ProcessPreemption(
pod *v1.Pod,
nodeToVictims map[*v1.Node]*extenderv1.Victims,
nodeInfos listers.NodeInfoLister,
nodeInfos framework.NodeInfoLister,
) (map[*v1.Node]*extenderv1.Victims, error) {
return nil, nil
}
@ -593,6 +591,6 @@ func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
return nil
}
func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
return nil
}

View File

@ -8,7 +8,6 @@ go_library(
deps = [
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",

View File

@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
utilnode "k8s.io/kubernetes/pkg/util/node"
)
@ -196,7 +195,7 @@ func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin
}
// countMatchingPods counts pods based on namespace and matching all selectors
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulertypes.NodeInfo) int {
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *framework.NodeInfo) int {
if len(nodeInfo.Pods()) == 0 || selector.Empty() {
return 0
}

View File

@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/util/parsers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/pkg/util/parsers"
)
@ -94,7 +93,7 @@ func calculatePriority(sumScores int64) int64 {
// sumImageScores returns the sum of image scores of all the containers that are already on the node.
// Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate
// the final score. Note that init containers are not considered, as it is rare for users to deploy huge init containers.
func sumImageScores(nodeInfo *schedulertypes.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
func sumImageScores(nodeInfo *framework.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
var sum int64
imageStates := nodeInfo.ImageStates()
@ -111,7 +110,7 @@ func sumImageScores(nodeInfo *schedulertypes.NodeInfo, containers []v1.Container
// The size of the image is used as the base score, scaled by a factor which considers how many nodes the image has "spread" to.
// This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or
// a few nodes due to image locality.
func scaledImageScore(imageState *schedulertypes.ImageStateSummary, totalNumNodes int) int64 {
func scaledImageScore(imageState *framework.ImageStateSummary, totalNumNodes int) int64 {
spread := float64(imageState.NumNodes) / float64(totalNumNodes)
return int64(float64(imageState.Size) * spread)
}
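
A worked sketch of scaledImageScore with illustrative numbers: an image whose ImageStateSummary reports a Size of 500 MB and NumNodes of 2 in a 10-node cluster gets a spread factor of 2/10 = 0.2, so its raw contribution is one fifth of its size.

// Illustrative values, not taken from the PR.
state := &framework.ImageStateSummary{Size: 500 * 1024 * 1024, NumNodes: 2}
score := scaledImageScore(state, 10)
// score == int64(float64(state.Size) * 0.2)

Scaling this way dampens the "node heating" effect described above: images concentrated on few nodes contribute less, so pods are not all pulled to the same machines.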

View File

@ -12,9 +12,6 @@ go_library(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@ -37,7 +34,6 @@ go_test(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@ -28,8 +28,6 @@ import (
"k8s.io/klog"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@ -210,7 +208,7 @@ func podMatchesAllAffinityTerms(pod *v1.Pod, terms []*affinityTerm) bool {
// getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node:
// (1) Whether it has PodAntiAffinity
// (2) Whether any AffinityTerm matches the incoming pod
func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.NodeInfo) (topologyToMatchedTermCount, error) {
func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*framework.NodeInfo) (topologyToMatchedTermCount, error) {
errCh := parallelize.NewErrorChannel()
var lock sync.Mutex
topologyMap := make(topologyToMatchedTermCount)
@ -254,7 +252,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.Node
// It returns a topologyToMatchedTermCount that are checked later by the affinity
// predicate. With this topologyToMatchedTermCount available, the affinity predicate does not
// need to check all the pods in the cluster.
func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*schedulertypes.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount, error) {
func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*framework.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount, error) {
topologyPairsAffinityPodsMap := make(topologyToMatchedTermCount)
topologyToMatchedExistingAntiAffinityTerms := make(topologyToMatchedTermCount)
affinity := pod.Spec.Affinity
@ -329,8 +327,8 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
// PreFilter invoked at the prefilter extension point.
func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
var allNodes []*schedulertypes.NodeInfo
var havePodsWithAffinityNodes []*schedulertypes.NodeInfo
var allNodes []*framework.NodeInfo
var havePodsWithAffinityNodes []*framework.NodeInfo
var err error
if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil {
return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos: %v", err))
@ -367,7 +365,7 @@ func (pl *InterPodAffinity) PreFilterExtensions() framework.PreFilterExtensions
}
// AddPod from pre-computed data in cycleState.
func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -377,7 +375,7 @@ func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.Cy
}
// RemovePod from pre-computed data in cycleState.
func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -402,7 +400,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
// Checks if scheduling the pod onto this node would break any anti-affinity
// terms indicated by the existing pods.
func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *schedulertypes.NodeInfo) (bool, error) {
func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *framework.NodeInfo) (bool, error) {
node := nodeInfo.Node()
topologyMap := state.topologyToMatchedExistingAntiAffinityTerms
@ -418,7 +416,7 @@ func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state
}
// nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches topology of all the "terms" for the given "pod".
func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *schedulertypes.NodeInfo, terms []v1.PodAffinityTerm) bool {
func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *framework.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
@ -435,7 +433,7 @@ func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTer
// nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
// topology of any "term" for the given "pod".
func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *schedulertypes.NodeInfo, terms []v1.PodAffinityTerm) bool {
func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *framework.NodeInfo, terms []v1.PodAffinityTerm) bool {
node := nodeInfo.Node()
for _, term := range terms {
if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
@ -478,7 +476,7 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P
// This function returns two boolean flags. The first boolean flag indicates whether the pod matches affinity rules
// or not. The second boolean flag indicates if the pod matches anti-affinity rules.
func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
state *preFilterState, nodeInfo *schedulertypes.NodeInfo,
state *preFilterState, nodeInfo *framework.NodeInfo,
affinity *v1.Affinity) (bool, bool, error) {
node := nodeInfo.Node()
if node == nil {
@ -514,7 +512,7 @@ func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
// Filter invoked at the filter extension point.
// It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
state, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())

View File

@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var (
@ -1636,7 +1635,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p := &InterPodAffinity{}
@ -2211,7 +2210,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
}
}
func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *schedulertypes.NodeInfo {
func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *framework.NodeInfo {
t.Helper()
nodeInfo, err := snapshot.NodeInfos().Get(name)
if err != nil {

View File

@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/validation/field"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/utils/pointer"
)
@ -54,7 +53,7 @@ var _ framework.ScorePlugin = &InterPodAffinity{}
// InterPodAffinity is a plugin that checks inter pod affinity
type InterPodAffinity struct {
Args
sharedLister schedulerlisters.SharedLister
sharedLister framework.SharedLister
sync.Mutex
}
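
Plugins get this lister from the framework handle at construction time; a hedged sketch of the wiring, with argument parsing and the plugin's other fields elided (a fmt import is assumed):

func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
	if h.SnapshotSharedLister() == nil {
		return nil, fmt.Errorf("SnapshotSharedLister is nil")
	}
	return &InterPodAffinity{
		sharedLister: h.SnapshotSharedLister(), // now a framework.SharedLister
	}, nil
}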

View File

@ -25,7 +25,6 @@ import (
"k8s.io/klog"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@ -118,7 +117,7 @@ func (m scoreMap) append(other scoreMap) {
}
}
func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *schedulertypes.NodeInfo, incomingPod *v1.Pod, topoScore scoreMap) error {
func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *framework.NodeInfo, incomingPod *v1.Pod, topoScore scoreMap) error {
existingPodAffinity := existingPod.Spec.Affinity
existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil
existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil

View File

@ -9,7 +9,6 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -38,7 +37,6 @@ go_test(
"//pkg/apis/core:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

View File

@ -26,7 +26,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// NodeAffinity is a plugin that checks if a pod node selector matches the node label.
@ -51,7 +50,7 @@ func (pl *NodeAffinity) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")

View File

@ -26,7 +26,6 @@ import (
api "k8s.io/kubernetes/pkg/apis/core"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
@ -694,7 +693,7 @@ func TestNodeAffinity(t *testing.T) {
Name: test.nodeName,
Labels: test.labels,
}}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(&node)
p, _ := New(nil, nil)

View File

@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -21,7 +20,6 @@ go_test(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@ -24,7 +24,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// Name of this plugin.
@ -102,7 +101,7 @@ func (pl *NodeLabel) Name() string {
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node.
func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")

View File

@ -25,7 +25,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestValidateNodeLabelArgs(t *testing.T) {
@ -133,7 +132,7 @@ func TestNodeLabelFilter(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(&node)
args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
@ -248,7 +247,7 @@ func TestNodeLabelScore(t *testing.T) {
func TestNodeLabelFilterWithoutNode(t *testing.T) {
var pod *v1.Pod
t.Run("node does not exist", func(t *testing.T) {
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
p, err := New(nil, nil)
if err != nil {
t.Fatalf("Failed to create plugin: %v", err)

View File

@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@ -33,7 +32,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

View File

@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// NodeName is a plugin that checks if a pod spec node name matches the current node.
@ -44,7 +43,7 @@ func (pl *NodeName) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo.Node() == nil {
return framework.NewStatus(framework.Error, "node not found")
}
@ -55,7 +54,7 @@ func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1
}
// Fits actually checks if the pod fits the node.
func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) bool {
func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool {
return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name
}

View File

@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestNodeName(t *testing.T) {
@ -70,7 +69,7 @@ func TestNodeName(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(test.node)
p, _ := New(nil, nil)

View File

@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@ -33,7 +32,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
],

View File

@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// NodePorts is a plugin that checks if a node has free ports for the requested pod ports.
@ -98,7 +97,7 @@ func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error)
}
// Filter invoked at the filter extension point.
func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
wantPorts, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -113,11 +112,11 @@ func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleStat
}
// Fits checks if the pod fits the node.
func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) bool {
func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool {
return fitsPorts(getContainerPorts(pod), nodeInfo)
}
func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *schedulertypes.NodeInfo) bool {
func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *framework.NodeInfo) bool {
// try to see whether existingPorts and wantPorts will conflict or not
existingPorts := nodeInfo.UsedPorts()
for _, cp := range wantPorts {
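
fitsPorts continues (truncated here) by checking each wanted port against the node's used-port set, where 0.0.0.0 acts as a wildcard host IP. A simplified sketch of the conflict rule the tests below exercise, not the actual HostPortInfo implementation:

// portsConflict reports whether two hostPort reservations collide:
// same protocol and port, and either the same host IP or a 0.0.0.0 wildcard.
func portsConflict(ipA, protoA string, portA int32, ipB, protoB string, portB int32) bool {
	if protoA != protoB || portA != portB {
		return false
	}
	return ipA == ipB || ipA == "0.0.0.0" || ipB == "0.0.0.0"
}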

View File

@ -26,7 +26,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/diff"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func newPod(host string, hostPortInfos ...string) *v1.Pod {
@ -56,91 +55,91 @@ func newPod(host string, hostPortInfos ...string) *v1.Pod {
func TestNodePorts(t *testing.T) {
tests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
name string
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulertypes.NewNodeInfo(),
nodeInfo: framework.NewNodeInfo(),
name: "nothing running",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/9090")),
name: "other port",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "same udp port",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")),
name: "same tcp port",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/127.0.0.2/8080")),
name: "different host ip",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")),
name: "different protocol",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")),
name: "second udp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
name: "first tcp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/0.0.0.0/8001"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")),
name: "first tcp port conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")),
name: "TCP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "TCP/127.0.0.1/8001"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")),
name: "second tcp port conflict to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
},
{
pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")),
name: "second different protocol",
},
{
pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulertypes.NewNodeInfo(
nodeInfo: framework.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
@ -165,7 +164,7 @@ func TestNodePorts(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p, _ := New(nil, nil)

View File

@ -19,7 +19,6 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@ -61,7 +60,6 @@ go_test(
"//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -27,7 +27,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var _ framework.PreFilterPlugin = &Fit{}
@ -56,7 +55,7 @@ type FitArgs struct {
// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
schedulertypes.Resource
framework.Resource
}
// Clone the prefilter state.
@ -69,7 +68,7 @@ func (f *Fit) Name() string {
return FitName
}
// computePodResourceRequest returns a schedulertypes.Resource that covers the largest
// computePodResourceRequest returns a framework.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously.
@ -143,7 +142,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
// Filter invoked at the filter extension point.
// Checks if a node has sufficient resources, such as CPU, memory, GPU, and opaque int resources, to run a pod.
// It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod.
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -174,11 +173,11 @@ type InsufficientResource struct {
}
// Fits checks if the node has enough resources to host the pod.
func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod), nodeInfo, ignoredExtendedResources)
}
func fitsRequest(podRequest *preFilterState, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
func fitsRequest(podRequest *preFilterState, nodeInfo *framework.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
insufficientResources := make([]InsufficientResource, 0, 4)
allowedPodNumber := nodeInfo.AllowedPodNumber()
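
A worked sketch of the max-vs-sum rule computePodResourceRequest implements, with illustrative values: regular containers requesting 100m and 200m CPU sum to 300m, an init container requesting 250m contributes its max of 250m, and the effective request is max(300m, 250m) = 300m.

// effectiveMilliCPU mirrors the rule for a single dimension: sum the regular
// containers, take the max over init containers, keep the larger of the two.
// Illustrative helper; not part of the PR.
func effectiveMilliCPU(containers, initContainers []int64) int64 {
	var sum, maxInit int64
	for _, c := range containers {
		sum += c
	}
	for _, c := range initContainers {
		if c > maxInit {
			maxInit = c
		}
	}
	if maxInit > sum {
		return maxInit
	}
	return sum
}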

View File

@ -27,7 +27,6 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var (
@ -62,7 +61,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
}
}
func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod {
func newResourcePod(usage ...framework.Resource) *v1.Pod {
containers := []v1.Container{}
for _, req := range usage {
containers = append(containers, v1.Container{
@ -76,7 +75,7 @@ func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod {
}
}
func newResourceInitPod(pod *v1.Pod, usage ...schedulertypes.Resource) *v1.Pod {
func newResourceInitPod(pod *v1.Pod, usage ...framework.Resource) *v1.Pod {
pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
return pod
}
@ -93,7 +92,7 @@ func getErrReason(rn v1.ResourceName) string {
func TestEnoughRequests(t *testing.T) {
enoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
name string
ignoredResources []byte
wantInsufficientResources []InsufficientResource
@ -101,266 +100,266 @@ func TestEnoughRequests(t *testing.T) {
}{
{
pod: &v1.Pod{},
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "no resources requested always fits",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}, schedulertypes.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 3, Memory: 1}, framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}, schedulertypes.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 3}, framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "init container fits because it's the max, not sum, of containers and init containers",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}, schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}), framework.Resource{MilliCPU: 1, Memory: 1}, framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 19})),
name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "both resources fit",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 5})),
pod: newResourcePod(framework.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 9, Memory: 5})),
name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "equal edge case",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 4, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 4, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "equal edge case for init container",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})),
pod: newResourcePod(framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})),
name: "extended resource fits",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}), framework.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{})),
name: "extended resource fits for init container",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable admits multiple init containers",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
pod: newResourceInitPod(newResourcePod(framework.Resource{}),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
},
{
pod: newResourcePod(
schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
framework.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 0, Memory: 0})),
ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
name: "skip checking ignored extended resource",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceOverheadPod(
newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "resources + pod overhead fits",
wantInsufficientResources: []InsufficientResource{},
},
{
pod: newResourceOverheadPod(
newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "requests + overhead does not fit for memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
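To sanity-check the failing overhead case above, assuming the 10 milliCPU / 20 memory node capacity these fixtures use: the pod requests 1 memory plus 15 of overhead, 16 in total; the node already uses 5 of 20, and 5 + 16 = 21 > 20, which is exactly the (requested 16, used 5, capacity 20) triple asserted in wantInsufficientResources. CPU still fits, since 1m requested plus 1m overhead on top of 5m used stays within capacity.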
@ -395,7 +394,7 @@ func TestEnoughRequests(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p, _ := NewFit(nil, nil)
@ -410,32 +409,32 @@ func TestPreFilterDisabled(t *testing.T) {
func TestNotEnoughRequests(t *testing.T) {
notEnoughPodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
fits bool
name string
wantStatus *framework.Status
}{
{
pod: &v1.Pod{},
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 10, Memory: 20})),
name: "even without specified resources predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 5})),
name: "even if both resources fit predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
{
pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
pod: newResourceInitPod(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 1}), framework.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: framework.NewNodeInfo(newResourcePod(framework.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
},
@ -464,34 +463,34 @@ func TestNotEnoughRequests(t *testing.T) {
func TestStorageRequests(t *testing.T) {
storagePodsTests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
name string
wantStatus *framework.Status
}{
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 10})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 10, Memory: 10})),
name: "due to container scratch disk",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
},
{
pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 10})),
pod: newResourcePod(framework.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 2, Memory: 10})),
name: "pod fit",
},
{
pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 25}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})),
pod: newResourcePod(framework.Resource{EphemeralStorage: 25}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
name: "storage ephemeral local storage request exceeds allocatable",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
},
{
pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 10}),
nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})),
pod: newResourcePod(framework.Resource{EphemeralStorage: 10}),
nodeInfo: framework.NewNodeInfo(
newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2})),
name: "pod fits",
},
}
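All of the tables above assemble node state through the same two calls; a minimal sketch of that pattern under the new package layout (newResourcePod is the test helper defined earlier in this file; the resource values are illustrative):
// Build a NodeInfo that already hosts one pod requesting 2 milliCPU and
// 2 units of memory, then attach the node object the plugin inspects.
nodeInfo := framework.NewNodeInfo(
	newResourcePod(framework.Resource{MilliCPU: 2, Memory: 2}))
nodeInfo.SetNode(&v1.Node{})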

View File

@ -23,7 +23,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@ -46,7 +45,7 @@ type resourceToValueMap map[v1.ResourceName]int64
// score will use `scorer` function to calculate the score.
func (r *resourceAllocationScorer) score(
pod *v1.Pod,
nodeInfo *schedulertypes.NodeInfo) (int64, *framework.Status) {
nodeInfo *framework.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node()
if node == nil {
return 0, framework.NewStatus(framework.Error, "node not found")
@ -90,7 +89,7 @@ func (r *resourceAllocationScorer) score(
}
// calculateResourceAllocatableRequest returns resources Allocatable and Requested values
func calculateResourceAllocatableRequest(nodeInfo *schedulertypes.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
func calculateResourceAllocatableRequest(nodeInfo *framework.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
allocatable := nodeInfo.AllocatableResource()
requested := nodeInfo.RequestedResource()
podRequest := calculatePodResourceRequest(pod, resource)
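The allocatable and requested values computed here (plus the pod's own request) are what each concrete scorer feeds into its scorer function. As a sketch of the shape such a function takes, here is a least-allocated style formula; it is illustrative rather than the exact formula any plugin in this tree ships:
// exampleLeastAllocatedScore maps unused capacity onto a 0-100 score:
// an idle node scores 100, a fully requested node scores 0.
func exampleLeastAllocatedScore(requested, allocatable int64) int64 {
	if allocatable == 0 {
		return 0 // guard against nodes reporting no capacity
	}
	return (allocatable - requested) * 100 / allocatable
}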

View File

@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// ResourceLimits is a score plugin that increases score of input node by 1 if the node satisfies
@ -46,7 +45,7 @@ const (
// preScoreState computed at PreScore and used at Score.
type preScoreState struct {
podResourceRequest *schedulertypes.Resource
podResourceRequest *framework.Resource
}
// Clone the preScore state.
@ -81,7 +80,7 @@ func (rl *ResourceLimits) PreScore(
return nil
}
func getPodResource(cycleState *framework.CycleState) (*schedulertypes.Resource, error) {
func getPodResource(cycleState *framework.CycleState) (*framework.Resource, error) {
c, err := cycleState.Read(preScoreStateKey)
if err != nil {
return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err)
@ -136,9 +135,9 @@ func NewResourceLimits(_ *runtime.Unknown, h framework.FrameworkHandle) (framewo
// getResourceLimits computes resource limits for input pod.
// The reason to create this new function is to be consistent with other
// priority functions because most or perhaps all priority functions work
// with schedulertypes.Resource.
func getResourceLimits(pod *v1.Pod) *schedulertypes.Resource {
result := &schedulertypes.Resource{}
// with framework.Resource.
func getResourceLimits(pod *v1.Pod) *framework.Resource {
result := &framework.Resource{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Limits)
}
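A short usage sketch for getResourceLimits; the pod spec is invented for illustration, and the loop shown above covers only regular containers (the rest of the function falls outside this hunk):
pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
	Resources: v1.ResourceRequirements{Limits: v1.ResourceList{
		v1.ResourceCPU:    resource.MustParse("500m"),
		v1.ResourceMemory: resource.MustParse("128Mi"),
	}},
}}}}
limits := getResourceLimits(pod)
// limits.MilliCPU is 500 and limits.Memory is 128Mi expressed in bytes.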

View File

@ -8,7 +8,6 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@ -20,7 +19,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

View File

@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// NodeUnschedulable is a plugin that prioritizes nodes according to the node annotation
@ -49,7 +48,7 @@ func (pl *NodeUnschedulable) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo == nil || nodeInfo.Node() == nil {
return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonUnknownCondition)
}
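This signature change is the mechanical pattern repeated across every filter plugin in the commit. A compilable sketch of a trivial plugin written against the new types (the plugin itself is hypothetical and not part of this change):
package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// alwaysFits demonstrates the post-move Filter signature: nodeInfo is now
// *framework.NodeInfo rather than the old scheduler types package's type.
type alwaysFits struct{}

func (a *alwaysFits) Name() string { return "AlwaysFitsExample" }

func (a *alwaysFits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
	if nodeInfo == nil || nodeInfo.Node() == nil {
		return framework.NewStatus(framework.UnschedulableAndUnresolvable, "node not found")
	}
	return nil // a nil Status means the pod fits
}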

View File

@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestNodeUnschedulable(t *testing.T) {
@ -73,7 +72,7 @@ func TestNodeUnschedulable(t *testing.T) {
}
for _, test := range testCases {
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(test.node)
p, _ := New(nil, nil)

View File

@ -13,7 +13,6 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
@ -40,8 +39,7 @@ go_test(
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
"//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",

View File

@ -29,7 +29,6 @@ import (
csitrans "k8s.io/csi-translation-lib"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/klog"
@ -68,7 +67,7 @@ func (pl *CSILimits) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If the new pod doesn't have any volume attached to it, the predicate will always be true
if len(pod.Spec.Volumes) == 0 {
return nil
@ -285,7 +284,7 @@ func NewCSI(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plu
}, nil
}
func getVolumeLimits(nodeInfo *schedulertypes.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
func getVolumeLimits(nodeInfo *framework.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
// TODO: stop getting values from Node object in v1.18
nodeVolumeLimits := nodeInfo.VolumeLimits()
if csiNode != nil {

View File

@ -35,8 +35,7 @@ import (
csilibplugins "k8s.io/csi-translation-lib/plugins"
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
utilpointer "k8s.io/utils/pointer"
)
@ -475,8 +474,8 @@ func TestCSILimits(t *testing.T) {
}
}
func getFakeCSIPVLister(volumeName string, driverNames ...string) fakelisters.PersistentVolumeLister {
pvLister := fakelisters.PersistentVolumeLister{}
func getFakeCSIPVLister(volumeName string, driverNames ...string) fakeframework.PersistentVolumeLister {
pvLister := fakeframework.PersistentVolumeLister{}
for _, driver := range driverNames {
for j := 0; j < 4; j++ {
volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
@ -520,8 +519,8 @@ func getFakeCSIPVLister(volumeName string, driverNames ...string) fakelisters.Pe
return pvLister
}
func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakelisters.PersistentVolumeClaimLister {
pvcLister := fakelisters.PersistentVolumeClaimLister{}
func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) fakeframework.PersistentVolumeClaimLister {
pvcLister := fakeframework.PersistentVolumeClaimLister{}
for _, driver := range driverNames {
for j := 0; j < 4; j++ {
v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
@ -563,8 +562,8 @@ func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
csiNode.Annotations = nodeInfoAnnotations
}
func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.StorageClassLister {
return fakelisters.StorageClassLister{
func getFakeCSIStorageClassLister(scName, provisionerName string) fakeframework.StorageClassLister {
return fakeframework.StorageClassLister{
{
ObjectMeta: metav1.ObjectMeta{Name: scName},
Provisioner: provisionerName,
@ -572,15 +571,15 @@ func getFakeCSIStorageClassLister(scName, provisionerName string) fakelisters.St
}
}
func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister {
func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakeframework.CSINodeLister {
if csiNode != nil {
return fakelisters.CSINodeLister(*csiNode)
return fakeframework.CSINodeLister(*csiNode)
}
return fakelisters.CSINodeLister{}
return fakeframework.CSINodeLister{}
}
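The fake listers move with the interfaces they satisfy, so call sites only swap the import path and package alias. A sketch of wiring one up under the new path (the class name and provisioner are invented):
import fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"

scLister := fakeframework.StorageClassLister{
	{
		ObjectMeta:  metav1.ObjectMeta{Name: "example-sc"},
		Provisioner: "example.com/provisioner",
	},
}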
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulertypes.NodeInfo, *storagev1.CSINode) {
nodeInfo := schedulertypes.NewNodeInfo(pods...)
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*framework.NodeInfo, *storagev1.CSINode) {
nodeInfo := framework.NewNodeInfo(pods...)
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
Status: v1.NodeStatus{

View File

@ -36,7 +36,6 @@ import (
"k8s.io/kubernetes/pkg/features"
kubefeatures "k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@ -196,7 +195,7 @@ func (pl *nonCSILimits) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {

View File

@ -27,7 +27,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
csilibplugins "k8s.io/csi-translation-lib/plugins"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
utilpointer "k8s.io/utils/pointer"
)
@ -1222,8 +1222,8 @@ func TestGetMaxVols(t *testing.T) {
}
}
func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister {
return fakelisters.PersistentVolumeClaimLister{
func getFakePVCLister(filterName string) fakeframework.PersistentVolumeClaimLister {
return fakeframework.PersistentVolumeClaimLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
Spec: v1.PersistentVolumeClaimSpec{
@ -1283,8 +1283,8 @@ func getFakePVCLister(filterName string) fakelisters.PersistentVolumeClaimLister
}
}
func getFakePVLister(filterName string) fakelisters.PersistentVolumeLister {
return fakelisters.PersistentVolumeLister{
func getFakePVLister(filterName string) fakeframework.PersistentVolumeLister {
return fakeframework.PersistentVolumeLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
Spec: v1.PersistentVolumeSpec{

View File

@ -14,8 +14,6 @@ go_library(
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
@ -43,7 +41,6 @@ go_test(
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -28,7 +28,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const preFilterStateKey = "PreFilter" + Name
@ -160,7 +159,7 @@ func (pl *PodTopologySpread) PreFilterExtensions() framework.PreFilterExtensions
}
// AddPod from pre-computed data in cycleState.
func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -171,7 +170,7 @@ func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.C
}
// RemovePod from pre-computed data in cycleState.
func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -275,7 +274,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er
}
// Filter invoked at the filter extension point.
func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
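AddPod and RemovePod exist so that preemption can cheaply mutate the PreFilter-computed spread counts instead of recomputing them for every candidate victim. A sketch of that flow against the new types; the helper below is hypothetical, and it treats a nil *framework.Status as success, which is how these plugins signal it:
func simulateVictimRemoval(ctx context.Context, pl *PodTopologySpread,
	podToSchedule, victim *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
	state := framework.NewCycleState()
	if s := pl.PreFilter(ctx, state, podToSchedule); s != nil {
		return s
	}
	// What if victim were evicted from this node?
	if s := pl.RemovePod(ctx, state, podToSchedule, victim, nodeInfo); s != nil {
		return s
	}
	// Restore the pre-computed counts once the what-if Filter has run.
	defer pl.AddPod(ctx, state, podToSchedule, victim, nodeInfo)
	return pl.Filter(ctx, state, podToSchedule, nodeInfo)
}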

View File

@ -32,7 +32,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/utils/pointer"
)
@ -1619,7 +1618,7 @@ func TestMultipleConstraints(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p := &PodTopologySpread{}

View File

@ -28,7 +28,6 @@ import (
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
)
const (
@ -56,7 +55,7 @@ type Args struct {
// PodTopologySpread is a plugin that ensures a pod's topologySpreadConstraints are satisfied.
type PodTopologySpread struct {
Args
sharedLister schedulerlisters.SharedLister
sharedLister framework.SharedLister
services corelisters.ServiceLister
replicationCtrls corelisters.ReplicationControllerLister
replicaSets appslisters.ReplicaSetLister

View File

@ -8,8 +8,6 @@ go_library(
deps = [
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -23,9 +21,8 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

View File

@ -26,8 +26,6 @@ import (
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const (
@ -92,7 +90,7 @@ func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.P
// ServiceAffinity is a plugin that checks service affinity.
type ServiceAffinity struct {
args Args
sharedLister schedulerlisters.SharedLister
sharedLister framework.SharedLister
serviceLister corelisters.ServiceLister
}
@ -146,7 +144,7 @@ func (pl *ServiceAffinity) PreFilterExtensions() framework.PreFilterExtensions {
}
// AddPod from pre-computed data in cycleState.
func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -167,7 +165,7 @@ func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.Cyc
}
// RemovePod from pre-computed data in cycleState.
func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@ -230,7 +228,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
// - L is a label that the ServiceAffinity object needs as a matching constraint.
// - L is not defined in the pod itself already.
// - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value.
func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if len(pl.args.AffinityLabels) == 0 {
return nil
}
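Concretely, under the rule spelled out in the comment above: if AffinityLabels is ["zone"], the incoming pod does not already carry "zone", and a pod from the same service in the same namespace is already scheduled on a node labeled zone=us-west1-a, then the filter admits only nodes carrying zone=us-west1-a (label key and value invented for illustration).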
@ -332,7 +330,7 @@ func (pl *ServiceAffinity) NormalizeScore(ctx context.Context, _ *framework.Cycl
// we need to modify the old priority to be able to handle multiple labels so that it can be mapped
// to a single plugin.
// TODO: This will be deprecated soon.
func (pl *ServiceAffinity) updateNodeScoresForLabel(sharedLister schedulerlisters.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error {
func (pl *ServiceAffinity) updateNodeScoresForLabel(sharedLister framework.SharedLister, mapResult framework.NodeScoreList, reduceResult []float64, label string) error {
var numServicePods int64
var labelValue string
podCounts := map[string]int64{}

View File

@ -25,9 +25,8 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestServiceAffinity(t *testing.T) {
@ -164,7 +163,7 @@ func TestServiceAffinity(t *testing.T) {
p := &ServiceAffinity{
sharedLister: snapshot,
serviceLister: fakelisters.ServiceLister(test.services),
serviceLister: fakeframework.ServiceLister(test.services),
args: Args{
AffinityLabels: test.labels,
},
@ -384,7 +383,7 @@ func TestServiceAffinityScore(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
nodes := makeLabeledNodeList(test.nodes)
snapshot := cache.NewSnapshot(test.pods, nodes)
serviceLister := fakelisters.ServiceLister(test.services)
serviceLister := fakeframework.ServiceLister(test.services)
p := &ServiceAffinity{
sharedLister: snapshot,
@ -499,7 +498,7 @@ func TestPreFilterStateAddRemovePod(t *testing.T) {
p := &ServiceAffinity{
sharedLister: snapshot,
serviceLister: fakelisters.ServiceLister(test.services),
serviceLister: fakeframework.ServiceLister(test.services),
}
cycleState := framework.NewCycleState()
preFilterStatus := p.PreFilter(context.Background(), cycleState, test.pendingPod)
@ -591,7 +590,7 @@ func sortNodeScoreList(out framework.NodeScoreList) {
})
}
func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *schedulertypes.NodeInfo {
func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *framework.NodeInfo {
t.Helper()
nodeInfo, err := snapshot.NodeInfos().Get(name)
if err != nil {
@ -602,7 +601,7 @@ func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *sched
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p := &ServiceAffinity{

View File

@ -9,7 +9,6 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@ -36,7 +35,6 @@ go_test(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

View File

@ -25,7 +25,6 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// TaintToleration is a plugin that checks if a pod tolerates a node's taints.
@ -52,7 +51,7 @@ func (pl *TaintToleration) Name() string {
}
// Filter invoked at the filter extension point.
func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo == nil || nodeInfo.Node() == nil {
return framework.NewStatus(framework.Error, "invalid nodeInfo")
}

View File

@ -25,7 +25,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
@ -330,7 +329,7 @@ func TestTaintTolerationFilter(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(test.node)
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, nodeInfo)

View File

@ -8,7 +8,6 @@ go_library(
deps = [
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@ -35,7 +34,6 @@ go_test(
deps = [
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

View File

@ -23,7 +23,6 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// VolumeBinding is a plugin that binds pod volumes in scheduling.
@ -62,7 +61,7 @@ func podHasPVCs(pod *v1.Pod) bool {
//
// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
// PVCs can be matched with an available and node-compatible PV.
func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")

View File

@ -25,7 +25,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestVolumeBinding(t *testing.T) {
@ -99,7 +98,7 @@ func TestVolumeBinding(t *testing.T) {
for _, item := range table {
t.Run(item.name, func(t *testing.T) {
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(item.node)
fakeVolumeBinder := scheduling.NewFakeVolumeBinder(item.volumeBinderConfig)
p := &VolumeBinding{

View File

@ -7,7 +7,6 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@ -33,7 +32,6 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

View File

@ -22,7 +22,6 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// VolumeRestrictions is a plugin that checks volume restrictions.
@ -118,7 +117,7 @@ func haveOverlap(a1, a2 []string) bool {
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only
// - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only
func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
for _, v := range pod.Spec.Volumes {
for _, ev := range nodeInfo.Pods() {
if isVolumeConflict(v, ev) {

View File

@ -23,7 +23,6 @@ import (
v1 "k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestGCEDiskConflicts(t *testing.T) {
@ -52,15 +51,15 @@ func TestGCEDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
@ -100,15 +99,15 @@ func TestAWSDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
@ -154,15 +153,15 @@ func TestRBDDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
@ -208,15 +207,15 @@ func TestISCSIDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
nodeInfo *schedulertypes.NodeInfo
nodeInfo *framework.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
{&v1.Pod{}, framework.NewNodeInfo(), true, "nothing", nil},
{&v1.Pod{}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
{&v1.Pod{Spec: volState}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
{&v1.Pod{Spec: volState2}, framework.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {

View File

@ -8,7 +8,6 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@ -26,8 +25,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1/fake:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -30,7 +30,6 @@ import (
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// VolumeZone is a plugin that checks volume zone.
@ -78,7 +77,7 @@ func (pl *VolumeZone) Name() string {
// determining the zone of a volume during scheduling, and that is likely to
// require calling out to the cloud provider. It seems that we are moving away
// from inline volume declarations anyway.
func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {

View File

@ -25,8 +25,7 @@ import (
storagev1 "k8s.io/api/storage/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
)
func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
@ -48,7 +47,7 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
}
func TestSingleZone(t *testing.T) {
pvLister := fakelisters.PersistentVolumeLister{
pvLister := fakeframework.PersistentVolumeLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
},
@ -66,7 +65,7 @@ func TestSingleZone(t *testing.T) {
},
}
pvcLister := fakelisters.PersistentVolumeClaimLister{
pvcLister := fakeframework.PersistentVolumeClaimLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@ -208,7 +207,7 @@ func TestSingleZone(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := &schedulertypes.NodeInfo{}
node := &framework.NodeInfo{}
node.SetNode(test.Node)
p := &VolumeZone{
pvLister,
@ -224,7 +223,7 @@ func TestSingleZone(t *testing.T) {
}
func TestMultiZone(t *testing.T) {
pvLister := fakelisters.PersistentVolumeLister{
pvLister := fakeframework.PersistentVolumeLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
},
@ -242,7 +241,7 @@ func TestMultiZone(t *testing.T) {
},
}
pvcLister := fakelisters.PersistentVolumeClaimLister{
pvcLister := fakeframework.PersistentVolumeClaimLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@ -330,7 +329,7 @@ func TestMultiZone(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := &schedulertypes.NodeInfo{}
node := &framework.NodeInfo{}
node.SetNode(test.Node)
p := &VolumeZone{
pvLister,
@ -354,7 +353,7 @@ func TestWithBinding(t *testing.T) {
classImmediate = "Class_Immediate"
)
scLister := fakelisters.StorageClassLister{
scLister := fakeframework.StorageClassLister{
{
ObjectMeta: metav1.ObjectMeta{Name: classImmediate},
},
@ -364,13 +363,13 @@ func TestWithBinding(t *testing.T) {
},
}
pvLister := fakelisters.PersistentVolumeLister{
pvLister := fakeframework.PersistentVolumeLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
},
}
pvcLister := fakelisters.PersistentVolumeClaimLister{
pvcLister := fakeframework.PersistentVolumeClaimLister{
{
ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@ -439,7 +438,7 @@ func TestWithBinding(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
node := &schedulertypes.NodeInfo{}
node := &framework.NodeInfo{}
node.SetNode(test.Node)
p := &VolumeZone{
pvLister,

View File

@ -6,26 +6,33 @@ go_library(
"cycle_state.go",
"framework.go",
"interface.go",
"listers.go",
"metrics_recorder.go",
"registry.go",
"types.go",
"waiting_pods_map.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/json:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
"//staging/src/k8s.io/component-base/metrics:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/sigs.k8s.io/yaml:go_default_library",
@ -41,7 +48,10 @@ filegroup(
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
srcs = [
":package-srcs",
"//pkg/scheduler/framework/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
@ -53,13 +63,14 @@ go_test(
"framework_test.go",
"interface_test.go",
"registry_test.go",
"types_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",

View File

@ -3,11 +3,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["listers.go"],
importpath = "k8s.io/kubernetes/pkg/scheduler/listers/fake",
importpath = "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",

View File

@ -27,11 +27,10 @@ import (
appslisters "k8s.io/client-go/listers/apps/v1"
corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
var _ schedulerlisters.PodLister = &PodLister{}
var _ framework.PodLister = &PodLister{}
// PodLister implements framework.PodLister on a []*v1.Pod for test purposes.
type PodLister []*v1.Pod
@ -47,7 +46,7 @@ func (f PodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
}
// FilteredList returns pods matching a pod filter and a label selector.
func (f PodLister) FilteredList(podFilter schedulerlisters.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
func (f PodLister) FilteredList(podFilter framework.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
for _, pod := range f {
if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
selected = append(selected, pod)
@ -247,11 +246,11 @@ func (pvcs PersistentVolumeClaimLister) PersistentVolumeClaims(namespace string)
}
}
// NodeInfoLister declares a schedulertypes.NodeInfo type for testing.
type NodeInfoLister []*schedulertypes.NodeInfo
// NodeInfoLister declares a framework.NodeInfo type for testing.
type NodeInfoLister []*framework.NodeInfo
// Get looks up the NodeInfo of the given node name among the fake nodes.
func (nodes NodeInfoLister) Get(nodeName string) (*schedulertypes.NodeInfo, error) {
func (nodes NodeInfoLister) Get(nodeName string) (*framework.NodeInfo, error) {
for _, node := range nodes {
if node != nil && node.Node().Name == nodeName {
return node, nil
@ -261,21 +260,21 @@ func (nodes NodeInfoLister) Get(nodeName string) (*schedulertypes.NodeInfo, erro
}
// List lists all nodes.
func (nodes NodeInfoLister) List() ([]*schedulertypes.NodeInfo, error) {
func (nodes NodeInfoLister) List() ([]*framework.NodeInfo, error) {
return nodes, nil
}
// HavePodsWithAffinityList is supposed to list nodes with at least one pod with affinity. For the fake lister
// we just return everything.
func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) {
func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) {
return nodes, nil
}
// NewNodeInfoLister creates a new fake NodeInfoLister from a slice of v1.Nodes.
func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister {
nodeInfoList := make([]*schedulertypes.NodeInfo, len(nodes))
func NewNodeInfoLister(nodes []*v1.Node) framework.NodeInfoLister {
nodeInfoList := make([]*framework.NodeInfo, 0, len(nodes))
for _, node := range nodes {
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := framework.NewNodeInfo()
nodeInfo.SetNode(node)
nodeInfoList = append(nodeInfoList, nodeInfo)
}
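For orientation, a minimal sketch of how a test might consume the relocated fake lister; the package name, node names, and helper function below are illustrative, not part of this change.

package example // hypothetical test package

import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	fakeframework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1/fake"
)

// exampleFakeNodeInfoLister builds a fake lister and looks up a node by name.
func exampleFakeNodeInfoLister() error {
	nodes := []*v1.Node{
		{ObjectMeta: metav1.ObjectMeta{Name: "node-a"}},
		{ObjectMeta: metav1.ObjectMeta{Name: "node-b"}},
	}
	lister := fakeframework.NewNodeInfoLister(nodes)
	ni, err := lister.Get("node-a")
	if err != nil {
		return err
	}
	_ = ni.Node().Name // "node-a"
	return nil
}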

View File

@ -32,9 +32,7 @@ import (
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const (
@ -60,7 +58,7 @@ const (
// plugins.
type framework struct {
registry Registry
snapshotSharedLister schedulerlisters.SharedLister
snapshotSharedLister SharedLister
waitingPods *waitingPodsMap
pluginNameToWeightMap map[string]int
queueSortPlugins []QueueSortPlugin
@ -116,7 +114,7 @@ func (f *framework) getExtensionPoints(plugins *config.Plugins) []extensionPoint
type frameworkOptions struct {
clientSet clientset.Interface
informerFactory informers.SharedInformerFactory
snapshotSharedLister schedulerlisters.SharedLister
snapshotSharedLister SharedLister
metricsRecorder *metricsRecorder
volumeBinder scheduling.SchedulerVolumeBinder
runAllFilters bool
@ -140,7 +138,7 @@ func WithInformerFactory(informerFactory informers.SharedInformerFactory) Option
}
// WithSnapshotSharedLister sets the SharedLister of the snapshot.
func WithSnapshotSharedLister(snapshotSharedLister schedulerlisters.SharedLister) Option {
func WithSnapshotSharedLister(snapshotSharedLister SharedLister) Option {
return func(o *frameworkOptions) {
o.snapshotSharedLister = snapshotSharedLister
}
@ -352,7 +350,7 @@ func (f *framework) RunPreFilterExtensionAddPod(
state *CycleState,
podToSchedule *v1.Pod,
podToAdd *v1.Pod,
nodeInfo *schedulertypes.NodeInfo,
nodeInfo *NodeInfo,
) (status *Status) {
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil {
@ -370,7 +368,7 @@ func (f *framework) RunPreFilterExtensionAddPod(
return nil
}
func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podToAdd, nodeInfo)
}
@ -388,7 +386,7 @@ func (f *framework) RunPreFilterExtensionRemovePod(
state *CycleState,
podToSchedule *v1.Pod,
podToRemove *v1.Pod,
nodeInfo *schedulertypes.NodeInfo,
nodeInfo *NodeInfo,
) (status *Status) {
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil {
@ -406,7 +404,7 @@ func (f *framework) RunPreFilterExtensionRemovePod(
return nil
}
func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podToAdd, nodeInfo)
}
@ -424,7 +422,7 @@ func (f *framework) RunFilterPlugins(
ctx context.Context,
state *CycleState,
pod *v1.Pod,
nodeInfo *schedulertypes.NodeInfo,
nodeInfo *NodeInfo,
) PluginToStatus {
var firstFailedStatus *Status
statuses := make(PluginToStatus)
@ -451,7 +449,7 @@ func (f *framework) RunFilterPlugins(
return statuses
}
func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Filter(ctx, state, pod, nodeInfo)
}
@ -817,7 +815,7 @@ func (f *framework) WaitOnPermit(ctx context.Context, pod *v1.Pod) (status *Stat
// snapshot. The snapshot is taken at the beginning of a scheduling cycle and remains
// unchanged until a pod finishes "Reserve". There is no guarantee that the information
// remains unchanged after "Reserve".
func (f *framework) SnapshotSharedLister() schedulerlisters.SharedLister {
func (f *framework) SnapshotSharedLister() SharedLister {
return f.snapshotSharedLister
}
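With SharedLister now defined in this package, a plugin holding a FrameworkHandle can read the per-cycle snapshot without a separate listers import. A hedged in-package sketch; countNodes is our name, not part of the commit.

// countNodes lists the snapshot's nodes through the framework's own
// SharedLister and returns how many nodes the current cycle sees.
func countNodes(h FrameworkHandle) (int, error) {
	nodeInfos, err := h.SnapshotSharedLister().NodeInfos().List()
	if err != nil {
		return 0, err
	}
	return len(nodeInfos), nil
}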

View File

@ -32,7 +32,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/metrics"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const (
@ -138,10 +137,10 @@ type TestPluginPreFilterExtension struct {
inj injectedResult
}
func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
return NewStatus(Code(e.inj.PreFilterAddPodStatus), "injected status")
}
func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status {
return NewStatus(Code(e.inj.PreFilterRemovePodStatus), "injected status")
}
@ -165,7 +164,7 @@ func (pl *TestPlugin) PreFilterExtensions() PreFilterExtensions {
return &TestPluginPreFilterExtension{inj: pl.inj}
}
func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status {
return NewStatus(Code(pl.inj.FilterStatus), "injected filter status")
}
@ -228,13 +227,13 @@ func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, stat
}
func (pl *TestPreFilterWithExtensionsPlugin) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod,
podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
pl.AddCalled++
return nil
}
func (pl *TestPreFilterWithExtensionsPlugin) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod,
podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status {
pl.RemoveCalled++
return nil
}

View File

@ -31,8 +31,6 @@ import (
clientset "k8s.io/client-go/kubernetes"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// NodeScoreList declares a list of nodes and their scores.
@ -256,10 +254,10 @@ type QueueSortPlugin interface {
type PreFilterExtensions interface {
// AddPod is called by the framework while trying to evaluate the impact
// of adding podToAdd to the node while scheduling podToSchedule.
AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status
// RemovePod is called by the framework while trying to evaluate the impact
// of removing podToRemove from the node while scheduling podToSchedule.
RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status
}
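Since nodeInfo is now an in-package type, an extensions implementation needs no schedulertypes import. A minimal sketch, assuming a plugin defined inside this package; noopExtensions is illustrative only.

// noopExtensions accepts every incremental add/remove evaluation.
type noopExtensions struct{}

func (noopExtensions) AddPod(ctx context.Context, state *CycleState, podToSchedule, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status {
	return nil // a nil Status means Success
}

func (noopExtensions) RemovePod(ctx context.Context, state *CycleState, podToSchedule, podToRemove *v1.Pod, nodeInfo *NodeInfo) *Status {
	return nil
}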
// PreFilterPlugin is an interface that must be implemented by "prefilter" plugins.
@ -299,7 +297,7 @@ type FilterPlugin interface {
// For example, during preemption, we may pass a copy of the original
// nodeInfo object that has some pods removed from it to evaluate the
// possibility of preempting them to schedule the target pod.
Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status
}
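Likewise for filters: a sketch of the smallest conforming FilterPlugin under the new signature (noopFilter is a hypothetical type).

// noopFilter admits any node that is actually present in the snapshot.
type noopFilter struct{}

func (noopFilter) Name() string { return "NoopFilter" }

func (noopFilter) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) *Status {
	if nodeInfo.Node() == nil {
		return NewStatus(Error, "node not found")
	}
	return nil
}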
// PreScorePlugin is an interface for Pre-score plugin. Pre-score is an
@ -425,17 +423,17 @@ type Framework interface {
// preemption, we may pass a copy of the original nodeInfo object that has some pods
// removed from it to evaluate the possibility of preempting them to
// schedule the target pod.
RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) PluginToStatus
RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *NodeInfo) PluginToStatus
// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status
// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *NodeInfo) *Status
// RunPreScorePlugins runs the set of configured pre-score plugins. If any
// of these plugins returns any status other than "Success", the given pod is rejected.
@ -504,7 +502,7 @@ type FrameworkHandle interface {
// cycle (pre-bind/bind/post-bind/un-reserve plugin) should not use it,
// otherwise a concurrent read/write error might occur, they should use scheduler
// cache instead.
SnapshotSharedLister() schedulerlisters.SharedLister
SnapshotSharedLister() SharedLister
// IterateOverWaitingPods acquires a read lock and iterates over the WaitingPods map.
IterateOverWaitingPods(callback func(WaitingPod))

View File

@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package listers
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
v1listers "k8s.io/client-go/listers/core/v1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// PodFilter is a function to filter a pod. It returns true if the pod passes the filter, false otherwise.
@ -38,11 +37,11 @@ type PodLister interface {
// NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name.
type NodeInfoLister interface {
// Returns the list of NodeInfos.
List() ([]*schedulertypes.NodeInfo, error)
List() ([]*NodeInfo, error)
// Returns the list of NodeInfos of nodes with pods with affinity terms.
HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error)
HavePodsWithAffinityList() ([]*NodeInfo, error)
// Returns the NodeInfo of the given node name.
Get(nodeName string) (*schedulertypes.NodeInfo, error)
Get(nodeName string) (*NodeInfo, error)
}
// SharedLister groups scheduler-specific listers.

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package types
package v1alpha1
import (
"errors"
@ -689,3 +689,117 @@ func (n *NodeInfo) Filter(pod *v1.Pod) bool {
}
return false
}
// DefaultBindAllHostIP defines the default IP address used to bind to all hosts.
const DefaultBindAllHostIP = "0.0.0.0"
// ProtocolPort represents a protocol port pair, e.g. tcp:80.
type ProtocolPort struct {
Protocol string
Port int32
}
// NewProtocolPort creates a ProtocolPort instance.
func NewProtocolPort(protocol string, port int32) *ProtocolPort {
pp := &ProtocolPort{
Protocol: protocol,
Port: port,
}
if len(pp.Protocol) == 0 {
pp.Protocol = string(v1.ProtocolTCP)
}
return pp
}
// HostPortInfo stores a mapping from IP address to a set of ProtocolPorts.
type HostPortInfo map[string]map[ProtocolPort]struct{}
// Add adds (ip, protocol, port) to HostPortInfo
func (h HostPortInfo) Add(ip, protocol string, port int32) {
if port <= 0 {
return
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
if _, ok := h[ip]; !ok {
h[ip] = map[ProtocolPort]struct{}{
*pp: {},
}
return
}
h[ip][*pp] = struct{}{}
}
// Remove removes (ip, protocol, port) from HostPortInfo
func (h HostPortInfo) Remove(ip, protocol string, port int32) {
if port <= 0 {
return
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
if m, ok := h[ip]; ok {
delete(m, *pp)
if len(h[ip]) == 0 {
delete(h, ip)
}
}
}
// Len returns the total number of (ip, protocol, port) tuples in HostPortInfo.
func (h HostPortInfo) Len() int {
length := 0
for _, m := range h {
length += len(m)
}
return length
}
// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing
// ones in HostPortInfo.
func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool {
if port <= 0 {
return false
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
// If ip is 0.0.0.0, check the (protocol, port) pairs of all IPs.
if ip == DefaultBindAllHostIP {
for _, m := range h {
if _, ok := m[*pp]; ok {
return true
}
}
return false
}
// If ip isn't 0.0.0.0, only check the (protocol, port) pairs of the given IP and of 0.0.0.0.
for _, key := range []string{DefaultBindAllHostIP, ip} {
if m, ok := h[key]; ok {
if _, ok2 := m[*pp]; ok2 {
return true
}
}
}
return false
}
// sanitize the parameters
func (h HostPortInfo) sanitize(ip, protocol *string) {
if len(*ip) == 0 {
*ip = DefaultBindAllHostIP
}
if len(*protocol) == 0 {
*protocol = string(v1.ProtocolTCP)
}
}
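Taken together, Add, CheckConflict, and sanitize give HostPortInfo its wildcard semantics; a compact in-package illustration (the function itself is ours, not part of the commit):

func exampleHostPortInfo() {
	hp := make(HostPortInfo)
	hp.Add("", "", 8080) // sanitized to ("0.0.0.0", "TCP", 8080)

	// 0.0.0.0 holds TCP/8080, so any concrete IP conflicts on that pair...
	_ = hp.CheckConflict("127.0.0.1", "TCP", 8080) // true
	// ...while a different protocol or port does not.
	_ = hp.CheckConflict("127.0.0.1", "UDP", 8080) // false
	_ = hp.CheckConflict("127.0.0.1", "TCP", 9090) // false
}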

View File

@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
package types
package v1alpha1
import (
"fmt"
@ -967,3 +967,213 @@ func fakeNodeInfo(pods ...*v1.Pod) *NodeInfo {
})
return ni
}
type hostPortInfoParam struct {
protocol, ip string
port int32
}
func TestHostPortInfo_AddRemove(t *testing.T) {
tests := []struct {
desc string
added []hostPortInfoParam
removed []hostPortInfoParam
length int
}{
{
desc: "normal add case",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
// this might not make sense in a real case, but the struct doesn't forbid it.
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
{"TCP", "0.0.0.0", 0},
{"TCP", "0.0.0.0", -1},
},
length: 8,
},
{
desc: "empty ip and protocol add should work",
added: []hostPortInfoParam{
{"", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"", "127.0.0.1", 81},
{"", "127.0.0.1", 82},
{"", "", 79},
{"UDP", "", 80},
{"", "", 81},
{"", "", 82},
{"", "", 0},
{"", "", -1},
},
length: 8,
},
{
desc: "normal remove case",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
removed: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
length: 0,
},
{
desc: "empty ip and protocol remove should work",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
removed: []hostPortInfoParam{
{"", "127.0.0.1", 79},
{"", "127.0.0.1", 81},
{"", "127.0.0.1", 82},
{"UDP", "127.0.0.1", 80},
{"", "", 79},
{"", "", 81},
{"", "", 82},
{"UDP", "", 80},
},
length: 0,
},
}
for _, test := range tests {
hp := make(HostPortInfo)
for _, param := range test.added {
hp.Add(param.ip, param.protocol, param.port)
}
for _, param := range test.removed {
hp.Remove(param.ip, param.protocol, param.port)
}
if hp.Len() != test.length {
t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len())
t.Error(hp)
}
}
}
func TestHostPortInfo_Check(t *testing.T) {
tests := []struct {
desc string
added []hostPortInfoParam
check hostPortInfoParam
expect bool
}{
{
desc: "empty check should check 0.0.0.0 and TCP",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 81},
expect: false,
},
{
desc: "empty check should check 0.0.0.0 and TCP (conflicted)",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 80},
expect: true,
},
{
desc: "empty port check should pass",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 0},
expect: false,
},
{
desc: "0.0.0.0 should check all registered IPs",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: true,
},
{
desc: "0.0.0.0 with different protocol should be allowed",
added: []hostPortInfoParam{
{"UDP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: false,
},
{
desc: "0.0.0.0 with different port should be allowed",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: false,
},
{
desc: "normal ip should check all registered 0.0.0.0",
added: []hostPortInfoParam{
{"TCP", "0.0.0.0", 80},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: true,
},
{
desc: "normal ip with different port/protocol should be allowed (0.0.0.0)",
added: []hostPortInfoParam{
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: false,
},
{
desc: "normal ip with different port/protocol should be allowed",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: false,
},
}
for _, test := range tests {
hp := make(HostPortInfo)
for _, param := range test.added {
hp.Add(param.ip, param.protocol, param.port)
}
if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect {
t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect)
}
}
}

View File

@ -12,9 +12,8 @@ go_library(
visibility = ["//pkg/scheduler:__subpackages__"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@ -35,7 +34,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",

View File

@ -28,9 +28,8 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
"k8s.io/kubernetes/pkg/features"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/metrics"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var (
@ -51,7 +50,7 @@ func New(ttl time.Duration, stop <-chan struct{}) Cache {
// linked list. When a NodeInfo is updated, it goes to the head of the list.
// The items closer to the head are the most recently updated items.
type nodeInfoListItem struct {
info *schedulertypes.NodeInfo
info *framework.NodeInfo
next *nodeInfoListItem
prev *nodeInfoListItem
}
@ -93,8 +92,8 @@ type imageState struct {
}
// createImageStateSummary returns a summarizing snapshot of the given image's state.
func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulertypes.ImageStateSummary {
return &schedulertypes.ImageStateSummary{
func (cache *schedulerCache) createImageStateSummary(state *imageState) *framework.ImageStateSummary {
return &framework.ImageStateSummary{
Size: state.size,
NumNodes: len(state.nodes),
}
@ -115,7 +114,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul
}
// newNodeInfoListItem initializes a new nodeInfoListItem.
func newNodeInfoListItem(ni *schedulertypes.NodeInfo) *nodeInfoListItem {
func newNodeInfoListItem(ni *framework.NodeInfo) *nodeInfoListItem {
return &nodeInfoListItem{
info: ni,
}
@ -180,7 +179,7 @@ func (cache *schedulerCache) Dump() *Dump {
cache.mu.RLock()
defer cache.mu.RUnlock()
nodes := make(map[string]*schedulertypes.NodeInfo, len(cache.nodes))
nodes := make(map[string]*framework.NodeInfo, len(cache.nodes))
for k, v := range cache.nodes {
nodes[k] = v.info.Clone()
}
@ -231,7 +230,7 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
existing, ok := nodeSnapshot.nodeInfoMap[np.Name]
if !ok {
updateAllLists = true
existing = &schedulertypes.NodeInfo{}
existing = &framework.NodeInfo{}
nodeSnapshot.nodeInfoMap[np.Name] = existing
}
clone := node.info.Clone()
@ -277,10 +276,10 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
}
func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
snapshot.havePodsWithAffinityNodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
if updateAll {
// Take a snapshot of the nodes order in the tree
snapshot.nodeInfoList = make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
snapshot.nodeInfoList = make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next()
if n := snapshot.nodeInfoMap[nodeName]; n != nil {
@ -320,7 +319,7 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
return cache.FilteredList(alwaysTrue, selector)
}
func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
func (cache *schedulerCache) FilteredList(podFilter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
cache.mu.RLock()
defer cache.mu.RUnlock()
// podFilter is expected to return true for most or all of the pods. We
@ -342,7 +341,7 @@ func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter,
}
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
@ -368,7 +367,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error {
// finishBinding exists to make tests deterministic by injecting now as an argument
func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
@ -387,7 +386,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
}
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
@ -419,7 +418,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
func (cache *schedulerCache) addPod(pod *v1.Pod) {
n, ok := cache.nodes[pod.Spec.NodeName]
if !ok {
n = newNodeInfoListItem(schedulertypes.NewNodeInfo())
n = newNodeInfoListItem(framework.NewNodeInfo())
cache.nodes[pod.Spec.NodeName] = n
}
n.info.AddPod(pod)
@ -452,7 +451,7 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error {
}
func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
@ -489,7 +488,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
}
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
key, err := schedulertypes.GetPodKey(oldPod)
key, err := framework.GetPodKey(oldPod)
if err != nil {
return err
}
@ -517,7 +516,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
}
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return err
}
@ -546,7 +545,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
}
func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return false, err
}
@ -564,7 +563,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
// GetPod might return a pod for which its node has already been deleted from
// the main cache. This is useful to properly process pod update events.
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
key, err := schedulertypes.GetPodKey(pod)
key, err := framework.GetPodKey(pod)
if err != nil {
return nil, err
}
@ -586,7 +585,7 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error {
n, ok := cache.nodes[node.Name]
if !ok {
n = newNodeInfoListItem(schedulertypes.NewNodeInfo())
n = newNodeInfoListItem(framework.NewNodeInfo())
cache.nodes[node.Name] = n
} else {
cache.removeNodeImageStates(n.info.Node())
@ -604,7 +603,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
n, ok := cache.nodes[newNode.Name]
if !ok {
n = newNodeInfoListItem(schedulertypes.NewNodeInfo())
n = newNodeInfoListItem(framework.NewNodeInfo())
cache.nodes[newNode.Name] = n
cache.nodeTree.addNode(newNode)
} else {
@ -641,8 +640,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
// addNodeImageStates adds the states of the images on the given node to the given nodeInfo and updates the imageStates in
// the scheduler cache. This function assumes the lock to the scheduler cache has been acquired.
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulertypes.NodeInfo) {
newSum := make(map[string]*schedulertypes.ImageStateSummary)
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *framework.NodeInfo) {
newSum := make(map[string]*framework.ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {

View File

@ -31,11 +31,11 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulertypes.NodeInfo) error {
func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *framework.NodeInfo) error {
if (actual == nil) != (expected == nil) {
return errors.New("one of the actual or expected is nil and the other is not")
}
@ -70,21 +70,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo
return b
}
func (b *hostPortInfoBuilder) build() schedulertypes.HostPortInfo {
res := make(schedulertypes.HostPortInfo)
func (b *hostPortInfoBuilder) build() framework.HostPortInfo {
res := make(framework.HostPortInfo)
for _, param := range b.inputs {
res.Add(param.ip, param.protocol, param.port)
}
return res
}
func newNodeInfo(requestedResource *schedulertypes.Resource,
nonzeroRequest *schedulertypes.Resource,
func newNodeInfo(requestedResource *framework.Resource,
nonzeroRequest *framework.Resource,
pods []*v1.Pod,
usedPorts schedulertypes.HostPortInfo,
imageStates map[string]*schedulertypes.ImageStateSummary,
) *schedulertypes.NodeInfo {
nodeInfo := schedulertypes.NewNodeInfo(pods...)
usedPorts framework.HostPortInfo,
imageStates map[string]*framework.ImageStateSummary,
) *framework.NodeInfo {
nodeInfo := framework.NewNodeInfo(pods...)
nodeInfo.SetRequestedResource(requestedResource)
nodeInfo.SetNonZeroRequest(nonzeroRequest)
nodeInfo.SetUsedPorts(usedPorts)
@ -112,98 +112,98 @@ func TestAssumePodScheduled(t *testing.T) {
tests := []struct {
pods []*v1.Pod
wNodeInfo *schedulertypes.NodeInfo
wNodeInfo *framework.NodeInfo
}{{
pods: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[1], testPods[2]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 300,
Memory: 1524,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 300,
Memory: 1524,
},
[]*v1.Pod{testPods[1], testPods[2]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}, { // test non-zero request
pods: []*v1.Pod{testPods[3]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 0,
Memory: 0,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: schedutil.DefaultMilliCPURequest,
Memory: schedutil.DefaultMemoryRequest,
},
[]*v1.Pod{testPods[3]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[4]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[4]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[4], testPods[5]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 300,
Memory: 1524,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 300,
Memory: 1524,
},
[]*v1.Pod{testPods[4], testPods[5]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}, {
pods: []*v1.Pod{testPods[6]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[6]},
newHostPortInfoBuilder().build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
},
}
@ -263,13 +263,13 @@ func TestExpirePod(t *testing.T) {
pods []*testExpirePodStruct
cleanupTime time.Time
wNodeInfo *schedulertypes.NodeInfo
wNodeInfo *framework.NodeInfo
}{{ // assumed pod would expire
pods: []*testExpirePodStruct{
{pod: testPods[0], finishBind: true, assumedTime: now},
},
cleanupTime: now.Add(2 * ttl),
wNodeInfo: schedulertypes.NewNodeInfo(),
wNodeInfo: framework.NewNodeInfo(),
}, { // first one would expire, second and third would not.
pods: []*testExpirePodStruct{
{pod: testPods[0], finishBind: true, assumedTime: now},
@ -278,18 +278,18 @@ func TestExpirePod(t *testing.T) {
},
cleanupTime: now.Add(2 * ttl),
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 400,
Memory: 2048,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 400,
Memory: 2048,
},
// Order gets altered when removing pods.
[]*v1.Pod{testPods[2], testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}}
@ -336,22 +336,22 @@ func TestAddPodWillConfirm(t *testing.T) {
podsToAssume []*v1.Pod
podsToAdd []*v1.Pod
wNodeInfo *schedulertypes.NodeInfo
wNodeInfo *framework.NodeInfo
}{{ // two pods were assumed at the same time, but only the first one has Add() called and gets confirmed.
podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
podsToAdd: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}}
@ -438,25 +438,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
podsToAdd []*v1.Pod
podsToUpdate [][]*v1.Pod
wNodeInfo map[string]*schedulertypes.NodeInfo
wNodeInfo map[string]*framework.NodeInfo
}{{
podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
podsToAdd: []*v1.Pod{addedPod.DeepCopy()},
podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
wNodeInfo: map[string]*schedulertypes.NodeInfo{
wNodeInfo: map[string]*framework.NodeInfo{
"assumed-node": nil,
"actual-node": newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 200,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 200,
Memory: 500,
},
[]*v1.Pod{updatedPod.DeepCopy()},
newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
},
}}
@ -499,21 +499,21 @@ func TestAddPodAfterExpiration(t *testing.T) {
tests := []struct {
pod *v1.Pod
wNodeInfo *schedulertypes.NodeInfo
wNodeInfo *framework.NodeInfo
}{{
pod: basePod,
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}}
@ -555,34 +555,34 @@ func TestUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod
wNodeInfo []*schedulertypes.NodeInfo
wNodeInfo []*framework.NodeInfo
}{{ // add a pod and then update it twice
podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
&schedulertypes.Resource{
wNodeInfo: []*framework.NodeInfo{newNodeInfo(
&framework.Resource{
MilliCPU: 200,
Memory: 1024,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 200,
Memory: 1024,
},
[]*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
), newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
)},
}}
@ -686,35 +686,35 @@ func TestExpireAddUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod
wNodeInfo []*schedulertypes.NodeInfo
wNodeInfo []*framework.NodeInfo
}{{ // Pod is assumed, expired, and added. Then it would be updated twice.
podsToAssume: []*v1.Pod{testPods[0]},
podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
&schedulertypes.Resource{
wNodeInfo: []*framework.NodeInfo{newNodeInfo(
&framework.Resource{
MilliCPU: 200,
Memory: 1024,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 200,
Memory: 1024,
},
[]*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
), newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
)},
}}
@ -780,21 +780,21 @@ func TestEphemeralStorageResource(t *testing.T) {
podE := makePodWithEphemeralStorage(nodeName, "500")
tests := []struct {
pod *v1.Pod
wNodeInfo *schedulertypes.NodeInfo
wNodeInfo *framework.NodeInfo
}{
{
pod: podE,
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
EphemeralStorage: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: schedutil.DefaultMilliCPURequest,
Memory: schedutil.DefaultMemoryRequest,
},
[]*v1.Pod{podE},
schedulertypes.HostPortInfo{},
make(map[string]*schedulertypes.ImageStateSummary),
framework.HostPortInfo{},
make(map[string]*framework.ImageStateSummary),
),
},
}
@ -827,7 +827,7 @@ func TestRemovePod(t *testing.T) {
tests := []struct {
nodes []*v1.Node
pod *v1.Pod
wNodeInfo *schedulertypes.NodeInfo
wNodeInfo *framework.NodeInfo
}{{
nodes: []*v1.Node{
{
@ -839,17 +839,17 @@ func TestRemovePod(t *testing.T) {
},
pod: basePod,
wNodeInfo: newNodeInfo(
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
&schedulertypes.Resource{
&framework.Resource{
MilliCPU: 100,
Memory: 500,
},
[]*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulertypes.ImageStateSummary),
make(map[string]*framework.ImageStateSummary),
),
}}
@ -930,7 +930,7 @@ func TestForgetPod(t *testing.T) {
// getResourceRequest returns the resource request of all containers in the pod,
// excluding initContainers.
func getResourceRequest(pod *v1.Pod) v1.ResourceList {
result := &schedulertypes.Resource{}
result := &framework.Resource{}
for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests)
}
@ -939,13 +939,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
}
// buildNodeInfo creates a NodeInfo by simulating node operations in cache.
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulertypes.NodeInfo {
expected := schedulertypes.NewNodeInfo()
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo {
expected := framework.NewNodeInfo()
// Simulate SetNode.
expected.SetNode(node)
expected.SetAllocatableResource(schedulertypes.NewResource(node.Status.Allocatable))
expected.SetAllocatableResource(framework.NewResource(node.Status.Allocatable))
expected.SetTaints(node.Spec.Taints)
expected.SetGeneration(expected.GetGeneration() + 1)
@ -1533,8 +1533,8 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot)
return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList))
}
expectedNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
expectedHavePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
expectedNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
expectedHavePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next()
if n := snapshot.nodeInfoMap[nodeName]; n != nil {

View File

@ -12,9 +12,9 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/debugger",
visibility = ["//pkg/scheduler:__subpackages__"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
@ -27,7 +27,7 @@ go_test(
srcs = ["comparer_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
],

View File

@ -24,9 +24,9 @@ import (
"k8s.io/apimachinery/pkg/labels"
corelisters "k8s.io/client-go/listers/core/v1"
"k8s.io/klog"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// CacheComparer is an implementation of the Scheduler's cache comparer.
@ -68,7 +68,7 @@ func (c *CacheComparer) Compare() error {
}
// CompareNodes compares actual nodes with cached nodes.
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulertypes.NodeInfo) (missed, redundant []string) {
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) {
actual := []string{}
for _, node := range nodes {
actual = append(actual, node.Name)
@ -83,7 +83,7 @@ func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sch
}
// ComparePods compares actual pods with cached pods.
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulertypes.NodeInfo) (missed, redundant []string) {
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*framework.NodeInfo) (missed, redundant []string) {
actual := []string{}
for _, pod := range pods {
actual = append(actual, string(pod.UID))

View File

@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
func TestCompareNodes(t *testing.T) {
@ -72,9 +72,9 @@ func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T)
nodes = append(nodes, node)
}
nodeInfo := make(map[string]*schedulertypes.NodeInfo)
nodeInfo := make(map[string]*framework.NodeInfo)
for _, nodeName := range cached {
nodeInfo[nodeName] = &schedulertypes.NodeInfo{}
nodeInfo[nodeName] = &framework.NodeInfo{}
}
m, r := compare.CompareNodes(nodes, nodeInfo)
@ -170,14 +170,14 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes
queuedPods = append(queuedPods, pod)
}
nodeInfo := make(map[string]*schedulertypes.NodeInfo)
nodeInfo := make(map[string]*framework.NodeInfo)
for _, uid := range cached {
pod := &v1.Pod{}
pod.UID = types.UID(uid)
pod.Namespace = "ns"
pod.Name = uid
nodeInfo[uid] = schedulertypes.NewNodeInfo(pod)
nodeInfo[uid] = framework.NewNodeInfo(pod)
}
m, r := compare.ComparePods(pods, queuedPods, nodeInfo)

View File

@ -23,9 +23,9 @@ import (
"k8s.io/klog"
"k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// CacheDumper writes some information from the scheduler cache and the scheduling queue to the
@ -61,7 +61,7 @@ func (d *CacheDumper) dumpSchedulingQueue() {
}
// printNodeInfo writes parts of NodeInfo to a string.
func (d *CacheDumper) printNodeInfo(n *schedulertypes.NodeInfo) string {
func (d *CacheDumper) printNodeInfo(n *framework.NodeInfo) string {
var nodeData strings.Builder
nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))

View File

@ -6,8 +6,8 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
visibility = ["//pkg/scheduler:__subpackages__"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
],

View File

@ -19,8 +19,8 @@ package fake
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
)
// Cache is used for testing
@ -83,7 +83,7 @@ func (c *Cache) UpdateSnapshot(snapshot *internalcache.Snapshot) error {
func (c *Cache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
// FilteredList is a fake method for testing.
func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
func (c *Cache) FilteredList(filter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
return nil, nil
}

View File

@ -18,8 +18,7 @@ package cache
import (
v1 "k8s.io/api/core/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// Cache collects pods' information and provides node-level aggregated information.
@ -57,7 +56,7 @@ import (
// - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue,
// a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache.
type Cache interface {
schedulerlisters.PodLister
framework.PodLister
// AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
// The implementation also decides the policy to expire pod before being confirmed (receiving Add event).
@ -108,5 +107,5 @@ type Cache interface {
// Dump is a dump of the cache state.
type Dump struct {
AssumedPods map[string]bool
Nodes map[string]*schedulertypes.NodeInfo
Nodes map[string]*framework.NodeInfo
}
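As a hedged aside, a debugging caller might read a dump roughly like this; printCacheDump and its output format are illustrative, not part of the change.

import "fmt"

// printCacheDump summarizes a cache dump: assumed pods plus per-node pod counts.
func printCacheDump(c Cache) {
	dump := c.Dump()
	fmt.Printf("%d assumed pods\n", len(dump.AssumedPods))
	for name, nodeInfo := range dump.Nodes {
		fmt.Printf("node %s holds %d pods\n", name, len(nodeInfo.Pods()))
	}
}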

View File

@ -22,36 +22,35 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a
// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle.
type Snapshot struct {
// nodeInfoMap a map of node name to a snapshot of its NodeInfo.
nodeInfoMap map[string]*schedulertypes.NodeInfo
nodeInfoMap map[string]*framework.NodeInfo
// nodeInfoList is the list of nodes as ordered in the cache's nodeTree.
nodeInfoList []*schedulertypes.NodeInfo
nodeInfoList []*framework.NodeInfo
// havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
havePodsWithAffinityNodeInfoList []*schedulertypes.NodeInfo
havePodsWithAffinityNodeInfoList []*framework.NodeInfo
generation int64
}
var _ schedulerlisters.SharedLister = &Snapshot{}
var _ framework.SharedLister = &Snapshot{}
// NewEmptySnapshot initializes a Snapshot struct and returns it.
func NewEmptySnapshot() *Snapshot {
return &Snapshot{
nodeInfoMap: make(map[string]*schedulertypes.NodeInfo),
nodeInfoMap: make(map[string]*framework.NodeInfo),
}
}
// NewSnapshot initializes a Snapshot struct and returns it.
func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
nodeInfoMap := createNodeInfoMap(pods, nodes)
nodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodeInfoMap))
havePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodeInfoMap))
nodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap))
havePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, len(nodeInfoMap))
for _, v := range nodeInfoMap {
nodeInfoList = append(nodeInfoList, v)
if len(v.PodsWithAffinity()) > 0 {
@ -70,12 +69,12 @@ func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
// createNodeInfoMap obtains a list of pods and pivots that list into a map
// where the keys are node names and the values are the aggregated information
// for that node.
func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulertypes.NodeInfo {
nodeNameToInfo := make(map[string]*schedulertypes.NodeInfo)
func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*framework.NodeInfo {
nodeNameToInfo := make(map[string]*framework.NodeInfo)
for _, pod := range pods {
nodeName := pod.Spec.NodeName
if _, ok := nodeNameToInfo[nodeName]; !ok {
nodeNameToInfo[nodeName] = schedulertypes.NewNodeInfo()
nodeNameToInfo[nodeName] = framework.NewNodeInfo()
}
nodeNameToInfo[nodeName].AddPod(pod)
}
@ -83,7 +82,7 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulerty
for _, node := range nodes {
if _, ok := nodeNameToInfo[node.Name]; !ok {
nodeNameToInfo[node.Name] = schedulertypes.NewNodeInfo()
nodeNameToInfo[node.Name] = framework.NewNodeInfo()
}
nodeInfo := nodeNameToInfo[node.Name]
nodeInfo.SetNode(node)
@ -93,12 +92,12 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulerty
}
// getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*schedulertypes.ImageStateSummary {
imageStates := make(map[string]*schedulertypes.ImageStateSummary)
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*framework.ImageStateSummary {
imageStates := make(map[string]*framework.ImageStateSummary)
for _, image := range node.Status.Images {
for _, name := range image.Names {
imageStates[name] = &schedulertypes.ImageStateSummary{
imageStates[name] = &framework.ImageStateSummary{
Size: image.SizeBytes,
NumNodes: len(imageExistenceMap[name]),
}
@ -125,12 +124,12 @@ func createImageExistenceMap(nodes []*v1.Node) map[string]sets.String {
}
// Pods returns a PodLister
func (s *Snapshot) Pods() schedulerlisters.PodLister {
func (s *Snapshot) Pods() framework.PodLister {
return podLister(s.nodeInfoList)
}
// NodeInfos returns a NodeInfoLister.
func (s *Snapshot) NodeInfos() schedulerlisters.NodeInfoLister {
func (s *Snapshot) NodeInfos() framework.NodeInfoLister {
return s
}
@ -139,7 +138,7 @@ func (s *Snapshot) NumNodes() int {
return len(s.nodeInfoList)
}
type podLister []*schedulertypes.NodeInfo
type podLister []*framework.NodeInfo
// List returns the list of pods in the snapshot.
func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
@ -148,7 +147,7 @@ func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
}
// FilteredList returns a filtered list of pods in the snapshot.
func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
func (p podLister) FilteredList(filter framework.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
// podFilter is expected to return true for most or all of the pods. We
// can avoid expensive array growth without wasting too much memory by
// pre-allocating capacity.
@ -168,17 +167,17 @@ func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labe
}
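
The pre-allocation comment above is the whole trick: when the filter admits most elements, sizing the result for the worst case avoids repeated slice growth at the cost of a little transient memory. A sketch of the pattern in isolation:

package main

import "fmt"

// filteredPods pre-allocates capacity for the common case where keep()
// returns true for most elements, so append never has to reallocate.
func filteredPods(pods []string, keep func(string) bool) []string {
	out := make([]string, 0, len(pods)) // worst-case (and typical) capacity
	for _, p := range pods {
		if keep(p) {
			out = append(out, p)
		}
	}
	return out
}

func main() {
	pods := []string{"a", "b", "c"}
	fmt.Println(filteredPods(pods, func(s string) bool { return s != "b" }))
}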
// List returns the list of nodes in the snapshot.
func (s *Snapshot) List() ([]*schedulertypes.NodeInfo, error) {
func (s *Snapshot) List() ([]*framework.NodeInfo, error) {
return s.nodeInfoList, nil
}
// HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity
func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) {
func (s *Snapshot) HavePodsWithAffinityList() ([]*framework.NodeInfo, error) {
return s.havePodsWithAffinityNodeInfoList, nil
}
// Get returns the NodeInfo of the given node name.
func (s *Snapshot) Get(nodeName string) (*schedulertypes.NodeInfo, error) {
func (s *Snapshot) Get(nodeName string) (*framework.NodeInfo, error) {
if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
return v, nil
}
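
The hunk cuts off before the miss path, but the comma-ok guard above implies one: when the name is absent, or present only as a placeholder entry without a Node object set, the lookup presumably falls through to an error return along these lines (the error text is hypothetical):

// Hypothetical miss path after the guard above: the node is unknown, or is
// only a placeholder created from a pod before its Node object was observed.
return nil, fmt.Errorf("nodeinfo not found for node name %q", nodeName)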

View File

@ -23,7 +23,7 @@ import (
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
const mb int64 = 1024 * 1024
@ -32,7 +32,7 @@ func TestGetNodeImageStates(t *testing.T) {
tests := []struct {
node *v1.Node
imageExistenceMap map[string]sets.String
expected map[string]*schedulertypes.ImageStateSummary
expected map[string]*framework.ImageStateSummary
}{
{
node: &v1.Node{
@ -58,7 +58,7 @@ func TestGetNodeImageStates(t *testing.T) {
"gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-0"),
},
expected: map[string]*schedulertypes.ImageStateSummary{
expected: map[string]*framework.ImageStateSummary{
"gcr.io/10:v1": {
Size: int64(10 * mb),
NumNodes: 2,
@ -78,7 +78,7 @@ func TestGetNodeImageStates(t *testing.T) {
"gcr.io/10:v1": sets.NewString("node-1"),
"gcr.io/200:v1": sets.NewString(),
},
expected: map[string]*schedulertypes.ImageStateSummary{},
expected: map[string]*framework.ImageStateSummary{},
},
}

View File

@ -1,31 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["listers.go"],
importpath = "k8s.io/kubernetes/pkg/scheduler/listers",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/scheduler/listers/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -6,7 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

View File

@ -18,26 +18,26 @@ package nodeinfo
import (
v1 "k8s.io/api/core/v1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)
// TODO(#89528): This file defines temporary aliases of types used by kubelet.
// Those aliases will be removed, and the underlying types, now defined in the scheduler framework package, will be used directly.
// NodeInfo is node-level aggregated information.
type NodeInfo = schedulertypes.NodeInfo
type NodeInfo = framework.NodeInfo
// Resource is a collection of compute resources.
type Resource = schedulertypes.Resource
type Resource = framework.Resource
// NewResource creates a Resource from a ResourceList
func NewResource(rl v1.ResourceList) *Resource {
return schedulertypes.NewResource(rl)
return framework.NewResource(rl)
}
// NewNodeInfo returns a ready-to-use, empty NodeInfo object.
// If any pods are given as arguments, their information will be aggregated in
// the returned object.
func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
return schedulertypes.NewNodeInfo(pods...)
return framework.NewNodeInfo(pods...)
}
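
These shims work because they are Go type aliases, not new named types: an alias is the same type under a second name, so kubelet code that predates the move keeps compiling and assigning without conversions. A self-contained illustration, with both names in one file for brevity:

package main

import "fmt"

// Imagine FrameworkNodeInfo living in the new framework package.
type FrameworkNodeInfo struct{ Node string }

// The alias is the *same* type, so values flow freely in both directions.
type NodeInfo = FrameworkNodeInfo

func main() {
	var n NodeInfo = FrameworkNodeInfo{Node: "node-1"} // no conversion needed
	fmt.Println(n.Node)
}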

View File

@ -60,7 +60,6 @@ import (
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
type fakePodConditionUpdater struct{}
@ -401,7 +400,7 @@ func (s *fakeNodeSelector) Name() string {
return "FakeNodeSelector"
}
func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
if nodeInfo.Node().Name != s.NodeName {
return framework.NewStatus(framework.UnschedulableAndUnresolvable)
}
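
fakeNodeSelector shows the minimal Filter plugin contract after the move: take a framework.NodeInfo, return nil to admit the node or a Status to reject it, with UnschedulableAndUnresolvable signalling that preemption cannot help. The same shape with local stand-in types (illustrative only, not the framework API):

package main

import "fmt"

// Status and NodeInfo are stand-ins for the framework types.
type Status struct{ msg string }
type NodeInfo struct{ Name string }

// nameFilter mirrors fakeNodeSelector: admit exactly one node by name.
type nameFilter struct{ want string }

func (f nameFilter) Filter(n *NodeInfo) *Status {
	if n.Name != f.want {
		return &Status{msg: "unschedulable and unresolvable"}
	}
	return nil // nil means the node passes the filter
}

func main() {
	f := nameFilter{want: "node-2"}
	for _, n := range []*NodeInfo{{Name: "node-1"}, {Name: "node-2"}} {
		if st := f.Filter(n); st != nil {
			fmt.Println(n.Name, "rejected:", st.msg)
		} else {
			fmt.Println(n.Name, "admitted")
		}
	}
}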

View File

@ -1,49 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"host_ports.go",
"node_info.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/types",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"host_ports_test.go",
"node_info_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,135 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"k8s.io/api/core/v1"
)
// DefaultBindAllHostIP defines the default IP address used to bind to all host interfaces.
const DefaultBindAllHostIP = "0.0.0.0"
// ProtocolPort represents a protocol port pair, e.g. tcp:80.
type ProtocolPort struct {
Protocol string
Port int32
}
// NewProtocolPort creates a ProtocolPort instance.
func NewProtocolPort(protocol string, port int32) *ProtocolPort {
pp := &ProtocolPort{
Protocol: protocol,
Port: port,
}
if len(pp.Protocol) == 0 {
pp.Protocol = string(v1.ProtocolTCP)
}
return pp
}
// HostPortInfo stores a mapping from IP to a set of ProtocolPorts
type HostPortInfo map[string]map[ProtocolPort]struct{}
// Add adds (ip, protocol, port) to HostPortInfo
func (h HostPortInfo) Add(ip, protocol string, port int32) {
if port <= 0 {
return
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
if _, ok := h[ip]; !ok {
h[ip] = map[ProtocolPort]struct{}{
*pp: {},
}
return
}
h[ip][*pp] = struct{}{}
}
// Remove removes (ip, protocol, port) from HostPortInfo
func (h HostPortInfo) Remove(ip, protocol string, port int32) {
if port <= 0 {
return
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
if m, ok := h[ip]; ok {
delete(m, *pp)
if len(h[ip]) == 0 {
delete(h, ip)
}
}
}
// Len returns the total number of (ip, protocol, port) tuples in HostPortInfo
func (h HostPortInfo) Len() int {
length := 0
for _, m := range h {
length += len(m)
}
return length
}
// CheckConflict checks if the input (ip, protocol, port) conflicts with the existing
// ones in HostPortInfo.
func (h HostPortInfo) CheckConflict(ip, protocol string, port int32) bool {
if port <= 0 {
return false
}
h.sanitize(&ip, &protocol)
pp := NewProtocolPort(protocol, port)
// If ip is 0.0.0.0, check the (protocol, port) pairs under every registered IP
if ip == DefaultBindAllHostIP {
for _, m := range h {
if _, ok := m[*pp]; ok {
return true
}
}
return false
}
// If ip isn't 0.0.0.0, only check the (protocol, port) pairs under ip and 0.0.0.0
for _, key := range []string{DefaultBindAllHostIP, ip} {
if m, ok := h[key]; ok {
if _, ok2 := m[*pp]; ok2 {
return true
}
}
}
return false
}
// sanitize the parameters
func (h HostPortInfo) sanitize(ip, protocol *string) {
if len(*ip) == 0 {
*ip = DefaultBindAllHostIP
}
if len(*protocol) == 0 {
*protocol = string(v1.ProtocolTCP)
}
}
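
The wildcard rules are easiest to read off a usage sketch. Assuming the HostPortInfo definitions above (this snippet is illustrative, not part of the moved file):

hp := make(HostPortInfo)
hp.Add("127.0.0.1", "TCP", 80)

// A 0.0.0.0 query consults every registered IP, so it sees 127.0.0.1:TCP/80.
hp.CheckConflict("0.0.0.0", "TCP", 80) // true

// A specific IP consults only itself plus any 0.0.0.0 entries.
hp.CheckConflict("10.0.0.1", "TCP", 80) // false

// A registered 0.0.0.0 entry conflicts with the same port on any IP.
hp.Add("0.0.0.0", "TCP", 443)
hp.CheckConflict("192.168.1.5", "TCP", 443) // true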

View File

@ -1,231 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"testing"
)
type hostPortInfoParam struct {
protocol, ip string
port int32
}
func TestHostPortInfo_AddRemove(t *testing.T) {
tests := []struct {
desc string
added []hostPortInfoParam
removed []hostPortInfoParam
length int
}{
{
desc: "normal add case",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
// this might not make sense in a real case, but the struct doesn't forbid it.
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
{"TCP", "0.0.0.0", 0},
{"TCP", "0.0.0.0", -1},
},
length: 8,
},
{
desc: "empty ip and protocol add should work",
added: []hostPortInfoParam{
{"", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"", "127.0.0.1", 81},
{"", "127.0.0.1", 82},
{"", "", 79},
{"UDP", "", 80},
{"", "", 81},
{"", "", 82},
{"", "", 0},
{"", "", -1},
},
length: 8,
},
{
desc: "normal remove case",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
removed: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
length: 0,
},
{
desc: "empty ip and protocol remove should work",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
removed: []hostPortInfoParam{
{"", "127.0.0.1", 79},
{"", "127.0.0.1", 81},
{"", "127.0.0.1", 82},
{"UDP", "127.0.0.1", 80},
{"", "", 79},
{"", "", 81},
{"", "", 82},
{"UDP", "", 80},
},
length: 0,
},
}
for _, test := range tests {
hp := make(HostPortInfo)
for _, param := range test.added {
hp.Add(param.ip, param.protocol, param.port)
}
for _, param := range test.removed {
hp.Remove(param.ip, param.protocol, param.port)
}
if hp.Len() != test.length {
t.Errorf("%v failed: expect length %d; got %d", test.desc, test.length, hp.Len())
t.Error(hp)
}
}
}
func TestHostPortInfo_Check(t *testing.T) {
tests := []struct {
desc string
added []hostPortInfoParam
check hostPortInfoParam
expect bool
}{
{
desc: "empty check should check 0.0.0.0 and TCP",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 81},
expect: false,
},
{
desc: "empty check should check 0.0.0.0 and TCP (conflicted)",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 80},
expect: true,
},
{
desc: "empty port check should pass",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"", "", 0},
expect: false,
},
{
desc: "0.0.0.0 should check all registered IPs",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: true,
},
{
desc: "0.0.0.0 with different protocol should be allowed",
added: []hostPortInfoParam{
{"UDP", "127.0.0.1", 80},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: false,
},
{
desc: "0.0.0.0 with different port should be allowed",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
},
check: hostPortInfoParam{"TCP", "0.0.0.0", 80},
expect: false,
},
{
desc: "normal ip should check all registered 0.0.0.0",
added: []hostPortInfoParam{
{"TCP", "0.0.0.0", 80},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: true,
},
{
desc: "normal ip with different port/protocol should be allowed (0.0.0.0)",
added: []hostPortInfoParam{
{"TCP", "0.0.0.0", 79},
{"UDP", "0.0.0.0", 80},
{"TCP", "0.0.0.0", 81},
{"TCP", "0.0.0.0", 82},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: false,
},
{
desc: "normal ip with different port/protocol should be allowed",
added: []hostPortInfoParam{
{"TCP", "127.0.0.1", 79},
{"UDP", "127.0.0.1", 80},
{"TCP", "127.0.0.1", 81},
{"TCP", "127.0.0.1", 82},
},
check: hostPortInfoParam{"TCP", "127.0.0.1", 80},
expect: false,
},
}
for _, test := range tests {
hp := make(HostPortInfo)
for _, param := range test.added {
hp.Add(param.ip, param.protocol, param.port)
}
if hp.CheckConflict(test.check.ip, test.check.protocol, test.check.port) != test.expect {
t.Errorf("%v failed, expected %t; got %t", test.desc, test.expect, !test.expect)
}
}
}

View File

@ -36,7 +36,7 @@ go_library(
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library",

View File

@ -36,7 +36,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller/daemon"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@ -688,7 +688,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
newPod := daemon.NewPod(ds, node.Name)
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := schedfwk.NewNodeInfo()
nodeInfo.SetNode(&node)
taints, err := nodeInfo.Taints()
if err != nil {

View File

@ -191,7 +191,6 @@
"k8s.io/kubernetes/pkg/scheduler/listers",
"k8s.io/kubernetes/pkg/scheduler/metrics",
"k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/scheduler/types",
"k8s.io/kubernetes/pkg/scheduler/util",
"k8s.io/kubernetes/pkg/scheduler/volumebinder",
"k8s.io/kubernetes/pkg/security/apparmor",

View File

@ -10,7 +10,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/node",
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",

View File

@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system"
)
@ -391,7 +391,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
},
}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := schedfwk.NewNodeInfo()
// Simple lookup for nonblocking taints based on a comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}

View File

@ -44,7 +44,7 @@ go_library(
"//pkg/kubeapiserver:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/util/env:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",

View File

@ -34,7 +34,7 @@ import (
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedfwk "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
testutils "k8s.io/kubernetes/test/utils"
)
@ -250,7 +250,7 @@ func isNodeUntainted(node *v1.Node) bool {
},
}
nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo := schedfwk.NewNodeInfo()
// Simple lookup for nonblocking taints based on a comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{}

View File

@ -33,7 +33,6 @@ go_test(
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/priority:go_default_library",

View File

@ -32,7 +32,6 @@ import (
schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
testutils "k8s.io/kubernetes/test/integration/util"
)
@ -214,7 +213,7 @@ func (fp *FilterPlugin) reset() {
// Filter is a test function that returns an error or nil, depending on the
// value of "failFilter".
func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
fp.numFilterCalled++
if fp.failFilter {

View File

@ -43,7 +43,6 @@ import (
"k8s.io/kubernetes/pkg/scheduler"
schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/plugin/pkg/admission/priority"
testutils "k8s.io/kubernetes/test/integration/util"
utils "k8s.io/kubernetes/test/utils"
@ -84,7 +83,7 @@ func (fp *tokenFilter) Name() string {
}
func (fp *tokenFilter) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod,
nodeInfo *schedulertypes.NodeInfo) *framework.Status {
nodeInfo *framework.NodeInfo) *framework.Status {
if fp.Tokens > 0 {
fp.Tokens--
return nil
@ -101,13 +100,13 @@ func (fp *tokenFilter) PreFilter(ctx context.Context, state *framework.CycleStat
}
func (fp *tokenFilter) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
podToAdd *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
fp.Tokens--
return nil
}
func (fp *tokenFilter) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
podToRemove *v1.Pod, nodeInfo *framework.NodeInfo) *framework.Status {
fp.Tokens++
return nil
}
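
tokenFilter works because Filter, AddPod, and RemovePod keep one consistent ledger: Filter and AddPod each consume a token, RemovePod returns one. That symmetry is what lets the framework simulate preemption, hypothetically removing a victim pod and re-running the filter with capacity restored. The invariant in miniature (stand-in types, not the framework API):

package main

import "fmt"

// tokens models the tokenFilter ledger above.
type tokens struct{ n int }

// Filter admits a pod while tokens remain, consuming one on success.
func (t *tokens) Filter() bool {
	if t.n > 0 {
		t.n--
		return true
	}
	return false
}

func (t *tokens) AddPod()    { t.n-- } // a pod hypothetically added to the node
func (t *tokens) RemovePod() { t.n++ } // a victim hypothetically evicted

func main() {
	t := &tokens{n: 1}
	fmt.Println(t.Filter()) // true: the single token is consumed
	t.RemovePod()           // simulate evicting a victim
	fmt.Println(t.Filter()) // true again: capacity was restored
}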