Merge pull request #89703 from ahg-g/ahg-podinfo

Rename scheduler/nodeinfo pkg to scheduler/types
Authored by Kubernetes Prow Robot on 2020-04-02 12:43:11 -07:00; committed by GitHub
commit e8a24cb7f4
93 changed files with 1267 additions and 1198 deletions
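
The rename is mechanical across the 93 files: the import path moves from k8s.io/kubernetes/pkg/scheduler/nodeinfo to k8s.io/kubernetes/pkg/scheduler/types, the conventional alias changes from schedulernodeinfo (or nodeinfo) to schedulertypes, and every *NodeInfo parameter, slice, and map is requalified. A minimal sketch of the before/after pattern in Go (the ExamplePlugin type and package below are hypothetical, not part of this PR; the framework and NodeInfo APIs used are the ones visible in the diff):

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	// Before this PR the same types were imported as:
	//   schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

// ExamplePlugin is a hypothetical filter plugin used only to illustrate the rename.
type ExamplePlugin struct{}

func (pl *ExamplePlugin) Name() string { return "ExamplePlugin" }

// Filter previously took nodeInfo *schedulernodeinfo.NodeInfo; only the package
// qualifier changes, the NodeInfo type and its methods are unchanged.
func (pl *ExamplePlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
	if nodeInfo.Node() == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}
	return nil
}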

View File

@@ -81,9 +81,9 @@ go_test(
 "//pkg/scheduler/internal/cache/fake:go_default_library",
 "//pkg/scheduler/internal/queue:go_default_library",
 "//pkg/scheduler/listers:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
 "//pkg/scheduler/profile:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/api/events/v1beta1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
@@ -132,6 +132,7 @@ filegroup(
 "//pkg/scheduler/nodeinfo:all-srcs",
 "//pkg/scheduler/profile:all-srcs",
 "//pkg/scheduler/testing:all-srcs",
+"//pkg/scheduler/types:all-srcs",
 "//pkg/scheduler/util:all-srcs",
 ],
 tags = ["automanaged"],

View File

@@ -17,8 +17,8 @@ go_library(
 "//pkg/scheduler/internal/queue:go_default_library",
 "//pkg/scheduler/listers:go_default_library",
 "//pkg/scheduler/metrics:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
 "//pkg/scheduler/profile:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",
@@ -64,9 +64,9 @@ go_test(
 "//pkg/scheduler/internal/queue:go_default_library",
 "//pkg/scheduler/listers:go_default_library",
 "//pkg/scheduler/listers/fake:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
 "//pkg/scheduler/profile:go_default_library",
 "//pkg/scheduler/testing:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/api/policy/v1beta1:go_default_library",

View File

@@ -31,7 +31,7 @@ import (
 extenderv1 "k8s.io/kube-scheduler/extender/v1"
 schedulerapi "k8s.io/kubernetes/pkg/scheduler/apis/config"
 "k8s.io/kubernetes/pkg/scheduler/listers"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 const (
@@ -287,7 +287,7 @@ func (h *HTTPExtender) convertToNodeToVictims(
 // and extender, i.e. when the pod is not found in nodeInfo.Pods.
 func (h *HTTPExtender) convertPodUIDToPod(
 metaPod *extenderv1.MetaPod,
-nodeInfo *schedulernodeinfo.NodeInfo) (*v1.Pod, error) {
+nodeInfo *schedulertypes.NodeInfo) (*v1.Pod, error) {
 for _, pod := range nodeInfo.Pods() {
 if string(pod.UID) == metaPod.UID {
 return pod, nil

View File

@@ -41,9 +41,9 @@ import (
 internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 "k8s.io/kubernetes/pkg/scheduler/listers"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 "k8s.io/kubernetes/pkg/scheduler/profile"
 st "k8s.io/kubernetes/pkg/scheduler/testing"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -143,7 +143,7 @@ type FakeExtender struct {
 ignorable bool
 // Cached node information for fake extender
-cachedNodeNameToInfo map[string]*schedulernodeinfo.NodeInfo
+cachedNodeNameToInfo map[string]*schedulertypes.NodeInfo
 }
 func (f *FakeExtender) Name() string {

View File

@@ -43,8 +43,8 @@ import (
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 "k8s.io/kubernetes/pkg/scheduler/listers"
 "k8s.io/kubernetes/pkg/scheduler/metrics"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 "k8s.io/kubernetes/pkg/scheduler/profile"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 "k8s.io/kubernetes/pkg/scheduler/util"
 utiltrace "k8s.io/utils/trace"
 )
@@ -524,7 +524,7 @@ func (g *genericScheduler) findNodesThatPassExtenders(pod *v1.Pod, filtered []*v
 // addNominatedPods adds pods with equal or greater priority which are nominated
 // to run on the node. It returns 1) whether any pod was added, 2) augmented cycleState,
 // 3) augmented nodeInfo.
-func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *schedulernodeinfo.NodeInfo) (bool, *framework.CycleState, *schedulernodeinfo.NodeInfo, error) {
+func (g *genericScheduler) addNominatedPods(ctx context.Context, prof *profile.Profile, pod *v1.Pod, state *framework.CycleState, nodeInfo *schedulertypes.NodeInfo) (bool, *framework.CycleState, *schedulertypes.NodeInfo, error) {
 if g.schedulingQueue == nil || nodeInfo == nil || nodeInfo.Node() == nil {
 // This may happen only in tests.
 return false, state, nodeInfo, nil
@@ -564,7 +564,7 @@ func (g *genericScheduler) podPassesFiltersOnNode(
 prof *profile.Profile,
 state *framework.CycleState,
 pod *v1.Pod,
-info *schedulernodeinfo.NodeInfo,
+info *schedulertypes.NodeInfo,
 ) (bool, *framework.Status, error) {
 var status *framework.Status
@@ -856,7 +856,7 @@ func (g *genericScheduler) selectNodesForPreemption(
 prof *profile.Profile,
 state *framework.CycleState,
 pod *v1.Pod,
-potentialNodes []*schedulernodeinfo.NodeInfo,
+potentialNodes []*schedulertypes.NodeInfo,
 pdbs []*policy.PodDisruptionBudget,
 ) (map[*v1.Node]*extenderv1.Victims, error) {
 nodeToVictims := map[*v1.Node]*extenderv1.Victims{}
@@ -946,7 +946,7 @@ func (g *genericScheduler) selectVictimsOnNode(
 prof *profile.Profile,
 state *framework.CycleState,
 pod *v1.Pod,
-nodeInfo *schedulernodeinfo.NodeInfo,
+nodeInfo *schedulertypes.NodeInfo,
 pdbs []*policy.PodDisruptionBudget,
 ) ([]*v1.Pod, int, bool) {
 var potentialVictims []*v1.Pod
@@ -1034,8 +1034,8 @@ func (g *genericScheduler) selectVictimsOnNode(
 // nodesWherePreemptionMightHelp returns a list of nodes with failed predicates
 // that may be satisfied by removing pods from the node.
-func nodesWherePreemptionMightHelp(nodes []*schedulernodeinfo.NodeInfo, fitErr *FitError) []*schedulernodeinfo.NodeInfo {
-var potentialNodes []*schedulernodeinfo.NodeInfo
+func nodesWherePreemptionMightHelp(nodes []*schedulertypes.NodeInfo, fitErr *FitError) []*schedulertypes.NodeInfo {
+var potentialNodes []*schedulertypes.NodeInfo
 for _, node := range nodes {
 name := node.Node().Name
 // We reply on the status by each plugin - 'Unschedulable' or 'UnschedulableAndUnresolvable'

View File

@@ -57,10 +57,9 @@ import (
 internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 "k8s.io/kubernetes/pkg/scheduler/profile"
 st "k8s.io/kubernetes/pkg/scheduler/testing"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -78,7 +77,7 @@ func (pl *trueFilterPlugin) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *trueFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 return nil
 }
@@ -95,7 +94,7 @@ func (pl *falseFilterPlugin) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
@@ -112,7 +111,7 @@ func (pl *matchFilterPlugin) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 node := nodeInfo.Node()
 if node == nil {
 return framework.NewStatus(framework.Error, "node not found")
@@ -136,7 +135,7 @@ func (pl *noPodsFilterPlugin) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 if len(nodeInfo.Pods()) == 0 {
 return nil
 }
@@ -161,7 +160,7 @@ func (pl *fakeFilterPlugin) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *fakeFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 atomic.AddInt32(&pl.numFilterCalled, 1)
 if returnCode, ok := pl.failedNodeReturnCodeMap[nodeInfo.Node().Name]; ok {
@@ -2029,9 +2028,9 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 fitErr := FitError{
 FilteredNodesStatuses: test.nodesStatuses,
 }
-var nodeInfos []*schedulernodeinfo.NodeInfo
+var nodeInfos []*schedulertypes.NodeInfo
 for _, n := range makeNodeList(nodeNames) {
-ni := schedulernodeinfo.NewNodeInfo()
+ni := schedulertypes.NewNodeInfo()
 ni.SetNode(n)
 nodeInfos = append(nodeInfos, ni)
 }
@@ -2372,7 +2371,7 @@ func TestPreempt(t *testing.T) {
 for _, pod := range test.pods {
 cache.AddPod(pod)
 }
-cachedNodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{}
+cachedNodeInfoMap := map[string]*schedulertypes.NodeInfo{}
 nodeNames := defaultNodeNames
 if len(test.nodeNames) != 0 {
 nodeNames = test.nodeNames
@@ -2392,7 +2391,7 @@ func TestPreempt(t *testing.T) {
 nodeNames[i] = node.Name
 // Set nodeInfo to extenders to mock extenders' cache for preemption.
-cachedNodeInfo := schedulernodeinfo.NewNodeInfo()
+cachedNodeInfo := schedulertypes.NewNodeInfo()
 cachedNodeInfo.SetNode(node)
 cachedNodeInfoMap[node.Name] = cachedNodeInfo
 }
@@ -2571,8 +2570,8 @@ func TestFairEvaluationForNodes(t *testing.T) {
 }
 }
-func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulernodeinfo.NodeInfo, error) {
-var nodeInfos []*schedulernodeinfo.NodeInfo
+func nodesToNodeInfos(nodes []*v1.Node, snapshot *internalcache.Snapshot) ([]*schedulertypes.NodeInfo, error) {
+var nodeInfos []*schedulertypes.NodeInfo
 for _, n := range nodes {
 nodeInfo, err := snapshot.NodeInfos().Get(n.Name)
 if err != nil {

View File

@@ -54,8 +54,8 @@ import (
 internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
 internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
 "k8s.io/kubernetes/pkg/scheduler/listers"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 "k8s.io/kubernetes/pkg/scheduler/profile"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 const (
@@ -593,6 +593,6 @@ func (t *TestPlugin) ScoreExtensions() framework.ScoreExtensions {
 return nil
 }
-func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status {
+func (t *TestPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 return nil
 }

View File

@@ -8,7 +8,7 @@ go_library(
 deps = [
 "//pkg/scheduler/framework/plugins/helper:go_default_library",
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//pkg/util/node:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",

View File

@@ -25,7 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 utilnode "k8s.io/kubernetes/pkg/util/node"
 )
@@ -196,7 +196,7 @@ func New(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin
 }
 // countMatchingPods counts pods based on namespace and matching all selectors
-func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulernodeinfo.NodeInfo) int {
+func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *schedulertypes.NodeInfo) int {
 if len(nodeInfo.Pods()) == 0 || selector.Empty() {
 return 0
 }

View File

@@ -7,7 +7,7 @@ go_library(
 visibility = ["//visibility:public"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//pkg/util/parsers:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@@ -24,7 +24,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 "k8s.io/kubernetes/pkg/util/parsers"
 )
@@ -94,7 +94,7 @@ func calculatePriority(sumScores int64) int64 {
 // sumImageScores returns the sum of image scores of all the containers that are already on the node.
 // Each image receives a raw score of its size, scaled by scaledImageScore. The raw scores are later used to calculate
 // the final score. Note that the init containers are not considered for it's rare for users to deploy huge init containers.
-func sumImageScores(nodeInfo *schedulernodeinfo.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
+func sumImageScores(nodeInfo *schedulertypes.NodeInfo, containers []v1.Container, totalNumNodes int) int64 {
 var sum int64
 imageStates := nodeInfo.ImageStates()
@@ -111,7 +111,7 @@ func sumImageScores(nodeInfo *schedulernodeinfo.NodeInfo, containers []v1.Contai
 // The size of the image is used as the base score, scaled by a factor which considers how much nodes the image has "spread" to.
 // This heuristic aims to mitigate the undesirable "node heating problem", i.e., pods get assigned to the same or
 // a few nodes due to image locality.
-func scaledImageScore(imageState *schedulernodeinfo.ImageStateSummary, totalNumNodes int) int64 {
+func scaledImageScore(imageState *schedulertypes.ImageStateSummary, totalNumNodes int) int64 {
 spread := float64(imageState.NumNodes) / float64(totalNumNodes)
 return int64(float64(imageState.Size) * spread)
 }

View File

@@ -13,7 +13,7 @@ go_library(
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
 "//pkg/scheduler/internal/parallelize:go_default_library",
 "//pkg/scheduler/listers:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//pkg/scheduler/util:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
@@ -36,7 +36,7 @@ go_test(
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
 "//pkg/scheduler/internal/cache:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@@ -28,7 +28,7 @@ import (
 "k8s.io/klog"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -209,7 +209,7 @@ func podMatchesAllAffinityTerms(pod *v1.Pod, terms []*affinityTerm) bool {
 // getTPMapMatchingExistingAntiAffinity calculates the following for each existing pod on each node:
 // (1) Whether it has PodAntiAffinity
 // (2) Whether any AffinityTerm matches the incoming pod
-func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.NodeInfo) (topologyToMatchedTermCount, error) {
+func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*schedulertypes.NodeInfo) (topologyToMatchedTermCount, error) {
 errCh := schedutil.NewErrorChannel()
 var lock sync.Mutex
 topologyMap := make(topologyToMatchedTermCount)
@@ -253,7 +253,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.Node
 // It returns a topologyToMatchedTermCount that are checked later by the affinity
 // predicate. With this topologyToMatchedTermCount available, the affinity predicate does not
 // need to check all the pods in the cluster.
-func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*nodeinfo.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount, error) {
+func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, allNodes []*schedulertypes.NodeInfo) (topologyToMatchedTermCount, topologyToMatchedTermCount, error) {
 topologyPairsAffinityPodsMap := make(topologyToMatchedTermCount)
 topologyToMatchedExistingAntiAffinityTerms := make(topologyToMatchedTermCount)
 affinity := pod.Spec.Affinity
@@ -328,8 +328,8 @@ func targetPodMatchesAffinityOfPod(pod, targetPod *v1.Pod) bool {
 // PreFilter invoked at the prefilter extension point.
 func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
-var allNodes []*nodeinfo.NodeInfo
-var havePodsWithAffinityNodes []*nodeinfo.NodeInfo
+var allNodes []*schedulertypes.NodeInfo
+var havePodsWithAffinityNodes []*schedulertypes.NodeInfo
 var err error
 if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil {
 return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos: %v", err))
@@ -366,7 +366,7 @@ func (pl *InterPodAffinity) PreFilterExtensions() framework.PreFilterExtensions
 }
 // AddPod from pre-computed data in cycleState.
-func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 state, err := getPreFilterState(cycleState)
 if err != nil {
 return framework.NewStatus(framework.Error, err.Error())
@@ -376,7 +376,7 @@ func (pl *InterPodAffinity) AddPod(ctx context.Context, cycleState *framework.Cy
 }
 // RemovePod from pre-computed data in cycleState.
-func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *InterPodAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 state, err := getPreFilterState(cycleState)
 if err != nil {
 return framework.NewStatus(framework.Error, err.Error())
@@ -401,7 +401,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
 // Checks if scheduling the pod onto this node would break any anti-affinity
 // terms indicated by the existing pods.
-func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *nodeinfo.NodeInfo) (bool, error) {
+func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state *preFilterState, nodeInfo *schedulertypes.NodeInfo) (bool, error) {
 node := nodeInfo.Node()
 topologyMap := state.topologyToMatchedExistingAntiAffinityTerms
@@ -417,7 +417,7 @@ func (pl *InterPodAffinity) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, state
 }
 // nodeMatchesAllTopologyTerms checks whether "nodeInfo" matches topology of all the "terms" for the given "pod".
-func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *nodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
+func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *schedulertypes.NodeInfo, terms []v1.PodAffinityTerm) bool {
 node := nodeInfo.Node()
 for _, term := range terms {
 if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
@@ -434,7 +434,7 @@ func nodeMatchesAllTopologyTerms(pod *v1.Pod, topologyPairs topologyToMatchedTer
 // nodeMatchesAnyTopologyTerm checks whether "nodeInfo" matches
 // topology of any "term" for the given "pod".
-func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *nodeinfo.NodeInfo, terms []v1.PodAffinityTerm) bool {
+func nodeMatchesAnyTopologyTerm(pod *v1.Pod, topologyPairs topologyToMatchedTermCount, nodeInfo *schedulertypes.NodeInfo, terms []v1.PodAffinityTerm) bool {
 node := nodeInfo.Node()
 for _, term := range terms {
 if topologyValue, ok := node.Labels[term.TopologyKey]; ok {
@@ -477,7 +477,7 @@ func getMatchingAntiAffinityTopologyPairsOfPod(newPod *v1.Pod, existingPod *v1.P
 // This function returns two boolean flags. The first boolean flag indicates whether the pod matches affinity rules
 // or not. The second boolean flag indicates if the pod matches anti-affinity rules.
 func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
-state *preFilterState, nodeInfo *nodeinfo.NodeInfo,
+state *preFilterState, nodeInfo *schedulertypes.NodeInfo,
 affinity *v1.Affinity) (bool, bool, error) {
 node := nodeInfo.Node()
 if node == nil {
@@ -513,7 +513,7 @@ func (pl *InterPodAffinity) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod,
 // Filter invoked at the filter extension point.
 // It checks if a pod can be scheduled on the specified node with pod affinity/anti-affinity configuration.
-func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 state, err := getPreFilterState(cycleState)
 if err != nil {
 return framework.NewStatus(framework.Error, err.Error())

View File

@@ -25,7 +25,7 @@ import (
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/scheduler/internal/cache"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 var (
@@ -1636,7 +1636,7 @@ func TestRequiredAffinityMultipleNodes(t *testing.T) {
 func TestPreFilterDisabled(t *testing.T) {
 pod := &v1.Pod{}
-nodeInfo := nodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
 node := v1.Node{}
 nodeInfo.SetNode(&node)
 p := &InterPodAffinity{}
@@ -2211,7 +2211,7 @@ func TestGetTPMapMatchingIncomingAffinityAntiAffinity(t *testing.T) {
 }
 }
-func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo {
+func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *schedulertypes.NodeInfo {
 t.Helper()
 nodeInfo, err := snapshot.NodeInfos().Get(name)
 if err != nil {

View File

@@ -25,7 +25,7 @@ import (
 "k8s.io/klog"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )
@@ -118,7 +118,7 @@ func (m scoreMap) append(other scoreMap) {
 }
 }
-func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *nodeinfo.NodeInfo, incomingPod *v1.Pod, topoScore scoreMap) error {
+func (pl *InterPodAffinity) processExistingPod(state *preScoreState, existingPod *v1.Pod, existingPodNodeInfo *schedulertypes.NodeInfo, incomingPod *v1.Pod, topoScore scoreMap) error {
 existingPodAffinity := existingPod.Spec.Affinity
 existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil
 existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil

View File

@@ -9,7 +9,7 @@ go_library(
 "//pkg/apis/core/v1/helper:go_default_library",
 "//pkg/scheduler/framework/plugins/helper:go_default_library",
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -38,7 +38,7 @@ go_test(
 "//pkg/apis/core:go_default_library",
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
 "//pkg/scheduler/internal/cache:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 ],

View File

@@ -26,7 +26,7 @@ import (
 v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 // NodeAffinity is a plugin that checks if a pod node selector matches the node label.
@@ -51,7 +51,7 @@ func (pl *NodeAffinity) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *NodeAffinity) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 node := nodeInfo.Node()
 if node == nil {
 return framework.NewStatus(framework.Error, "node not found")

View File

@@ -26,7 +26,7 @@ import (
 api "k8s.io/kubernetes/pkg/apis/core"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/scheduler/internal/cache"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 // TODO: Add test case for RequiredDuringSchedulingRequiredDuringExecution after it's implemented.
@@ -694,7 +694,7 @@ func TestNodeAffinity(t *testing.T) {
 Name: test.nodeName,
 Labels: test.labels,
 }}
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
 nodeInfo.SetNode(&node)
 p, _ := New(nil, nil)

View File

@@ -7,7 +7,7 @@ go_library(
 visibility = ["//visibility:public"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -21,7 +21,7 @@ go_test(
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
 "//pkg/scheduler/internal/cache:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@@ -24,7 +24,7 @@ import (
 "k8s.io/apimachinery/pkg/labels"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 // Name of this plugin.
@@ -102,7 +102,7 @@ func (pl *NodeLabel) Name() string {
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node.
-func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *NodeLabel) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 node := nodeInfo.Node()
 if node == nil {
 return framework.NewStatus(framework.Error, "node not found")

View File

@@ -25,7 +25,7 @@ import (
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 "k8s.io/kubernetes/pkg/scheduler/internal/cache"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 func TestValidateNodeLabelArgs(t *testing.T) {
@@ -133,7 +133,7 @@ func TestNodeLabelFilter(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
 node := v1.Node{ObjectMeta: metav1.ObjectMeta{Labels: label}}
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
 nodeInfo.SetNode(&node)
 args := &runtime.Unknown{Raw: []byte(test.rawArgs)}
@@ -248,7 +248,7 @@ func TestNodeLabelScore(t *testing.T) {
 func TestNodeLabelFilterWithoutNode(t *testing.T) {
 var pod *v1.Pod
 t.Run("node does not exist", func(t *testing.T) {
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
 p, err := New(nil, nil)
 if err != nil {
 t.Fatalf("Failed to create plugin: %v", err)

View File

@@ -7,7 +7,7 @@ go_library(
 visibility = ["//visibility:public"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
 ],
@@ -33,7 +33,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
 ],

View File

@@ -22,7 +22,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 // NodeName is a plugin that checks if a pod spec node name matches the current node.
@@ -44,7 +44,7 @@ func (pl *NodeName) Name() string {
 }
 // Filter invoked at the filter extension point.
-func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 if nodeInfo.Node() == nil {
 return framework.NewStatus(framework.Error, "node not found")
 }
@@ -55,7 +55,7 @@ func (pl *NodeName) Filter(ctx context.Context, _ *framework.CycleState, pod *v1
 }
 // Fits actually checks if the pod fits the node.
-func Fits(pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) bool {
+func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) bool {
 return len(pod.Spec.NodeName) == 0 || pod.Spec.NodeName == nodeInfo.Node().Name
 }

View File

@@ -24,7 +24,7 @@ import (
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 func TestNodeName(t *testing.T) {
@@ -70,7 +70,7 @@ func TestNodeName(t *testing.T) {
 for _, test := range tests {
 t.Run(test.name, func(t *testing.T) {
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
 nodeInfo.SetNode(test.node)
 p, _ := New(nil, nil)

View File

@@ -7,7 +7,7 @@ go_library(
 visibility = ["//visibility:public"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
 ],
@@ -33,7 +33,7 @@ go_test(
 embed = [":go_default_library"],
 deps = [
 "//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
 "//staging/src/k8s.io/api/core/v1:go_default_library",
 "//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
 ],

View File

@@ -23,7 +23,7 @@ import (
 v1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/runtime"
 framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
 )
 // NodePorts is a plugin that checks if a node has free ports for the requested pod ports.
@@ -98,7 +98,7 @@ func getPreFilterState(cycleState *framework.CycleState) (preFilterState, error)
 }
 // Filter invoked at the filter extension point.
-func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
 wantPorts, err := getPreFilterState(cycleState)
 if err != nil {
 return framework.NewStatus(framework.Error, err.Error())
@@ -113,11 +113,11 @@ func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleStat
 }
 // Fits checks if the pod fits the node.
-func Fits(pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) bool {
+func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) bool {
 return fitsPorts(getContainerPorts(pod), nodeInfo)
 }
-func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *nodeinfo.NodeInfo) bool {
+func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *schedulertypes.NodeInfo) bool {
 // try to see whether existingPorts and wantPorts will conflict or not
 existingPorts := nodeInfo.UsedPorts()
 for _, cp := range wantPorts {

View File

@ -26,7 +26,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/diff" "k8s.io/apimachinery/pkg/util/diff"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
func newPod(host string, hostPortInfos ...string) *v1.Pod { func newPod(host string, hostPortInfos ...string) *v1.Pod {
@ -56,91 +56,91 @@ func newPod(host string, hostPortInfos ...string) *v1.Pod {
func TestNodePorts(t *testing.T) { func TestNodePorts(t *testing.T) {
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulertypes.NodeInfo
name string name string
wantStatus *framework.Status wantStatus *framework.Status
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(), nodeInfo: schedulertypes.NewNodeInfo(),
name: "nothing running", name: "nothing running",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/9090")), newPod("m1", "UDP/127.0.0.1/9090")),
name: "other port", name: "other port",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")), newPod("m1", "UDP/127.0.0.1/8080")),
name: "same udp port", name: "same udp port",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8080"), pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")), newPod("m1", "TCP/127.0.0.1/8080")),
name: "same tcp port", name: "same tcp port",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8080"), pod: newPod("m1", "TCP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/127.0.0.2/8080")), newPod("m1", "TCP/127.0.0.2/8080")),
name: "different host ip", name: "different host ip",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8080")), newPod("m1", "TCP/127.0.0.1/8080")),
name: "different protocol", name: "different protocol",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"), pod: newPod("m1", "UDP/127.0.0.1/8000", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "UDP/127.0.0.1/8080")), newPod("m1", "UDP/127.0.0.1/8080")),
name: "second udp port conflict", name: "second udp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"), pod: newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8080"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")), newPod("m1", "TCP/127.0.0.1/8001", "UDP/127.0.0.1/8081")),
name: "first tcp port conflict", name: "first tcp port conflict",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "TCP/0.0.0.0/8001"), pod: newPod("m1", "TCP/0.0.0.0/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")), newPod("m1", "TCP/127.0.0.1/8001")),
name: "first tcp port conflict due to 0.0.0.0 hostIP", name: "first tcp port conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"), pod: newPod("m1", "TCP/10.0.10.10/8001", "TCP/0.0.0.0/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/127.0.0.1/8001")), newPod("m1", "TCP/127.0.0.1/8001")),
name: "TCP hostPort conflict due to 0.0.0.0 hostIP", name: "TCP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "TCP/127.0.0.1/8001"), pod: newPod("m1", "TCP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")), newPod("m1", "TCP/0.0.0.0/8001")),
name: "second tcp port conflict to 0.0.0.0 hostIP", name: "second tcp port conflict to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8001"), pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001")), newPod("m1", "TCP/0.0.0.0/8001")),
name: "second different protocol", name: "second different protocol",
}, },
{ {
pod: newPod("m1", "UDP/127.0.0.1/8001"), pod: newPod("m1", "UDP/127.0.0.1/8001"),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")), newPod("m1", "TCP/0.0.0.0/8001", "UDP/0.0.0.0/8001")),
name: "UDP hostPort conflict due to 0.0.0.0 hostIP", name: "UDP hostPort conflict due to 0.0.0.0 hostIP",
wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason), wantStatus: framework.NewStatus(framework.Unschedulable, ErrReason),
@ -165,7 +165,7 @@ func TestNodePorts(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) { func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{} pod := &v1.Pod{}
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
node := v1.Node{} node := v1.Node{}
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
p, _ := New(nil, nil) p, _ := New(nil, nil)
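
The newPod helper used by these cases packs each host port into a single "Protocol/HostIP/HostPort" string. A minimal sketch of that convention, assuming a parser along these lines (the helper's body is not part of this hunk, so the names below are illustrative only):

package main

import (
    "fmt"
    "strconv"
    "strings"

    v1 "k8s.io/api/core/v1"
)

// parseHostPort is a hypothetical stand-in for the parsing newPod performs:
// "TCP/127.0.0.1/8080" becomes a container port with protocol TCP,
// host IP 127.0.0.1 and host port 8080.
func parseHostPort(spec string) v1.ContainerPort {
    parts := strings.Split(spec, "/")
    port, _ := strconv.Atoi(parts[2])
    return v1.ContainerPort{
        Protocol: v1.Protocol(parts[0]),
        HostIP:   parts[1],
        HostPort: int32(port),
    }
}

func main() {
    fmt.Printf("%+v\n", parseHostPort("TCP/127.0.0.1/8080"))
}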

View File

@ -19,7 +19,7 @@ go_library(
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library", "//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@ -61,7 +61,7 @@ go_test(
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@ -27,7 +27,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
var _ framework.PreFilterPlugin = &Fit{} var _ framework.PreFilterPlugin = &Fit{}
@ -56,7 +56,7 @@ type FitArgs struct {
// preFilterState computed at PreFilter and used at Filter. // preFilterState computed at PreFilter and used at Filter.
type preFilterState struct { type preFilterState struct {
schedulernodeinfo.Resource schedulertypes.Resource
} }
// Clone the prefilter state. // Clone the prefilter state.
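
As the comment above says, this state is computed once at PreFilter and handed to Filter through the framework's CycleState. A minimal sketch of that round trip, using an illustrative state type and key rather than the plugin's own names:

package main

import (
    "fmt"

    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
    schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

// demoState plays the role of preFilterState above (the name is illustrative).
type demoState struct {
    schedulertypes.Resource
}

// Clone satisfies framework.StateData; a shallow copy is enough for this sketch.
func (s *demoState) Clone() framework.StateData { return s }

func main() {
    cs := framework.NewCycleState()
    // PreFilter side: store the aggregated request under a well-known key.
    cs.Write("demo-prefilter-key", &demoState{Resource: schedulertypes.Resource{MilliCPU: 400}})
    // Filter side: read it back instead of recomputing it per node.
    c, err := cs.Read("demo-prefilter-key")
    if err != nil {
        panic(err)
    }
    fmt.Println(c.(*demoState).MilliCPU) // 400
}
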
@ -69,7 +69,7 @@ func (f *Fit) Name() string {
return FitName return FitName
} }
// computePodResourceRequest returns a schedulernodeinfo.Resource that covers the largest // computePodResourceRequest returns a schedulertypes.Resource that covers the largest
// width in each resource dimension. Because init-containers run sequentially, we collect // width in each resource dimension. Because init-containers run sequentially, we collect
// the max in each dimension iteratively. In contrast, we sum the resource vectors for // the max in each dimension iteratively. In contrast, we sum the resource vectors for
// regular containers since they run simultaneously. // regular containers since they run simultaneously.
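
The rule this comment describes can be shown with a small self-contained sketch; the function name and plain int64 inputs are illustrative, while the real code applies the same max-versus-sum logic to every dimension of a schedulertypes.Resource:

package main

import "fmt"

// effectiveCPURequest applies the rule from the comment above: regular
// containers run simultaneously, so their requests are summed; init containers
// run sequentially, so only the largest one matters; the pod needs the bigger
// of the two.
func effectiveCPURequest(containersMilliCPU, initContainersMilliCPU []int64) int64 {
    var sum, initMax int64
    for _, c := range containersMilliCPU {
        sum += c
    }
    for _, ic := range initContainersMilliCPU {
        if ic > initMax {
            initMax = ic
        }
    }
    if initMax > sum {
        return initMax
    }
    return sum
}

func main() {
    // containers: 100m + 200m = 300m; init containers: max(250m, 400m) = 400m
    fmt.Println(effectiveCPURequest([]int64{100, 200}, []int64{250, 400})) // 400
}
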
@ -143,7 +143,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
// Filter invoked at the filter extension point. // Filter invoked at the filter extension point.
// Checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod. // Checks if a node has sufficient resources, such as cpu, memory, gpu, opaque int resources etc to run a pod.
// It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod. // It returns a list of insufficient resources, if empty, then the node has all the resources requested by the pod.
func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status { func (f *Fit) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState) s, err := getPreFilterState(cycleState)
if err != nil { if err != nil {
return framework.NewStatus(framework.Error, err.Error()) return framework.NewStatus(framework.Error, err.Error())
@ -174,11 +174,11 @@ type InsufficientResource struct {
} }
// Fits checks if node have enough resources to host the pod. // Fits checks if node have enough resources to host the pod.
func Fits(pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { func Fits(pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
return fitsRequest(computePodResourceRequest(pod), nodeInfo, ignoredExtendedResources) return fitsRequest(computePodResourceRequest(pod), nodeInfo, ignoredExtendedResources)
} }
func fitsRequest(podRequest *preFilterState, nodeInfo *schedulernodeinfo.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource { func fitsRequest(podRequest *preFilterState, nodeInfo *schedulertypes.NodeInfo, ignoredExtendedResources sets.String) []InsufficientResource {
insufficientResources := make([]InsufficientResource, 0, 4) insufficientResources := make([]InsufficientResource, 0, 4)
allowedPodNumber := nodeInfo.AllowedPodNumber() allowedPodNumber := nodeInfo.AllowedPodNumber()

View File

@ -27,7 +27,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
var ( var (
@ -62,7 +62,7 @@ func makeAllocatableResources(milliCPU, memory, pods, extendedA, storage, hugePa
} }
} }
func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod { func newResourcePod(usage ...schedulertypes.Resource) *v1.Pod {
containers := []v1.Container{} containers := []v1.Container{}
for _, req := range usage { for _, req := range usage {
containers = append(containers, v1.Container{ containers = append(containers, v1.Container{
@ -76,7 +76,7 @@ func newResourcePod(usage ...schedulernodeinfo.Resource) *v1.Pod {
} }
} }
func newResourceInitPod(pod *v1.Pod, usage ...schedulernodeinfo.Resource) *v1.Pod { func newResourceInitPod(pod *v1.Pod, usage ...schedulertypes.Resource) *v1.Pod {
pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers
return pod return pod
} }
@ -93,7 +93,7 @@ func getErrReason(rn v1.ResourceName) string {
func TestEnoughRequests(t *testing.T) { func TestEnoughRequests(t *testing.T) {
enoughPodsTests := []struct { enoughPodsTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulertypes.NodeInfo
name string name string
ignoredResources []byte ignoredResources []byte
wantInsufficientResources []InsufficientResource wantInsufficientResources []InsufficientResource
@ -101,266 +101,266 @@ func TestEnoughRequests(t *testing.T) {
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
name: "no resources requested always fits", name: "no resources requested always fits",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
name: "too many resources fails", name: "too many resources fails",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU), getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 1, 10, 10}, {v1.ResourceMemory, getErrReason(v1.ResourceMemory), 1, 20, 20}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to init container cpu", name: "too many resources fails due to init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 3, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 3, Memory: 1}, schedulertypes.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 8, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 8, Memory: 19})),
name: "too many resources fails due to highest init container cpu", name: "too many resources fails due to highest init container cpu",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 3, 8, 10}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to init container memory", name: "too many resources fails due to init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 3}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 3}, schedulertypes.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
name: "too many resources fails due to highest init container memory", name: "too many resources fails due to highest init container memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 3, 19, 20}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
name: "init container fits because it's the max, not sum, of containers and init containers", name: "init container fits because it's the max, not sum, of containers and init containers",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}, schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}), schedulertypes.Resource{MilliCPU: 1, Memory: 1}, schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 19})),
name: "multiple init containers fit because it's the max, not sum, of containers and init containers", name: "multiple init containers fit because it's the max, not sum, of containers and init containers",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
name: "both resources fit", name: "both resources fit",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 9, Memory: 5})), newResourcePod(schedulertypes.Resource{MilliCPU: 9, Memory: 5})),
name: "one resource memory fits", name: "one resource memory fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceCPU, getErrReason(v1.ResourceCPU), 2, 9, 10}},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 2}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 2}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
name: "one resource cpu fits", name: "one resource cpu fits",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 2, 19, 20}},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
name: "equal edge case", name: "equal edge case",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 4, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 4, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
name: "equal edge case for init container", name: "equal edge case for init container",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), pod: newResourcePod(schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})),
name: "extended resource fits", name: "extended resource fits",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), schedulernodeinfo.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}), schedulertypes.Resource{ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{})),
name: "extended resource fits for init container", name: "extended resource fits for init container",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced", name: "extended resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}}, wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 0}})),
name: "extended resource capacity enforced for init container", name: "extended resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}}, wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 10, 0, 5}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced", name: "extended resource allocatable enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}}, wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 5}})),
name: "extended resource allocatable enforced for init container", name: "extended resource allocatable enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}}, wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 1, 5, 5}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple containers", name: "extended resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}}, wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}, schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}},
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable admits multiple init containers", name: "extended resource allocatable admits multiple init containers",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}}, schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 6}},
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 3}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{extendedResourceA: 2}})),
name: "extended resource allocatable enforced for multiple init containers", name: "extended resource allocatable enforced for multiple init containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceA)),
wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}}, wantInsufficientResources: []InsufficientResource{{extendedResourceA, getErrReason(extendedResourceA), 6, 2, 5}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource", name: "extended resource allocatable enforced for unknown resource",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}}, wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
name: "extended resource allocatable enforced for unknown resource for init container", name: "extended resource allocatable enforced for unknown resource for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(extendedResourceB)),
wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}}, wantInsufficientResources: []InsufficientResource{{extendedResourceB, getErrReason(extendedResourceB), 1, 0, 0}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced", name: "kubernetes.io resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceA)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}}, wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceA, getErrReason(kubernetesIOResourceA), 10, 0, 0}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{kubernetesIOResourceB: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
name: "kubernetes.io resource capacity enforced for init container", name: "kubernetes.io resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(kubernetesIOResourceB)),
wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}}, wantInsufficientResources: []InsufficientResource{{kubernetesIOResourceB, getErrReason(kubernetesIOResourceB), 10, 0, 0}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced", name: "hugepages resource capacity enforced",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}}, wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{}),
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 10}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 0}})),
name: "hugepages resource capacity enforced for init container", name: "hugepages resource capacity enforced for init container",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}}, wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 10, 0, 5}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}, schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}},
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 3}}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})), newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0, ScalarResources: map[v1.ResourceName]int64{hugePageResourceA: 2}})),
name: "hugepages resource allocatable enforced for multiple containers", name: "hugepages resource allocatable enforced for multiple containers",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(hugePageResourceA)),
wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}}, wantInsufficientResources: []InsufficientResource{{hugePageResourceA, getErrReason(hugePageResourceA), 6, 2, 5}},
}, },
{ {
pod: newResourcePod( pod: newResourcePod(
schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}), schedulertypes.Resource{MilliCPU: 1, Memory: 1, ScalarResources: map[v1.ResourceName]int64{extendedResourceB: 1}}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 0, Memory: 0})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 0, Memory: 0})),
ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`), ignoredResources: []byte(`{"IgnoredResources" : ["example.com/bbb"]}`),
name: "skip checking ignored extended resource", name: "skip checking ignored extended resource",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourceOverheadPod( pod: newResourceOverheadPod(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")}, v1.ResourceList{v1.ResourceCPU: resource.MustParse("3m"), v1.ResourceMemory: resource.MustParse("13")},
), ),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
name: "resources + pod overhead fits", name: "resources + pod overhead fits",
wantInsufficientResources: []InsufficientResource{}, wantInsufficientResources: []InsufficientResource{},
}, },
{ {
pod: newResourceOverheadPod( pod: newResourceOverheadPod(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")}, v1.ResourceList{v1.ResourceCPU: resource.MustParse("1m"), v1.ResourceMemory: resource.MustParse("15")},
), ),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
name: "requests + overhead does not fit for memory", name: "requests + overhead does not fit for memory",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceMemory)),
wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}}, wantInsufficientResources: []InsufficientResource{{v1.ResourceMemory, getErrReason(v1.ResourceMemory), 16, 5, 20}},
@ -395,7 +395,7 @@ func TestEnoughRequests(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) { func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{} pod := &v1.Pod{}
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
node := v1.Node{} node := v1.Node{}
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
p, _ := NewFit(nil, nil) p, _ := NewFit(nil, nil)
@ -410,32 +410,32 @@ func TestPreFilterDisabled(t *testing.T) {
func TestNotEnoughRequests(t *testing.T) { func TestNotEnoughRequests(t *testing.T) {
notEnoughPodsTests := []struct { notEnoughPodsTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulertypes.NodeInfo
fits bool fits bool
name string name string
wantStatus *framework.Status wantStatus *framework.Status
}{ }{
{ {
pod: &v1.Pod{}, pod: &v1.Pod{},
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 20})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 20})),
name: "even without specified resources predicate fails when there's no space for additional pod", name: "even without specified resources predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 5})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 5})),
name: "even if both resources fit predicate fails when there's no space for additional pod", name: "even if both resources fit predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod", name: "even for equal edge case predicate fails when there's no space for additional pod",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
}, },
{ {
pod: newResourceInitPod(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), schedulernodeinfo.Resource{MilliCPU: 5, Memory: 1}), pod: newResourceInitPod(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 1}), schedulertypes.Resource{MilliCPU: 5, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo(newResourcePod(schedulernodeinfo.Resource{MilliCPU: 5, Memory: 19})), nodeInfo: schedulertypes.NewNodeInfo(newResourcePod(schedulertypes.Resource{MilliCPU: 5, Memory: 19})),
name: "even for equal edge case predicate fails when there's no space for additional pod due to init container", name: "even for equal edge case predicate fails when there's no space for additional pod due to init container",
wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"), wantStatus: framework.NewStatus(framework.Unschedulable, "Too many pods"),
}, },
@ -464,34 +464,34 @@ func TestNotEnoughRequests(t *testing.T) {
func TestStorageRequests(t *testing.T) { func TestStorageRequests(t *testing.T) {
storagePodsTests := []struct { storagePodsTests := []struct {
pod *v1.Pod pod *v1.Pod
nodeInfo *schedulernodeinfo.NodeInfo nodeInfo *schedulertypes.NodeInfo
name string name string
wantStatus *framework.Status wantStatus *framework.Status
}{ }{
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 10, Memory: 10})), newResourcePod(schedulertypes.Resource{MilliCPU: 10, Memory: 10})),
name: "due to container scratch disk", name: "due to container scratch disk",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceCPU)),
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{MilliCPU: 1, Memory: 1}), pod: newResourcePod(schedulertypes.Resource{MilliCPU: 1, Memory: 1}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 10})), newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 10})),
name: "pod fit", name: "pod fit",
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 25}), pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 25}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})), newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})),
name: "storage ephemeral local storage request exceeds allocatable", name: "storage ephemeral local storage request exceeds allocatable",
wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)), wantStatus: framework.NewStatus(framework.Unschedulable, getErrReason(v1.ResourceEphemeralStorage)),
}, },
{ {
pod: newResourcePod(schedulernodeinfo.Resource{EphemeralStorage: 10}), pod: newResourcePod(schedulertypes.Resource{EphemeralStorage: 10}),
nodeInfo: schedulernodeinfo.NewNodeInfo( nodeInfo: schedulertypes.NewNodeInfo(
newResourcePod(schedulernodeinfo.Resource{MilliCPU: 2, Memory: 2})), newResourcePod(schedulertypes.Resource{MilliCPU: 2, Memory: 2})),
name: "pod fits", name: "pod fits",
}, },
} }

View File

@ -23,7 +23,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util" schedutil "k8s.io/kubernetes/pkg/scheduler/util"
) )
@ -46,7 +46,7 @@ type resourceToValueMap map[v1.ResourceName]int64
// score will use `scorer` function to calculate the score. // score will use `scorer` function to calculate the score.
func (r *resourceAllocationScorer) score( func (r *resourceAllocationScorer) score(
pod *v1.Pod, pod *v1.Pod,
nodeInfo *schedulernodeinfo.NodeInfo) (int64, *framework.Status) { nodeInfo *schedulertypes.NodeInfo) (int64, *framework.Status) {
node := nodeInfo.Node() node := nodeInfo.Node()
if node == nil { if node == nil {
return 0, framework.NewStatus(framework.Error, "node not found") return 0, framework.NewStatus(framework.Error, "node not found")
@ -90,7 +90,7 @@ func (r *resourceAllocationScorer) score(
} }
// calculateResourceAllocatableRequest returns resources Allocatable and Requested values // calculateResourceAllocatableRequest returns resources Allocatable and Requested values
func calculateResourceAllocatableRequest(nodeInfo *schedulernodeinfo.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) { func calculateResourceAllocatableRequest(nodeInfo *schedulertypes.NodeInfo, pod *v1.Pod, resource v1.ResourceName) (int64, int64) {
allocatable := nodeInfo.AllocatableResource() allocatable := nodeInfo.AllocatableResource()
requested := nodeInfo.RequestedResource() requested := nodeInfo.RequestedResource()
podRequest := calculatePodResourceRequest(pod, resource) podRequest := calculatePodResourceRequest(pod, resource)
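
The allocatable and requested values gathered here are what the injected scorer mentioned above consumes. As a rough sketch (illustrative names; the concrete scorers live in their own plugin files), a least-allocated style scorer turns them into a 0-100 score from the fraction of the resource that would remain free after placing the pod:

package main

import "fmt"

// leastAllocatedScore scores one resource dimension: the more of the
// allocatable amount is left after adding the pod's request, the higher the
// score, scaled to the 0-100 range used by the framework.
func leastAllocatedScore(requested, allocatable int64) int64 {
    if allocatable == 0 || requested > allocatable {
        return 0 // nothing allocatable, or the pod would not fit at all
    }
    return (allocatable - requested) * 100 / allocatable
}

func main() {
    // node: 4000m CPU allocatable, 1000m already requested; pod asks for 500m
    fmt.Println(leastAllocatedScore(1000+500, 4000)) // 62 (62.5% free, truncated)
}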

View File

@ -23,7 +23,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// ResourceLimits is a score plugin that increases score of input node by 1 if the node satisfies // ResourceLimits is a score plugin that increases score of input node by 1 if the node satisfies
@ -46,7 +46,7 @@ const (
// preScoreState computed at PreScore and used at Score. // preScoreState computed at PreScore and used at Score.
type preScoreState struct { type preScoreState struct {
podResourceRequest *schedulernodeinfo.Resource podResourceRequest *schedulertypes.Resource
} }
// Clone the preScore state. // Clone the preScore state.
@ -81,7 +81,7 @@ func (rl *ResourceLimits) PreScore(
return nil return nil
} }
func getPodResource(cycleState *framework.CycleState) (*schedulernodeinfo.Resource, error) { func getPodResource(cycleState *framework.CycleState) (*schedulertypes.Resource, error) {
c, err := cycleState.Read(preScoreStateKey) c, err := cycleState.Read(preScoreStateKey)
if err != nil { if err != nil {
return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err) return nil, fmt.Errorf("Error reading %q from cycleState: %v", preScoreStateKey, err)
@ -136,9 +136,9 @@ func NewResourceLimits(_ *runtime.Unknown, h framework.FrameworkHandle) (framewo
// getResourceLimits computes resource limits for input pod. // getResourceLimits computes resource limits for input pod.
// The reason to create this new function is to be consistent with other // The reason to create this new function is to be consistent with other
// priority functions because most or perhaps all priority functions work // priority functions because most or perhaps all priority functions work
// with schedulernodeinfo.Resource. // with schedulertypes.Resource.
func getResourceLimits(pod *v1.Pod) *schedulernodeinfo.Resource { func getResourceLimits(pod *v1.Pod) *schedulertypes.Resource {
result := &schedulernodeinfo.Resource{} result := &schedulertypes.Resource{}
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Limits) result.Add(container.Resources.Limits)
} }
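
A small sketch of the accumulation getResourceLimits performs, assuming a pod whose two containers are limited to 500m/1Gi and 250m/512Mi. Resource and its Add method come from the pkg/scheduler/types package this PR introduces; the pod construction itself is illustrative.

package main

import (
    "fmt"

    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
    schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

func limits(cpu, mem string) v1.ResourceList {
    return v1.ResourceList{
        v1.ResourceCPU:    resource.MustParse(cpu),
        v1.ResourceMemory: resource.MustParse(mem),
    }
}

func main() {
    pod := &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{
        {Resources: v1.ResourceRequirements{Limits: limits("500m", "1Gi")}},
        {Resources: v1.ResourceRequirements{Limits: limits("250m", "512Mi")}},
    }}}

    result := &schedulertypes.Resource{}
    for _, c := range pod.Spec.Containers {
        result.Add(c.Resources.Limits) // same accumulation as getResourceLimits above
    }
    fmt.Println(result.MilliCPU, result.Memory) // 750 1610612736
}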

View File

@ -8,7 +8,7 @@ go_library(
deps = [ deps = [
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
], ],
@ -20,7 +20,7 @@ go_test(
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
], ],
) )

View File

@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// NodeUnschedulable is a plugin that priorities nodes according to the node annotation // NodeUnschedulable is a plugin that priorities nodes according to the node annotation
@ -49,7 +49,7 @@ func (pl *NodeUnschedulable) Name() string {
} }
// Filter invoked at the filter extension point. // Filter invoked at the filter extension point.
func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { func (pl *NodeUnschedulable) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
if nodeInfo == nil || nodeInfo.Node() == nil { if nodeInfo == nil || nodeInfo.Node() == nil {
return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonUnknownCondition) return framework.NewStatus(framework.UnschedulableAndUnresolvable, ErrReasonUnknownCondition)
} }

View File

@ -23,7 +23,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
func TestNodeUnschedulable(t *testing.T) { func TestNodeUnschedulable(t *testing.T) {
@ -73,7 +73,7 @@ func TestNodeUnschedulable(t *testing.T) {
} }
for _, test := range testCases { for _, test := range testCases {
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo.SetNode(test.node) nodeInfo.SetNode(test.node)
p, _ := New(nil, nil) p, _ := New(nil, nil)

View File

@ -13,7 +13,7 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//pkg/volume/util:go_default_library", "//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library",
@ -41,7 +41,7 @@ go_test(
"//pkg/features:go_default_library", "//pkg/features:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library", "//pkg/scheduler/listers/fake:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//pkg/volume/util:go_default_library", "//pkg/volume/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library",

View File

@@ -29,8 +29,7 @@ import (
csitrans "k8s.io/csi-translation-lib"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/klog"
@@ -69,7 +68,7 @@ func (pl *CSILimits) Name() string {
}
// Filter invoked at the filter extension point.
-func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *CSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
// If the new pod doesn't have any volume attached to it, the predicate will always be true
if len(pod.Spec.Volumes) == 0 {
return nil
@@ -286,7 +285,7 @@ func NewCSI(_ *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plu
}, nil
}
-func getVolumeLimits(nodeInfo *schedulernodeinfo.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
+func getVolumeLimits(nodeInfo *schedulertypes.NodeInfo, csiNode *storagev1.CSINode) map[v1.ResourceName]int64 {
// TODO: stop getting values from Node object in v1.18
nodeVolumeLimits := nodeInfo.VolumeLimits()
if csiNode != nil {
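The limits map returned by getVolumeLimits keys attachable-volume counts by resource name. As a hedged sketch of how such a map could be consulted, the helper below and its inputs are hypothetical and are not the plugin's actual accounting:

package example

import (
	v1 "k8s.io/api/core/v1"
)

// exceedsVolumeLimit is a hypothetical helper: given how many volumes counted
// against a driver's limit resource are already attached, how many the new pod
// would add, and a per-node limits map in the shape produced by a
// getVolumeLimits-style function, report whether the node would overflow.
func exceedsVolumeLimit(limits map[v1.ResourceName]int64, limitResource v1.ResourceName, attached, requested int64) bool {
	limit, ok := limits[limitResource]
	if !ok {
		// No limit published for this driver on the node.
		return false
	}
	return attached+requested > limit
}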

View File

@@ -36,7 +36,7 @@ import (
"k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
utilpointer "k8s.io/utils/pointer"
)
@@ -579,8 +579,8 @@ func getFakeCSINodeLister(csiNode *storagev1.CSINode) fakelisters.CSINodeLister
return fakelisters.CSINodeLister{}
}
-func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *storagev1.CSINode) {
-nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulertypes.NodeInfo, *storagev1.CSINode) {
+nodeInfo := schedulertypes.NewNodeInfo(pods...)
node := &v1.Node{
ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
Status: v1.NodeStatus{

View File

@@ -36,7 +36,7 @@ import (
"k8s.io/kubernetes/pkg/features"
kubefeatures "k8s.io/kubernetes/pkg/features"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
)
@@ -196,7 +196,7 @@ func (pl *nonCSILimits) Name() string {
}
// Filter invoked at the filter extension point.
-func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *nonCSILimits) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {

View File

@@ -15,7 +15,7 @@ go_library(
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/listers:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/validation:go_default_library",
@@ -42,8 +42,8 @@ go_test(
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/parallelize:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/testing:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -28,7 +28,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const preFilterStateKey = "PreFilter" + Name
@@ -160,7 +160,7 @@ func (pl *PodTopologySpread) PreFilterExtensions() framework.PreFilterExtensions
}
// AddPod from pre-computed data in cycleState.
-func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@@ -171,7 +171,7 @@ func (pl *PodTopologySpread) AddPod(ctx context.Context, cycleState *framework.C
}
// RemovePod from pre-computed data in cycleState.
-func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *PodTopologySpread) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@@ -275,7 +275,7 @@ func (pl *PodTopologySpread) calPreFilterState(pod *v1.Pod) (*preFilterState, er
}
// Filter invoked at the filter extension point.
-func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *PodTopologySpread) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")
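AddPod, RemovePod, and Filter all start by pulling the state pre-computed in PreFilter back out of the CycleState. A minimal sketch of that read-back pattern follows; the state key and state type here are stand-ins, the real preFilterState carries topology-spread match counts that are not part of this hunk.

package example

import (
	"fmt"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// demoStateKey and demoState stand in for the plugin's real
// preFilterStateKey/preFilterState pair.
const demoStateKey framework.StateKey = "PreFilterDemo"

type demoState struct {
	// podCount is whatever the plugin pre-computed in PreFilter.
	podCount int
}

// Clone lets the framework copy the state between scheduling phases.
func (s *demoState) Clone() framework.StateData {
	c := *s
	return &c
}

// readDemoState mirrors the getPreFilterState helpers in this diff: fetch the
// value written during PreFilter and fail loudly if it is missing or mistyped.
func readDemoState(cycleState *framework.CycleState) (*demoState, error) {
	v, err := cycleState.Read(demoStateKey)
	if err != nil {
		return nil, fmt.Errorf("reading %q from cycleState: %v", demoStateKey, err)
	}
	s, ok := v.(*demoState)
	if !ok {
		return nil, fmt.Errorf("%+v convert to *demoState error", v)
	}
	return s, nil
}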

View File

@@ -31,8 +31,8 @@ import (
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
st "k8s.io/kubernetes/pkg/scheduler/testing"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/utils/pointer"
)
@@ -1619,7 +1619,7 @@ func TestMultipleConstraints(t *testing.T) {
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p := &PodTopologySpread{}

View File

@@ -9,7 +9,7 @@ go_library(
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -25,7 +25,7 @@ go_test(
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

View File

@@ -27,7 +27,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const (
@@ -146,7 +146,7 @@ func (pl *ServiceAffinity) PreFilterExtensions() framework.PreFilterExtensions {
}
// AddPod from pre-computed data in cycleState.
-func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@@ -167,7 +167,7 @@ func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.Cyc
}
// RemovePod from pre-computed data in cycleState.
-func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
s, err := getPreFilterState(cycleState)
if err != nil {
return framework.NewStatus(framework.Error, err.Error())
@@ -230,7 +230,7 @@ func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error
// - L is a label that the ServiceAffinity object needs as a matching constraint.
// - L is not defined in the pod itself already.
// - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value.
-func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
if len(pl.args.AffinityLabels) == 0 {
return nil
}
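A rough sketch of the label-inference rule described in the comment above: affinity labels the pod does not pin itself are borrowed from a node that already runs a pod of the same service. The inputs below are plain maps; the real plugin derives them from listers and the pre-filter state.

package example

// inferAffinityLabels is illustrative only. For each label the ServiceAffinity
// plugin cares about, keep the pod's own constraint if present, otherwise
// adopt the value from the node of an already-scheduled pod of the service.
func inferAffinityLabels(affinityLabels []string, podSelector map[string]string, existingServiceNodeLabels map[string]string) map[string]string {
	want := map[string]string{}
	for k, v := range podSelector {
		want[k] = v
	}
	for _, l := range affinityLabels {
		if _, pinned := want[l]; pinned {
			continue // the pod already constrains this label itself
		}
		if v, ok := existingServiceNodeLabels[l]; ok {
			want[l] = v // match the zone/rack/etc. of the existing service pod
		}
	}
	return want
}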

View File

@@ -27,7 +27,7 @@ import (
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestServiceAffinity(t *testing.T) {
@@ -591,7 +591,7 @@ func sortNodeScoreList(out framework.NodeScoreList) {
})
}
-func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodeinfo.NodeInfo {
+func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *schedulertypes.NodeInfo {
t.Helper()
nodeInfo, err := snapshot.NodeInfos().Get(name)
if err != nil {
@@ -602,7 +602,7 @@ func mustGetNodeInfo(t *testing.T, snapshot *cache.Snapshot, name string) *nodei
func TestPreFilterDisabled(t *testing.T) {
pod := &v1.Pod{}
-nodeInfo := nodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
node := v1.Node{}
nodeInfo.SetNode(&node)
p := &ServiceAffinity{

View File

@@ -9,7 +9,7 @@ go_library(
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/plugins/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@@ -36,7 +36,7 @@ go_test(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/internal/cache:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
],

View File

@@ -25,7 +25,7 @@ import (
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
pluginhelper "k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// TaintToleration is a plugin that checks if a pod tolerates a node's taints.
@@ -52,7 +52,7 @@ func (pl *TaintToleration) Name() string {
}
// Filter invoked at the filter extension point.
-func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *TaintToleration) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
if nodeInfo == nil || nodeInfo.Node() == nil {
return framework.NewStatus(framework.Error, "invalid nodeInfo")
}
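The filter above delegates to shared helpers in pkg/apis/core/v1/helper; as a rough sketch of the underlying rule (ignoring PreferNoSchedule, which only affects scoring), a pod fits only if every NoSchedule or NoExecute taint on the node is tolerated. This is illustrative, not the plugin's actual code path, which also reports which taint failed.

package example

import (
	v1 "k8s.io/api/core/v1"
)

// toleratesAllTaints reports whether every hard taint on the node is matched
// by at least one toleration on the pod.
func toleratesAllTaints(tolerations []v1.Toleration, taints []v1.Taint) bool {
	for i := range taints {
		taint := &taints[i]
		if taint.Effect == v1.TaintEffectPreferNoSchedule {
			continue // soft taints do not block scheduling
		}
		tolerated := false
		for j := range tolerations {
			if tolerations[j].ToleratesTaint(taint) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return false
		}
	}
	return true
}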

View File

@@ -25,7 +25,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
"k8s.io/kubernetes/pkg/scheduler/internal/cache"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
@@ -330,7 +330,7 @@ func TestTaintTolerationFilter(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo.SetNode(test.node)
p, _ := New(nil, nil)
gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), nil, test.pod, nodeInfo)

View File

@@ -8,7 +8,7 @@ go_library(
deps = [
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@@ -35,7 +35,7 @@ go_test(
deps = [
"//pkg/controller/volume/scheduling:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

View File

@@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// VolumeBinding is a plugin that binds pod volumes in scheduling.
@@ -62,7 +62,7 @@ func podHasPVCs(pod *v1.Pod) bool {
//
// The predicate returns true if all bound PVCs have compatible PVs with the node, and if all unbound
// PVCs can be matched with an available and node-compatible PV.
-func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status {
+func (pl *VolumeBinding) Filter(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
node := nodeInfo.Node()
if node == nil {
return framework.NewStatus(framework.Error, "node not found")

View File

@@ -25,7 +25,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestVolumeBinding(t *testing.T) {
@@ -99,7 +99,7 @@ func TestVolumeBinding(t *testing.T) {
for _, item := range table {
t.Run(item.name, func(t *testing.T) {
-nodeInfo := schedulernodeinfo.NewNodeInfo()
+nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo.SetNode(item.node)
fakeVolumeBinder := scheduling.NewFakeVolumeBinder(item.volumeBinderConfig)
p := &VolumeBinding{

View File

@@ -7,7 +7,7 @@ go_library(
visibility = ["//visibility:public"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
],
@@ -33,7 +33,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
],
)

View File

@@ -22,7 +22,7 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// VolumeRestrictions is a plugin that checks volume restrictions.
@@ -118,7 +118,7 @@ func haveOverlap(a1, a2 []string) bool {
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image, and the image is read-only
// - ISCSI forbids if any two pods share at least same IQN and ISCSI volume is read-only
-func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *VolumeRestrictions) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
for _, v := range pod.Spec.Volumes {
for _, ev := range nodeInfo.Pods() {
if isVolumeConflict(v, ev) {
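A sketch of the GCE PD variant of these rules, which is the case exercised by TestGCEDiskConflicts below: two pods may not mount the same disk unless both mounts are read-only. The helper is illustrative; the plugin's real isVolumeConflict also covers AWS EBS, Ceph RBD, and iSCSI as listed above.

package example

import (
	v1 "k8s.io/api/core/v1"
)

// gcePDConflict reports whether two volumes refer to the same GCE PD in a way
// that would break scheduling: same PDName and at least one read-write mount.
func gcePDConflict(a, b v1.Volume) bool {
	if a.GCEPersistentDisk == nil || b.GCEPersistentDisk == nil {
		return false
	}
	if a.GCEPersistentDisk.PDName != b.GCEPersistentDisk.PDName {
		return false
	}
	// Same disk: only a conflict if at least one side mounts it read-write.
	return !(a.GCEPersistentDisk.ReadOnly && b.GCEPersistentDisk.ReadOnly)
}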

View File

@@ -23,7 +23,7 @@ import (
v1 "k8s.io/api/core/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func TestGCEDiskConflicts(t *testing.T) {
@@ -52,15 +52,15 @@ func TestGCEDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
-nodeInfo *schedulernodeinfo.NodeInfo
+nodeInfo *schedulertypes.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
-{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
-{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
@@ -100,15 +100,15 @@ func TestAWSDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
-nodeInfo *schedulernodeinfo.NodeInfo
+nodeInfo *schedulertypes.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
-{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
-{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
@@ -154,15 +154,15 @@ func TestRBDDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
-nodeInfo *schedulernodeinfo.NodeInfo
+nodeInfo *schedulertypes.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
-{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
-{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {
@@ -208,15 +208,15 @@ func TestISCSIDiskConflicts(t *testing.T) {
errStatus := framework.NewStatus(framework.Unschedulable, ErrReasonDiskConflict)
tests := []struct {
pod *v1.Pod
-nodeInfo *schedulernodeinfo.NodeInfo
+nodeInfo *schedulertypes.NodeInfo
isOk bool
name string
wantStatus *framework.Status
}{
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(), true, "nothing", nil},
-{&v1.Pod{}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
-{&v1.Pod{Spec: volState}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
-{&v1.Pod{Spec: volState2}, schedulernodeinfo.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(), true, "nothing", nil},
+{&v1.Pod{}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state", nil},
+{&v1.Pod{Spec: volState}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state", errStatus},
+{&v1.Pod{Spec: volState2}, schedulertypes.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state", nil},
}
for _, test := range tests {

View File

@@ -8,7 +8,7 @@ go_library(
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -27,7 +27,7 @@ go_test(
deps = [
"//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/listers/fake:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",

View File

@@ -30,7 +30,7 @@ import (
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
-"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// VolumeZone is a plugin that checks volume zone.
@@ -78,7 +78,7 @@ func (pl *VolumeZone) Name() string {
// determining the zone of a volume during scheduling, and that is likely to
// require calling out to the cloud provider. It seems that we are moving away
// from inline volume declarations anyway.
-func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
+func (pl *VolumeZone) Filter(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
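A simplified sketch of the zone check this plugin performs: a PV labelled with a failure-domain zone constrains the pod to nodes carrying the same label value. This assumes single-valued labels; the real filter also checks the region label and understands multi-valued zone labels on the PV.

package example

import (
	v1 "k8s.io/api/core/v1"
)

// zoneLabel is the classic failure-domain zone label consulted by this plugin.
const zoneLabel = "failure-domain.beta.kubernetes.io/zone"

// nodeMatchesPVZone reports whether the node satisfies the PV's zone label,
// treating an unlabelled PV as unconstrained.
func nodeMatchesPVZone(pv *v1.PersistentVolume, node *v1.Node) bool {
	pvZone, ok := pv.Labels[zoneLabel]
	if !ok {
		return true // unlabelled PV constrains nothing
	}
	return node.Labels[zoneLabel] == pvZone
}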

View File

@@ -26,7 +26,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
@@ -208,7 +208,7 @@ func TestSingleZone(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-node := &schedulernodeinfo.NodeInfo{}
+node := &schedulertypes.NodeInfo{}
node.SetNode(test.Node)
p := &VolumeZone{
pvLister,
@@ -330,7 +330,7 @@ func TestMultiZone(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-node := &schedulernodeinfo.NodeInfo{}
+node := &schedulertypes.NodeInfo{}
node.SetNode(test.Node)
p := &VolumeZone{
pvLister,
@@ -439,7 +439,7 @@ func TestWithBinding(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-node := &schedulernodeinfo.NodeInfo{}
+node := &schedulertypes.NodeInfo{}
node.SetNode(test.Node)
p := &VolumeZone{
pvLister,

View File

@@ -18,7 +18,7 @@ go_library(
"//pkg/scheduler/internal/parallelize:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
@@ -59,7 +59,7 @@ go_test(
deps = [
"//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",

View File

@@ -34,7 +34,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
@@ -353,7 +353,7 @@ func (f *framework) RunPreFilterExtensionAddPod(
state *CycleState,
podToSchedule *v1.Pod,
podToAdd *v1.Pod,
-nodeInfo *schedulernodeinfo.NodeInfo,
+nodeInfo *schedulertypes.NodeInfo,
) (status *Status) {
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil {
@@ -371,7 +371,7 @@ func (f *framework) RunPreFilterExtensionAddPod(
return nil
}
-func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+func (f *framework) runPreFilterExtensionAddPod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().AddPod(ctx, state, podToSchedule, podToAdd, nodeInfo)
}
@@ -389,7 +389,7 @@ func (f *framework) RunPreFilterExtensionRemovePod(
state *CycleState,
podToSchedule *v1.Pod,
podToRemove *v1.Pod,
-nodeInfo *schedulernodeinfo.NodeInfo,
+nodeInfo *schedulertypes.NodeInfo,
) (status *Status) {
for _, pl := range f.preFilterPlugins {
if pl.PreFilterExtensions() == nil {
@@ -407,7 +407,7 @@ func (f *framework) RunPreFilterExtensionRemovePod(
return nil
}
-func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+func (f *framework) runPreFilterExtensionRemovePod(ctx context.Context, pl PreFilterPlugin, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
if !state.ShouldRecordPluginMetrics() {
return pl.PreFilterExtensions().RemovePod(ctx, state, podToSchedule, podToAdd, nodeInfo)
}
@@ -425,7 +425,7 @@ func (f *framework) RunFilterPlugins(
ctx context.Context,
state *CycleState,
pod *v1.Pod,
-nodeInfo *schedulernodeinfo.NodeInfo,
+nodeInfo *schedulertypes.NodeInfo,
) PluginToStatus {
var firstFailedStatus *Status
statuses := make(PluginToStatus)
@@ -452,7 +452,7 @@ func (f *framework) RunFilterPlugins(
return statuses
}
-func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+func (f *framework) runFilterPlugin(ctx context.Context, pl FilterPlugin, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
if !state.ShouldRecordPluginMetrics() {
return pl.Filter(ctx, state, pod, nodeInfo)
}
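Each of these run*Plugin wrappers follows the same shape: skip instrumentation when the cycle opted out via ShouldRecordPluginMetrics, otherwise time the call. A sketch of that pattern with a stand-in recorder follows; the real metric names and recorder are not part of this hunk.

package example

import (
	"time"
)

// runTimed is illustrative only: callers that do not want per-plugin metrics
// pay nothing extra, everyone else gets the call timed. The record function
// stands in for the framework's metric recorder.
func runTimed(shouldRecord bool, record func(d time.Duration), call func() error) error {
	if !shouldRecord {
		return call()
	}
	start := time.Now()
	err := call()
	record(time.Since(start))
	return err
}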

View File

@@ -32,7 +32,7 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/metrics"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
const (
@@ -138,10 +138,10 @@ type TestPluginPreFilterExtension struct {
inj injectedResult
}
-func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+func (e *TestPluginPreFilterExtension) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
return NewStatus(Code(e.inj.PreFilterAddPodStatus), "injected status")
}
-func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+func (e *TestPluginPreFilterExtension) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
return NewStatus(Code(e.inj.PreFilterRemovePodStatus), "injected status")
}
@@ -165,7 +165,7 @@ func (pl *TestPlugin) PreFilterExtensions() PreFilterExtensions {
return &TestPluginPreFilterExtension{inj: pl.inj}
}
-func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+func (pl *TestPlugin) Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
return NewStatus(Code(pl.inj.FilterStatus), "injected filter status")
}
@@ -228,13 +228,13 @@ func (pl *TestPreFilterWithExtensionsPlugin) PreFilter(ctx context.Context, stat
}
func (pl *TestPreFilterWithExtensionsPlugin) AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod,
-podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
pl.AddCalled++
return nil
}
func (pl *TestPreFilterWithExtensionsPlugin) RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod,
-podToRemove *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status {
+podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status {
pl.RemoveCalled++
return nil
}

View File

@@ -32,7 +32,7 @@ import (
"k8s.io/kubernetes/pkg/controller/volume/scheduling"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
// NodeScoreList declares a list of nodes and their scores.
@@ -256,10 +256,10 @@ type QueueSortPlugin interface {
type PreFilterExtensions interface {
// AddPod is called by the framework while trying to evaluate the impact
// of adding podToAdd to the node while scheduling podToSchedule.
-AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status
+AddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
// RemovePod is called by the framework while trying to evaluate the impact
// of removing podToRemove from the node while scheduling podToSchedule.
-RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status
+RemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
}
// PreFilterPlugin is an interface that must be implemented by "prefilter" plugins.
@@ -299,7 +299,7 @@ type FilterPlugin interface {
// For example, during preemption, we may pass a copy of the original
// nodeInfo object that has some pods removed from it to evaluate the
// possibility of preempting them to schedule the target pod.
-Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status
+Filter(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
}
// PreScorePlugin is an interface for Pre-score plugin. Pre-score is an
@@ -425,17 +425,17 @@ type Framework interface {
// preemption, we may pass a copy of the original nodeInfo object that has some pods
// removed from it to evaluate the possibility of preempting them to
// schedule the target pod.
-RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) PluginToStatus
+RunFilterPlugins(ctx context.Context, state *CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) PluginToStatus
// RunPreFilterExtensionAddPod calls the AddPod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
-RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status
+RunPreFilterExtensionAddPod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
// RunPreFilterExtensionRemovePod calls the RemovePod interface for the set of configured
// PreFilter plugins. It returns directly if any of the plugins return any
// status other than Success.
-RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *Status
+RunPreFilterExtensionRemovePod(ctx context.Context, state *CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *Status
// RunPreScorePlugins runs the set of configured pre-score plugins. If any
// of these plugins returns any status other than "Success", the given pod is rejected.
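The AddPod/RemovePod pair exists so that preemption can incrementally adjust a plugin's pre-computed state instead of recomputing it per candidate node. A sketch of a PreFilterExtensions implementation that keeps a toy pod count in CycleState follows; the plugin and state type are hypothetical, only the interface signatures come from this diff.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

const countStateKey framework.StateKey = "PreFilterPodCount"

// countState is a toy pre-computed value: how many pods the plugin considered
// relevant when PreFilter ran.
type countState struct{ count int }

func (s *countState) Clone() framework.StateData { c := *s; return &c }

// countExtensions keeps the pre-computed count in sync while preemption tries
// adding or removing victim pods on a node.
type countExtensions struct{}

var _ framework.PreFilterExtensions = countExtensions{}

func (countExtensions) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule, podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
	v, err := state.Read(countStateKey)
	if err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}
	s, ok := v.(*countState)
	if !ok {
		return framework.NewStatus(framework.Error, "unexpected state type")
	}
	s.count++
	return nil
}

func (countExtensions) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule, podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
	v, err := state.Read(countStateKey)
	if err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}
	s, ok := v.(*countState)
	if !ok {
		return framework.NewStatus(framework.Error, "unexpected state type")
	}
	if s.count > 0 {
		s.count--
	}
	return nil
}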

View File

@@ -14,7 +14,7 @@ go_library(
"//pkg/features:go_default_library",
"//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/metrics:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//pkg/util/node:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -35,7 +35,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//pkg/features:go_default_library",
-"//pkg/scheduler/nodeinfo:go_default_library",
+"//pkg/scheduler/types:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",

View File

@@ -30,7 +30,7 @@ import (
"k8s.io/kubernetes/pkg/features"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
"k8s.io/kubernetes/pkg/scheduler/metrics"
-schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)
var (
@@ -51,7 +51,7 @@ func New(ttl time.Duration, stop <-chan struct{}) Cache {
// linked list. When a NodeInfo is updated, it goes to the head of the list.
// The items closer to the head are the most recently updated items.
type nodeInfoListItem struct {
-info *schedulernodeinfo.NodeInfo
+info *schedulertypes.NodeInfo
next *nodeInfoListItem
prev *nodeInfoListItem
}
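The cache keeps these items in a doubly linked list ordered by recency, so "touching" a node amounts to detaching it and pushing it to the front. A sketch of that operation on a simplified item type follows; the real cache does the same pointer surgery on nodeInfoListItem under its mutex and tracks the head on schedulerCache.

package example

// listItem mirrors the shape of nodeInfoListItem: a doubly linked list node
// whose NodeInfo payload is omitted here for brevity.
type listItem struct {
	name string
	next *listItem
	prev *listItem
}

// recencyList keeps the most recently updated item at the head, which is the
// property the scheduler cache relies on when building incremental snapshots.
type recencyList struct {
	head *listItem
}

// moveToHead detaches item from wherever it is and pushes it to the front.
func (l *recencyList) moveToHead(item *listItem) {
	if l.head == item {
		return
	}
	// Unlink from the current position, if linked.
	if item.prev != nil {
		item.prev.next = item.next
	}
	if item.next != nil {
		item.next.prev = item.prev
	}
	// Push to the front.
	item.prev = nil
	item.next = l.head
	if l.head != nil {
		l.head.prev = item
	}
	l.head = item
}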
@ -93,8 +93,8 @@ type imageState struct {
} }
// createImageStateSummary returns a summarizing snapshot of the given image's state. // createImageStateSummary returns a summarizing snapshot of the given image's state.
func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulernodeinfo.ImageStateSummary { func (cache *schedulerCache) createImageStateSummary(state *imageState) *schedulertypes.ImageStateSummary {
return &schedulernodeinfo.ImageStateSummary{ return &schedulertypes.ImageStateSummary{
Size: state.size, Size: state.size,
NumNodes: len(state.nodes), NumNodes: len(state.nodes),
} }
@ -115,7 +115,7 @@ func newSchedulerCache(ttl, period time.Duration, stop <-chan struct{}) *schedul
} }
// newNodeInfoListItem initializes a new nodeInfoListItem. // newNodeInfoListItem initializes a new nodeInfoListItem.
func newNodeInfoListItem(ni *schedulernodeinfo.NodeInfo) *nodeInfoListItem { func newNodeInfoListItem(ni *schedulertypes.NodeInfo) *nodeInfoListItem {
return &nodeInfoListItem{ return &nodeInfoListItem{
info: ni, info: ni,
} }
@ -180,7 +180,7 @@ func (cache *schedulerCache) Dump() *Dump {
cache.mu.RLock() cache.mu.RLock()
defer cache.mu.RUnlock() defer cache.mu.RUnlock()
nodes := make(map[string]*schedulernodeinfo.NodeInfo, len(cache.nodes)) nodes := make(map[string]*schedulertypes.NodeInfo, len(cache.nodes))
for k, v := range cache.nodes { for k, v := range cache.nodes {
nodes[k] = v.info.Clone() nodes[k] = v.info.Clone()
} }
@ -231,7 +231,7 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
existing, ok := nodeSnapshot.nodeInfoMap[np.Name] existing, ok := nodeSnapshot.nodeInfoMap[np.Name]
if !ok { if !ok {
updateAllLists = true updateAllLists = true
existing = &schedulernodeinfo.NodeInfo{} existing = &schedulertypes.NodeInfo{}
nodeSnapshot.nodeInfoMap[np.Name] = existing nodeSnapshot.nodeInfoMap[np.Name] = existing
} }
clone := node.info.Clone() clone := node.info.Clone()
@ -277,10 +277,10 @@ func (cache *schedulerCache) UpdateSnapshot(nodeSnapshot *Snapshot) error {
} }
func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) { func (cache *schedulerCache) updateNodeInfoSnapshotList(snapshot *Snapshot, updateAll bool) {
snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) snapshot.havePodsWithAffinityNodeInfoList = make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
if updateAll { if updateAll {
// Take a snapshot of the nodes order in the tree // Take a snapshot of the nodes order in the tree
snapshot.nodeInfoList = make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) snapshot.nodeInfoList = make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ { for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next() nodeName := cache.nodeTree.next()
if n := snapshot.nodeInfoMap[nodeName]; n != nil { if n := snapshot.nodeInfoMap[nodeName]; n != nil {
@ -342,7 +342,7 @@ func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter,
} }
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@ -368,7 +368,7 @@ func (cache *schedulerCache) FinishBinding(pod *v1.Pod) error {
// finishBinding exists to make tests determinitistic by injecting now as an argument // finishBinding exists to make tests determinitistic by injecting now as an argument
func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error { func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@ -387,7 +387,7 @@ func (cache *schedulerCache) finishBinding(pod *v1.Pod, now time.Time) error {
} }
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@ -419,7 +419,7 @@ func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
func (cache *schedulerCache) addPod(pod *v1.Pod) { func (cache *schedulerCache) addPod(pod *v1.Pod) {
n, ok := cache.nodes[pod.Spec.NodeName] n, ok := cache.nodes[pod.Spec.NodeName]
if !ok { if !ok {
n = newNodeInfoListItem(schedulernodeinfo.NewNodeInfo()) n = newNodeInfoListItem(schedulertypes.NewNodeInfo())
cache.nodes[pod.Spec.NodeName] = n cache.nodes[pod.Spec.NodeName] = n
} }
n.info.AddPod(pod) n.info.AddPod(pod)
@ -452,7 +452,7 @@ func (cache *schedulerCache) removePod(pod *v1.Pod) error {
} }
func (cache *schedulerCache) AddPod(pod *v1.Pod) error { func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@ -489,7 +489,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
} }
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
key, err := schedulernodeinfo.GetPodKey(oldPod) key, err := schedulertypes.GetPodKey(oldPod)
if err != nil { if err != nil {
return err return err
} }
@ -517,7 +517,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
} }
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return err return err
} }
@ -546,7 +546,7 @@ func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
} }
func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) { func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return false, err return false, err
} }
@ -564,7 +564,7 @@ func (cache *schedulerCache) IsAssumedPod(pod *v1.Pod) (bool, error) {
// GetPod might return a pod whose node has already been deleted from // GetPod might return a pod whose node has already been deleted from
// the main cache. This is useful to properly process pod update events. // the main cache. This is useful to properly process pod update events.
func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) { func (cache *schedulerCache) GetPod(pod *v1.Pod) (*v1.Pod, error) {
key, err := schedulernodeinfo.GetPodKey(pod) key, err := schedulertypes.GetPodKey(pod)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -586,7 +586,7 @@ func (cache *schedulerCache) AddNode(node *v1.Node) error {
n, ok := cache.nodes[node.Name] n, ok := cache.nodes[node.Name]
if !ok { if !ok {
n = newNodeInfoListItem(schedulernodeinfo.NewNodeInfo()) n = newNodeInfoListItem(schedulertypes.NewNodeInfo())
cache.nodes[node.Name] = n cache.nodes[node.Name] = n
} else { } else {
cache.removeNodeImageStates(n.info.Node()) cache.removeNodeImageStates(n.info.Node())
@ -604,7 +604,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
n, ok := cache.nodes[newNode.Name] n, ok := cache.nodes[newNode.Name]
if !ok { if !ok {
n = newNodeInfoListItem(schedulernodeinfo.NewNodeInfo()) n = newNodeInfoListItem(schedulertypes.NewNodeInfo())
cache.nodes[newNode.Name] = n cache.nodes[newNode.Name] = n
cache.nodeTree.addNode(newNode) cache.nodeTree.addNode(newNode)
} else { } else {
@ -641,8 +641,8 @@ func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
// addNodeImageStates adds states of the images on the given node to the given nodeInfo and updates the imageStates in // addNodeImageStates adds states of the images on the given node to the given nodeInfo and updates the imageStates in
// scheduler cache. This function assumes the lock to scheduler cache has been acquired. // scheduler cache. This function assumes the lock to scheduler cache has been acquired.
func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulernodeinfo.NodeInfo) { func (cache *schedulerCache) addNodeImageStates(node *v1.Node, nodeInfo *schedulertypes.NodeInfo) {
newSum := make(map[string]*schedulernodeinfo.ImageStateSummary) newSum := make(map[string]*schedulertypes.ImageStateSummary)
for _, image := range node.Status.Images { for _, image := range node.Status.Images {
for _, name := range image.Names { for _, name := range image.Names {

View File

@ -31,11 +31,11 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature" utilfeature "k8s.io/apiserver/pkg/util/feature"
featuregatetesting "k8s.io/component-base/featuregate/testing" featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/kubernetes/pkg/features" "k8s.io/kubernetes/pkg/features"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
schedutil "k8s.io/kubernetes/pkg/scheduler/util" schedutil "k8s.io/kubernetes/pkg/scheduler/util"
) )
func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulernodeinfo.NodeInfo) error { func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulertypes.NodeInfo) error {
if (actual == nil) != (expected == nil) { if (actual == nil) != (expected == nil) {
return errors.New("one of the actual or expected is nil and the other is not") return errors.New("one of the actual or expected is nil and the other is not")
} }
@ -70,21 +70,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo
return b return b
} }
func (b *hostPortInfoBuilder) build() schedulernodeinfo.HostPortInfo { func (b *hostPortInfoBuilder) build() schedulertypes.HostPortInfo {
res := make(schedulernodeinfo.HostPortInfo) res := make(schedulertypes.HostPortInfo)
for _, param := range b.inputs { for _, param := range b.inputs {
res.Add(param.ip, param.protocol, param.port) res.Add(param.ip, param.protocol, param.port)
} }
return res return res
} }
func newNodeInfo(requestedResource *schedulernodeinfo.Resource, func newNodeInfo(requestedResource *schedulertypes.Resource,
nonzeroRequest *schedulernodeinfo.Resource, nonzeroRequest *schedulertypes.Resource,
pods []*v1.Pod, pods []*v1.Pod,
usedPorts schedulernodeinfo.HostPortInfo, usedPorts schedulertypes.HostPortInfo,
imageStates map[string]*schedulernodeinfo.ImageStateSummary, imageStates map[string]*schedulertypes.ImageStateSummary,
) *schedulernodeinfo.NodeInfo { ) *schedulertypes.NodeInfo {
nodeInfo := schedulernodeinfo.NewNodeInfo(pods...) nodeInfo := schedulertypes.NewNodeInfo(pods...)
nodeInfo.SetRequestedResource(requestedResource) nodeInfo.SetRequestedResource(requestedResource)
nodeInfo.SetNonZeroRequest(nonzeroRequest) nodeInfo.SetNonZeroRequest(nonzeroRequest)
nodeInfo.SetUsedPorts(usedPorts) nodeInfo.SetUsedPorts(usedPorts)
@ -112,98 +112,98 @@ func TestAssumePodScheduled(t *testing.T) {
tests := []struct { tests := []struct {
pods []*v1.Pod pods []*v1.Pod
wNodeInfo *schedulernodeinfo.NodeInfo wNodeInfo *schedulertypes.NodeInfo
}{{ }{{
pods: []*v1.Pod{testPods[0]}, pods: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[1], testPods[2]}, pods: []*v1.Pod{testPods[1], testPods[2]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
}, },
[]*v1.Pod{testPods[1], testPods[2]}, []*v1.Pod{testPods[1], testPods[2]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, { // test non-zero request }, { // test non-zero request
pods: []*v1.Pod{testPods[3]}, pods: []*v1.Pod{testPods[3]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 0, MilliCPU: 0,
Memory: 0, Memory: 0,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: schedutil.DefaultMilliCPURequest, MilliCPU: schedutil.DefaultMilliCPURequest,
Memory: schedutil.DefaultMemoryRequest, Memory: schedutil.DefaultMemoryRequest,
}, },
[]*v1.Pod{testPods[3]}, []*v1.Pod{testPods[3]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[4]}, pods: []*v1.Pod{testPods[4]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3}, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[4]}, []*v1.Pod{testPods[4]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[4], testPods[5]}, pods: []*v1.Pod{testPods[4], testPods[5]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8}, ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 300, MilliCPU: 300,
Memory: 1524, Memory: 1524,
}, },
[]*v1.Pod{testPods[4], testPods[5]}, []*v1.Pod{testPods[4], testPods[5]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, { }, {
pods: []*v1.Pod{testPods[6]}, pods: []*v1.Pod{testPods[6]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[6]}, []*v1.Pod{testPods[6]},
newHostPortInfoBuilder().build(), newHostPortInfoBuilder().build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, },
} }
@ -263,13 +263,13 @@ func TestExpirePod(t *testing.T) {
pods []*testExpirePodStruct pods []*testExpirePodStruct
cleanupTime time.Time cleanupTime time.Time
wNodeInfo *schedulernodeinfo.NodeInfo wNodeInfo *schedulertypes.NodeInfo
}{{ // assumed pod would expire }{{ // assumed pod would expire
pods: []*testExpirePodStruct{ pods: []*testExpirePodStruct{
{pod: testPods[0], finishBind: true, assumedTime: now}, {pod: testPods[0], finishBind: true, assumedTime: now},
}, },
cleanupTime: now.Add(2 * ttl), cleanupTime: now.Add(2 * ttl),
wNodeInfo: schedulernodeinfo.NewNodeInfo(), wNodeInfo: schedulertypes.NewNodeInfo(),
}, { // first one would expire, second and third would not. }, { // first one would expire, second and third would not.
pods: []*testExpirePodStruct{ pods: []*testExpirePodStruct{
{pod: testPods[0], finishBind: true, assumedTime: now}, {pod: testPods[0], finishBind: true, assumedTime: now},
@ -278,18 +278,18 @@ func TestExpirePod(t *testing.T) {
}, },
cleanupTime: now.Add(2 * ttl), cleanupTime: now.Add(2 * ttl),
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 400, MilliCPU: 400,
Memory: 2048, Memory: 2048,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 400, MilliCPU: 400,
Memory: 2048, Memory: 2048,
}, },
// Order gets altered when removing pods. // Order gets altered when removing pods.
[]*v1.Pod{testPods[2], testPods[1]}, []*v1.Pod{testPods[2], testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}} }}
@ -336,22 +336,22 @@ func TestAddPodWillConfirm(t *testing.T) {
podsToAssume []*v1.Pod podsToAssume []*v1.Pod
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
wNodeInfo *schedulernodeinfo.NodeInfo wNodeInfo *schedulertypes.NodeInfo
}{{ // two pods were assumed at the same time, but Add() is called only for the first one, which gets confirmed. }{{ // two pods were assumed at the same time, but Add() is called only for the first one, which gets confirmed.
podsToAssume: []*v1.Pod{testPods[0], testPods[1]}, podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
podsToAdd: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]},
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}} }}
@ -438,25 +438,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
podsToUpdate [][]*v1.Pod podsToUpdate [][]*v1.Pod
wNodeInfo map[string]*schedulernodeinfo.NodeInfo wNodeInfo map[string]*schedulertypes.NodeInfo
}{{ }{{
podsToAssume: []*v1.Pod{assumedPod.DeepCopy()}, podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
podsToAdd: []*v1.Pod{addedPod.DeepCopy()}, podsToAdd: []*v1.Pod{addedPod.DeepCopy()},
podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}}, podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
wNodeInfo: map[string]*schedulernodeinfo.NodeInfo{ wNodeInfo: map[string]*schedulertypes.NodeInfo{
"assumed-node": nil, "assumed-node": nil,
"actual-node": newNodeInfo( "actual-node": newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{updatedPod.DeepCopy()}, []*v1.Pod{updatedPod.DeepCopy()},
newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(), newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, },
}} }}
@ -499,21 +499,21 @@ func TestAddPodAfterExpiration(t *testing.T) {
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
wNodeInfo *schedulernodeinfo.NodeInfo wNodeInfo *schedulertypes.NodeInfo
}{{ }{{
pod: basePod, pod: basePod,
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{basePod}, []*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}} }}
@ -555,34 +555,34 @@ func TestUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod podsToUpdate []*v1.Pod
wNodeInfo []*schedulernodeinfo.NodeInfo wNodeInfo []*schedulertypes.NodeInfo
}{{ // add a pod and then update it twice }{{ // add a pod and then update it twice
podsToAdd: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo( wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
[]*v1.Pod{testPods[1]}, []*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), newNodeInfo( ), newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
)}, )},
}} }}
@ -686,35 +686,35 @@ func TestExpireAddUpdatePod(t *testing.T) {
podsToAdd []*v1.Pod podsToAdd []*v1.Pod
podsToUpdate []*v1.Pod podsToUpdate []*v1.Pod
wNodeInfo []*schedulernodeinfo.NodeInfo wNodeInfo []*schedulertypes.NodeInfo
}{{ // Pod is assumed, expired, and added. Then it would be updated twice. }{{ // Pod is assumed, expired, and added. Then it would be updated twice.
podsToAssume: []*v1.Pod{testPods[0]}, podsToAssume: []*v1.Pod{testPods[0]},
podsToAdd: []*v1.Pod{testPods[0]}, podsToAdd: []*v1.Pod{testPods[0]},
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo( wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 200, MilliCPU: 200,
Memory: 1024, Memory: 1024,
}, },
[]*v1.Pod{testPods[1]}, []*v1.Pod{testPods[1]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), newNodeInfo( ), newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{testPods[0]}, []*v1.Pod{testPods[0]},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
)}, )},
}} }}
@ -780,21 +780,21 @@ func TestEphemeralStorageResource(t *testing.T) {
podE := makePodWithEphemeralStorage(nodeName, "500") podE := makePodWithEphemeralStorage(nodeName, "500")
tests := []struct { tests := []struct {
pod *v1.Pod pod *v1.Pod
wNodeInfo *schedulernodeinfo.NodeInfo wNodeInfo *schedulertypes.NodeInfo
}{ }{
{ {
pod: podE, pod: podE,
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
EphemeralStorage: 500, EphemeralStorage: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: schedutil.DefaultMilliCPURequest, MilliCPU: schedutil.DefaultMilliCPURequest,
Memory: schedutil.DefaultMemoryRequest, Memory: schedutil.DefaultMemoryRequest,
}, },
[]*v1.Pod{podE}, []*v1.Pod{podE},
schedulernodeinfo.HostPortInfo{}, schedulertypes.HostPortInfo{},
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}, },
} }
@ -827,7 +827,7 @@ func TestRemovePod(t *testing.T) {
tests := []struct { tests := []struct {
nodes []*v1.Node nodes []*v1.Node
pod *v1.Pod pod *v1.Pod
wNodeInfo *schedulernodeinfo.NodeInfo wNodeInfo *schedulertypes.NodeInfo
}{{ }{{
nodes: []*v1.Node{ nodes: []*v1.Node{
{ {
@ -839,17 +839,17 @@ func TestRemovePod(t *testing.T) {
}, },
pod: basePod, pod: basePod,
wNodeInfo: newNodeInfo( wNodeInfo: newNodeInfo(
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
&schedulernodeinfo.Resource{ &schedulertypes.Resource{
MilliCPU: 100, MilliCPU: 100,
Memory: 500, Memory: 500,
}, },
[]*v1.Pod{basePod}, []*v1.Pod{basePod},
newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(), newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
make(map[string]*schedulernodeinfo.ImageStateSummary), make(map[string]*schedulertypes.ImageStateSummary),
), ),
}} }}
@ -930,7 +930,7 @@ func TestForgetPod(t *testing.T) {
// getResourceRequest returns the resource request of all containers in Pods; // getResourceRequest returns the resource request of all containers in Pods;
// excluding initContainers. // excluding initContainers.
func getResourceRequest(pod *v1.Pod) v1.ResourceList { func getResourceRequest(pod *v1.Pod) v1.ResourceList {
result := &schedulernodeinfo.Resource{} result := &schedulertypes.Resource{}
for _, container := range pod.Spec.Containers { for _, container := range pod.Spec.Containers {
result.Add(container.Resources.Requests) result.Add(container.Resources.Requests)
} }
@ -939,13 +939,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
} }
// buildNodeInfo creates a NodeInfo by simulating node operations in cache. // buildNodeInfo creates a NodeInfo by simulating node operations in cache.
func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulernodeinfo.NodeInfo { func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulertypes.NodeInfo {
expected := schedulernodeinfo.NewNodeInfo() expected := schedulertypes.NewNodeInfo()
// Simulate SetNode. // Simulate SetNode.
expected.SetNode(node) expected.SetNode(node)
expected.SetAllocatableResource(schedulernodeinfo.NewResource(node.Status.Allocatable)) expected.SetAllocatableResource(schedulertypes.NewResource(node.Status.Allocatable))
expected.SetTaints(node.Spec.Taints) expected.SetTaints(node.Spec.Taints)
expected.SetGeneration(expected.GetGeneration() + 1) expected.SetGeneration(expected.GetGeneration() + 1)
@ -1156,7 +1156,7 @@ func TestNodeOperators(t *testing.T) {
} }
got, found = cache.nodes[node.Name] got, found = cache.nodes[node.Name]
if !found { if !found {
t.Errorf("Failed to find node %v in schedulernodeinfo after UpdateNode.", node.Name) t.Errorf("Failed to find node %v in schedulertypes after UpdateNode.", node.Name)
} }
if got.info.GetGeneration() <= expected.GetGeneration() { if got.info.GetGeneration() <= expected.GetGeneration() {
t.Errorf("Generation is not incremented. got: %v, expected: %v", got.info.GetGeneration(), expected.GetGeneration()) t.Errorf("Generation is not incremented. got: %v, expected: %v", got.info.GetGeneration(), expected.GetGeneration())
@ -1164,7 +1164,7 @@ func TestNodeOperators(t *testing.T) {
expected.SetGeneration(got.info.GetGeneration()) expected.SetGeneration(got.info.GetGeneration())
if !reflect.DeepEqual(got.info, expected) { if !reflect.DeepEqual(got.info, expected) {
t.Errorf("Failed to update node in schedulernodeinfo:\n got: %+v \nexpected: %+v", got, expected) t.Errorf("Failed to update node in schedulertypes:\n got: %+v \nexpected: %+v", got, expected)
} }
// Check nodeTree after update // Check nodeTree after update
if cache.nodeTree.numNodes != 1 || cache.nodeTree.next() != node.Name { if cache.nodeTree.numNodes != 1 || cache.nodeTree.next() != node.Name {
@ -1533,8 +1533,8 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot)
return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList)) return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList))
} }
expectedNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) expectedNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
expectedHavePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes) expectedHavePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
for i := 0; i < cache.nodeTree.numNodes; i++ { for i := 0; i < cache.nodeTree.numNodes; i++ {
nodeName := cache.nodeTree.next() nodeName := cache.nodeTree.next()
if n := snapshot.nodeInfoMap[nodeName]; n != nil { if n := snapshot.nodeInfoMap[nodeName]; n != nil {

View File

@ -14,7 +14,7 @@ go_library(
deps = [ deps = [
"//pkg/scheduler/internal/cache:go_default_library", "//pkg/scheduler/internal/cache:go_default_library",
"//pkg/scheduler/internal/queue:go_default_library", "//pkg/scheduler/internal/queue:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
@ -27,7 +27,7 @@ go_test(
srcs = ["comparer_test.go"], srcs = ["comparer_test.go"],
embed = [":go_default_library"], embed = [":go_default_library"],
deps = [ deps = [
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
], ],

View File

@ -26,7 +26,7 @@ import (
"k8s.io/klog" "k8s.io/klog"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// CacheComparer is an implementation of the Scheduler's cache comparer. // CacheComparer is an implementation of the Scheduler's cache comparer.
@ -68,7 +68,7 @@ func (c *CacheComparer) Compare() error {
} }
// CompareNodes compares actual nodes with cached nodes. // CompareNodes compares actual nodes with cached nodes.
func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) { func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*schedulertypes.NodeInfo) (missed, redundant []string) {
actual := []string{} actual := []string{}
for _, node := range nodes { for _, node := range nodes {
actual = append(actual, node.Name) actual = append(actual, node.Name)
@ -83,7 +83,7 @@ func (c *CacheComparer) CompareNodes(nodes []*v1.Node, nodeinfos map[string]*sch
} }
// ComparePods compares actual pods with cached pods. // ComparePods compares actual pods with cached pods.
func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulernodeinfo.NodeInfo) (missed, redundant []string) { func (c *CacheComparer) ComparePods(pods, waitingPods []*v1.Pod, nodeinfos map[string]*schedulertypes.NodeInfo) (missed, redundant []string) {
actual := []string{} actual := []string{}
for _, pod := range pods { for _, pod := range pods {
actual = append(actual, string(pod.UID)) actual = append(actual, string(pod.UID))

View File

@ -22,7 +22,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
func TestCompareNodes(t *testing.T) { func TestCompareNodes(t *testing.T) {
@ -72,9 +72,9 @@ func testCompareNodes(actual, cached, missing, redundant []string, t *testing.T)
nodes = append(nodes, node) nodes = append(nodes, node)
} }
nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo) nodeInfo := make(map[string]*schedulertypes.NodeInfo)
for _, nodeName := range cached { for _, nodeName := range cached {
nodeInfo[nodeName] = &schedulernodeinfo.NodeInfo{} nodeInfo[nodeName] = &schedulertypes.NodeInfo{}
} }
m, r := compare.CompareNodes(nodes, nodeInfo) m, r := compare.CompareNodes(nodes, nodeInfo)
@ -170,14 +170,14 @@ func testComparePods(actual, cached, queued, missing, redundant []string, t *tes
queuedPods = append(queuedPods, pod) queuedPods = append(queuedPods, pod)
} }
nodeInfo := make(map[string]*schedulernodeinfo.NodeInfo) nodeInfo := make(map[string]*schedulertypes.NodeInfo)
for _, uid := range cached { for _, uid := range cached {
pod := &v1.Pod{} pod := &v1.Pod{}
pod.UID = types.UID(uid) pod.UID = types.UID(uid)
pod.Namespace = "ns" pod.Namespace = "ns"
pod.Name = uid pod.Name = uid
nodeInfo[uid] = schedulernodeinfo.NewNodeInfo(pod) nodeInfo[uid] = schedulertypes.NewNodeInfo(pod)
} }
m, r := compare.ComparePods(pods, queuedPods, nodeInfo) m, r := compare.ComparePods(pods, queuedPods, nodeInfo)

View File

@ -25,7 +25,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
"k8s.io/kubernetes/pkg/scheduler/internal/queue" "k8s.io/kubernetes/pkg/scheduler/internal/queue"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// CacheDumper writes some information from the scheduler cache and the scheduling queue to the // CacheDumper writes some information from the scheduler cache and the scheduling queue to the
@ -61,7 +61,7 @@ func (d *CacheDumper) dumpSchedulingQueue() {
} }
// printNodeInfo writes parts of NodeInfo to a string. // printNodeInfo writes parts of NodeInfo to a string.
func (d *CacheDumper) printNodeInfo(n *schedulernodeinfo.NodeInfo) string { func (d *CacheDumper) printNodeInfo(n *schedulertypes.NodeInfo) string {
var nodeData strings.Builder var nodeData strings.Builder
nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n", nodeData.WriteString(fmt.Sprintf("\nNode name: %+v\nRequested Resources: %+v\nAllocatable Resources:%+v\nScheduled Pods(number: %v):\n",
n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods()))) n.Node().Name, n.RequestedResource(), n.AllocatableResource(), len(n.Pods())))

View File

@ -19,7 +19,7 @@ package cache
import ( import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// Cache collects pods' information and provides node-level aggregated information. // Cache collects pods' information and provides node-level aggregated information.
@ -108,5 +108,5 @@ type Cache interface {
// Dump is a dump of the cache state. // Dump is a dump of the cache state.
type Dump struct { type Dump struct {
AssumedPods map[string]bool AssumedPods map[string]bool
Nodes map[string]*schedulernodeinfo.NodeInfo Nodes map[string]*schedulertypes.NodeInfo
} }
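The Cache interface above is driven through an assume/confirm protocol, and the cache tests earlier in this diff (TestAssumePodScheduled, TestAddPodWillConfirm, TestForgetPod) exercise exactly that flow. The following is a sketch only, not code from this commit; the helper name and control flow are illustrative:

package cache // hypothetical placement next to the Cache interface above

import v1 "k8s.io/api/core/v1"

// bindAndConfirm sketches the lifecycle the tests above exercise: the pod is
// optimistically assumed onto its node, binding is marked finished so the
// expiration clock starts, and the informer's Add event later confirms it.
func bindAndConfirm(c Cache, assumed *v1.Pod) error {
	if err := c.AssumePod(assumed); err != nil {
		return err
	}
	if err := c.FinishBinding(assumed); err != nil {
		return err
	}
	// Delivered once the bind succeeded; on a failed bind the caller would
	// instead invoke c.ForgetPod(assumed) to undo the assumption.
	return c.AddPod(assumed)
}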

View File

@ -23,18 +23,18 @@ import (
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a // Snapshot is a snapshot of cache NodeInfo and NodeTree order. The scheduler takes a
// snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle. // snapshot at the beginning of each scheduling cycle and uses it for its operations in that cycle.
type Snapshot struct { type Snapshot struct {
// nodeInfoMap a map of node name to a snapshot of its NodeInfo. // nodeInfoMap a map of node name to a snapshot of its NodeInfo.
nodeInfoMap map[string]*schedulernodeinfo.NodeInfo nodeInfoMap map[string]*schedulertypes.NodeInfo
// nodeInfoList is the list of nodes as ordered in the cache's nodeTree. // nodeInfoList is the list of nodes as ordered in the cache's nodeTree.
nodeInfoList []*schedulernodeinfo.NodeInfo nodeInfoList []*schedulertypes.NodeInfo
// havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms. // havePodsWithAffinityNodeInfoList is the list of nodes with at least one pod declaring affinity terms.
havePodsWithAffinityNodeInfoList []*schedulernodeinfo.NodeInfo havePodsWithAffinityNodeInfoList []*schedulertypes.NodeInfo
generation int64 generation int64
} }
@ -43,15 +43,15 @@ var _ schedulerlisters.SharedLister = &Snapshot{}
// NewEmptySnapshot initializes a Snapshot struct and returns it. // NewEmptySnapshot initializes a Snapshot struct and returns it.
func NewEmptySnapshot() *Snapshot { func NewEmptySnapshot() *Snapshot {
return &Snapshot{ return &Snapshot{
nodeInfoMap: make(map[string]*schedulernodeinfo.NodeInfo), nodeInfoMap: make(map[string]*schedulertypes.NodeInfo),
} }
} }
// NewSnapshot initializes a Snapshot struct and returns it. // NewSnapshot initializes a Snapshot struct and returns it.
func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot { func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
nodeInfoMap := createNodeInfoMap(pods, nodes) nodeInfoMap := createNodeInfoMap(pods, nodes)
nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) nodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodeInfoMap))
havePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodeInfoMap)) havePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodeInfoMap))
for _, v := range nodeInfoMap { for _, v := range nodeInfoMap {
nodeInfoList = append(nodeInfoList, v) nodeInfoList = append(nodeInfoList, v)
if len(v.PodsWithAffinity()) > 0 { if len(v.PodsWithAffinity()) > 0 {
@ -70,12 +70,12 @@ func NewSnapshot(pods []*v1.Pod, nodes []*v1.Node) *Snapshot {
// createNodeInfoMap obtains a list of pods and pivots that list into a map // createNodeInfoMap obtains a list of pods and pivots that list into a map
// where the keys are node names and the values are the aggregated information // where the keys are node names and the values are the aggregated information
// for that node. // for that node.
func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulernodeinfo.NodeInfo { func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulertypes.NodeInfo {
nodeNameToInfo := make(map[string]*schedulernodeinfo.NodeInfo) nodeNameToInfo := make(map[string]*schedulertypes.NodeInfo)
for _, pod := range pods { for _, pod := range pods {
nodeName := pod.Spec.NodeName nodeName := pod.Spec.NodeName
if _, ok := nodeNameToInfo[nodeName]; !ok { if _, ok := nodeNameToInfo[nodeName]; !ok {
nodeNameToInfo[nodeName] = schedulernodeinfo.NewNodeInfo() nodeNameToInfo[nodeName] = schedulertypes.NewNodeInfo()
} }
nodeNameToInfo[nodeName].AddPod(pod) nodeNameToInfo[nodeName].AddPod(pod)
} }
@ -83,7 +83,7 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulerno
for _, node := range nodes { for _, node := range nodes {
if _, ok := nodeNameToInfo[node.Name]; !ok { if _, ok := nodeNameToInfo[node.Name]; !ok {
nodeNameToInfo[node.Name] = schedulernodeinfo.NewNodeInfo() nodeNameToInfo[node.Name] = schedulertypes.NewNodeInfo()
} }
nodeInfo := nodeNameToInfo[node.Name] nodeInfo := nodeNameToInfo[node.Name]
nodeInfo.SetNode(node) nodeInfo.SetNode(node)
@ -93,12 +93,12 @@ func createNodeInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*schedulerno
} }
// getNodeImageStates returns the given node's image states based on the given imageExistence map. // getNodeImageStates returns the given node's image states based on the given imageExistence map.
func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*schedulernodeinfo.ImageStateSummary { func getNodeImageStates(node *v1.Node, imageExistenceMap map[string]sets.String) map[string]*schedulertypes.ImageStateSummary {
imageStates := make(map[string]*schedulernodeinfo.ImageStateSummary) imageStates := make(map[string]*schedulertypes.ImageStateSummary)
for _, image := range node.Status.Images { for _, image := range node.Status.Images {
for _, name := range image.Names { for _, name := range image.Names {
imageStates[name] = &schedulernodeinfo.ImageStateSummary{ imageStates[name] = &schedulertypes.ImageStateSummary{
Size: image.SizeBytes, Size: image.SizeBytes,
NumNodes: len(imageExistenceMap[name]), NumNodes: len(imageExistenceMap[name]),
} }
@ -139,7 +139,7 @@ func (s *Snapshot) NumNodes() int {
return len(s.nodeInfoList) return len(s.nodeInfoList)
} }
type podLister []*schedulernodeinfo.NodeInfo type podLister []*schedulertypes.NodeInfo
// List returns the list of pods in the snapshot. // List returns the list of pods in the snapshot.
func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) { func (p podLister) List(selector labels.Selector) ([]*v1.Pod, error) {
@ -168,17 +168,17 @@ func (p podLister) FilteredList(filter schedulerlisters.PodFilter, selector labe
} }
// List returns the list of nodes in the snapshot. // List returns the list of nodes in the snapshot.
func (s *Snapshot) List() ([]*schedulernodeinfo.NodeInfo, error) { func (s *Snapshot) List() ([]*schedulertypes.NodeInfo, error) {
return s.nodeInfoList, nil return s.nodeInfoList, nil
} }
// HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity // HavePodsWithAffinityList returns the list of nodes with at least one pod with inter-pod affinity
func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) { func (s *Snapshot) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) {
return s.havePodsWithAffinityNodeInfoList, nil return s.havePodsWithAffinityNodeInfoList, nil
} }
// Get returns the NodeInfo of the given node name. // Get returns the NodeInfo of the given node name.
func (s *Snapshot) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) { func (s *Snapshot) Get(nodeName string) (*schedulertypes.NodeInfo, error) {
if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil { if v, ok := s.nodeInfoMap[nodeName]; ok && v.Node() != nil {
return v, nil return v, nil
} }
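Putting the pieces of this file together: NewEmptySnapshot allocates the map once, the cache's UpdateSnapshot (shown near the top of this diff) refreshes it in place at the start of each cycle, and List/HavePodsWithAffinityList/Get are what the rest of the scheduler reads. A minimal sketch, with the function name and error handling assumed rather than taken from this change:

package cache // sketch; lives alongside Snapshot and schedulerCache above

// refreshAndScan refreshes the per-cycle snapshot from the live cache and then
// walks the nodes in nodeTree order, the way a scheduling cycle would.
func refreshAndScan(c *schedulerCache, snapshot *Snapshot) error {
	if err := c.UpdateSnapshot(snapshot); err != nil {
		return err
	}
	nodeInfos, err := snapshot.List()
	if err != nil {
		return err
	}
	for _, ni := range nodeInfos {
		_ = ni.Node() // filtering and scoring read from this immutable view
	}
	return nil
}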

View File

@ -23,7 +23,7 @@ import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
const mb int64 = 1024 * 1024 const mb int64 = 1024 * 1024
@ -32,7 +32,7 @@ func TestGetNodeImageStates(t *testing.T) {
tests := []struct { tests := []struct {
node *v1.Node node *v1.Node
imageExistenceMap map[string]sets.String imageExistenceMap map[string]sets.String
expected map[string]*schedulernodeinfo.ImageStateSummary expected map[string]*schedulertypes.ImageStateSummary
}{ }{
{ {
node: &v1.Node{ node: &v1.Node{
@ -58,7 +58,7 @@ func TestGetNodeImageStates(t *testing.T) {
"gcr.io/10:v1": sets.NewString("node-0", "node-1"), "gcr.io/10:v1": sets.NewString("node-0", "node-1"),
"gcr.io/200:v1": sets.NewString("node-0"), "gcr.io/200:v1": sets.NewString("node-0"),
}, },
expected: map[string]*schedulernodeinfo.ImageStateSummary{ expected: map[string]*schedulertypes.ImageStateSummary{
"gcr.io/10:v1": { "gcr.io/10:v1": {
Size: int64(10 * mb), Size: int64(10 * mb),
NumNodes: 2, NumNodes: 2,
@ -78,7 +78,7 @@ func TestGetNodeImageStates(t *testing.T) {
"gcr.io/10:v1": sets.NewString("node-1"), "gcr.io/10:v1": sets.NewString("node-1"),
"gcr.io/200:v1": sets.NewString(), "gcr.io/200:v1": sets.NewString(),
}, },
expected: map[string]*schedulernodeinfo.ImageStateSummary{}, expected: map[string]*schedulertypes.ImageStateSummary{},
}, },
} }

View File

@ -6,7 +6,7 @@ go_library(
importpath = "k8s.io/kubernetes/pkg/scheduler/listers", importpath = "k8s.io/kubernetes/pkg/scheduler/listers",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library", "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",

View File

@ -7,7 +7,7 @@ go_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/scheduler/listers:go_default_library", "//pkg/scheduler/listers:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/api/storage/v1:go_default_library", "//staging/src/k8s.io/api/storage/v1:go_default_library",

View File

@ -28,7 +28,7 @@ import (
corelisters "k8s.io/client-go/listers/core/v1" corelisters "k8s.io/client-go/listers/core/v1"
storagelisters "k8s.io/client-go/listers/storage/v1" storagelisters "k8s.io/client-go/listers/storage/v1"
schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers" schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
var _ schedulerlisters.PodLister = &PodLister{} var _ schedulerlisters.PodLister = &PodLister{}
@ -247,11 +247,11 @@ func (pvcs PersistentVolumeClaimLister) PersistentVolumeClaims(namespace string)
} }
} }
// NodeInfoLister declares a schedulernodeinfo.NodeInfo type for testing. // NodeInfoLister declares a schedulertypes.NodeInfo type for testing.
type NodeInfoLister []*schedulernodeinfo.NodeInfo type NodeInfoLister []*schedulertypes.NodeInfo
// Get returns a fake node object in the fake nodes. // Get returns a fake node object in the fake nodes.
func (nodes NodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) { func (nodes NodeInfoLister) Get(nodeName string) (*schedulertypes.NodeInfo, error) {
for _, node := range nodes { for _, node := range nodes {
if node != nil && node.Node().Name == nodeName { if node != nil && node.Node().Name == nodeName {
return node, nil return node, nil
@ -261,21 +261,21 @@ func (nodes NodeInfoLister) Get(nodeName string) (*schedulernodeinfo.NodeInfo, e
} }
// List lists all nodes. // List lists all nodes.
func (nodes NodeInfoLister) List() ([]*schedulernodeinfo.NodeInfo, error) { func (nodes NodeInfoLister) List() ([]*schedulertypes.NodeInfo, error) {
return nodes, nil return nodes, nil
} }
// HavePodsWithAffinityList is supposed to list nodes with at least one pod with affinity. For the fake lister // HavePodsWithAffinityList is supposed to list nodes with at least one pod with affinity. For the fake lister
// we just return everything. // we just return everything.
func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) { func (nodes NodeInfoLister) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error) {
return nodes, nil return nodes, nil
} }
// NewNodeInfoLister creates a new fake NodeInfoLister from a slice of v1.Nodes. // NewNodeInfoLister creates a new fake NodeInfoLister from a slice of v1.Nodes.
func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister { func NewNodeInfoLister(nodes []*v1.Node) schedulerlisters.NodeInfoLister {
nodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, len(nodes)) nodeInfoList := make([]*schedulertypes.NodeInfo, 0, len(nodes))
for _, node := range nodes { for _, node := range nodes {
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo.SetNode(node) nodeInfo.SetNode(node)
nodeInfoList = append(nodeInfoList, nodeInfo) nodeInfoList = append(nodeInfoList, nodeInfo)
} }

View File

@ -20,7 +20,7 @@ import (
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/labels"
v1listers "k8s.io/client-go/listers/core/v1" v1listers "k8s.io/client-go/listers/core/v1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
// PodFilter is a function to filter a pod. If pod passed return true else return false. // PodFilter is a function to filter a pod. If pod passed return true else return false.
@ -38,11 +38,11 @@ type PodLister interface {
// NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name. // NodeInfoLister interface represents anything that can list/get NodeInfo objects from node name.
type NodeInfoLister interface { type NodeInfoLister interface {
// Returns the list of NodeInfos. // Returns the list of NodeInfos.
List() ([]*schedulernodeinfo.NodeInfo, error) List() ([]*schedulertypes.NodeInfo, error)
// Returns the list of NodeInfos of nodes with pods with affinity terms. // Returns the list of NodeInfos of nodes with pods with affinity terms.
HavePodsWithAffinityList() ([]*schedulernodeinfo.NodeInfo, error) HavePodsWithAffinityList() ([]*schedulertypes.NodeInfo, error)
// Returns the NodeInfo of the given node name. // Returns the NodeInfo of the given node name.
Get(nodeName string) (*schedulernodeinfo.NodeInfo, error) Get(nodeName string) (*schedulertypes.NodeInfo, error)
} }
// SharedLister groups scheduler-specific listers. // SharedLister groups scheduler-specific listers.

View File

@ -1,36 +1,13 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library( go_library(
name = "go_default_library", name = "go_default_library",
srcs = [ srcs = ["node_info.go"],
"host_ports.go",
"node_info.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo", importpath = "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/apis/core/v1/helper:go_default_library", "//pkg/scheduler/types:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"host_ports_test.go",
"node_info_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
], ],
) )

View File

@ -17,675 +17,27 @@ limitations under the License.
package nodeinfo package nodeinfo
import ( import (
"errors"
"fmt"
"sync"
"sync/atomic"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
) )
var ( // TODO(#89528): This file defines temporary aliases of types used by kubelet.
emptyResource = Resource{} // Those will be removed and the underlying types defined in scheduler/types will be used directly.
generation int64
)
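The TODO above captures the strategy of this commit: the moved types stay reachable under the old pkg/scheduler/nodeinfo path as Go type aliases, so the remaining importer (kubelet) keeps compiling unchanged while the implementations live in scheduler/types. A standalone illustration of why an alias declared with '=' is sufficient; the main package below is an example, not part of this change:

package main

import (
	"fmt"

	schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

// needsNewType accepts the relocated type directly.
func needsNewType(n *schedulertypes.NodeInfo) {
	fmt.Println(n.Node() == nil) // a fresh NodeInfo has no node set yet
}

func main() {
	// Built through the old package; because NodeInfo is an alias rather than
	// a new defined type, no conversion is needed to pass it along.
	ni := schedulernodeinfo.NewNodeInfo()
	needsNewType(ni)
}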
// ImageStateSummary provides summarized information about the state of an image.
type ImageStateSummary struct {
// Size of the image
Size int64
// Used to track how many nodes have this image
NumNodes int
}
// NodeInfo is node level aggregated information. // NodeInfo is node level aggregated information.
type NodeInfo struct { type NodeInfo = schedulertypes.NodeInfo
// Overall node information.
node *v1.Node
pods []*v1.Pod
podsWithAffinity []*v1.Pod
usedPorts HostPortInfo
// Total requested resources of all pods on this node. This includes assumed
// pods, which scheduler has sent for binding, but may not be scheduled yet.
requestedResource *Resource
// Total requested resources of all pods on this node with a minimum value
// applied to each container's CPU and memory requests. This does not reflect
// the actual resource requests for this node, but is used to avoid scheduling
// many zero-request pods onto one node.
nonzeroRequest *Resource
// We store allocatedResources (which is Node.Status.Allocatable.*) explicitly
// as int64, to avoid conversions and accessing map.
allocatableResource *Resource
// Cached taints of the node for faster lookup.
taints []v1.Taint
taintsErr error
// imageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
// checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image
// state information.
imageStates map[string]*ImageStateSummary
// TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of
// scheduling cycle.
// TODO: @ravig. Remove this once we have a clear approach for message passing across predicates and priorities.
TransientInfo *TransientSchedulerInfo
// Cached conditions of node for faster lookup.
memoryPressureCondition v1.ConditionStatus
diskPressureCondition v1.ConditionStatus
pidPressureCondition v1.ConditionStatus
// Whenever NodeInfo changes, generation is bumped.
// This is used to avoid cloning it if the object didn't change.
generation int64
}
//initializeNodeTransientInfo initializes transient information pertaining to node.
func initializeNodeTransientInfo() nodeTransientInfo {
return nodeTransientInfo{AllocatableVolumesCount: 0, RequestedVolumes: 0}
}
// nextGeneration: Let's make sure history never forgets the name...
// Increments the generation number monotonically ensuring that generation numbers never collide.
// Collision of the generation numbers would be particularly problematic if a node was deleted and
// added back with the same name. See issue#63262.
func nextGeneration() int64 {
return atomic.AddInt64(&generation, 1)
}
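The generation counter being moved here is what the comment on NodeInfo.generation refers to: every mutation bumps it, so snapshot consumers can avoid cloning objects that did not change. A sketch of that pattern under assumed names (snapshotEntry and refresh are illustrative, not from this commit):

package nodeinfo // sketch; uses only the NodeInfo alias defined in this file

// snapshotEntry pairs a cloned NodeInfo with the generation it was cloned at.
type snapshotEntry struct {
	info     *NodeInfo
	lastSeen int64
}

// refresh re-clones the live NodeInfo only when its generation has advanced,
// matching the "avoid cloning it if the object didn't change" note above.
func refresh(entry *snapshotEntry, live *NodeInfo) {
	if live.GetGeneration() == entry.lastSeen {
		return // unchanged since the last refresh; keep the existing clone
	}
	entry.info = live.Clone()
	entry.lastSeen = live.GetGeneration()
}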
// nodeTransientInfo contains transient node information while scheduling.
type nodeTransientInfo struct {
// AllocatableVolumesCount contains number of volumes that could be attached to node.
AllocatableVolumesCount int
// Requested number of volumes on a particular node.
RequestedVolumes int
}
// TransientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle.
// It consists of items that are valid for a scheduling cycle and is used for message passing across predicates and
// priorities. Some examples which could be used as fields are number of volumes being used on node, current utilization
// on node etc.
// IMPORTANT NOTE: Make sure that each field in this structure is documented along with usage. Expand this structure
// only when absolutely needed as this data structure will be created and destroyed during every scheduling cycle.
type TransientSchedulerInfo struct {
TransientLock sync.Mutex
// NodeTransInfo holds the information related to nodeTransientInformation. NodeName is the key here.
TransNodeInfo nodeTransientInfo
}
// NewTransientSchedulerInfo returns a new scheduler transient structure with initialized values.
func NewTransientSchedulerInfo() *TransientSchedulerInfo {
tsi := &TransientSchedulerInfo{
TransNodeInfo: initializeNodeTransientInfo(),
}
return tsi
}
// ResetTransientSchedulerInfo resets the TransientSchedulerInfo.
func (transientSchedInfo *TransientSchedulerInfo) ResetTransientSchedulerInfo() {
transientSchedInfo.TransientLock.Lock()
defer transientSchedInfo.TransientLock.Unlock()
// Reset TransientNodeInfo.
transientSchedInfo.TransNodeInfo.AllocatableVolumesCount = 0
transientSchedInfo.TransNodeInfo.RequestedVolumes = 0
}
// Resource is a collection of compute resources. // Resource is a collection of compute resources.
type Resource struct { type Resource = schedulertypes.Resource
MilliCPU int64
Memory int64
EphemeralStorage int64
// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
// explicitly as int, to avoid conversions and improve performance.
AllowedPodNumber int
// ScalarResources
ScalarResources map[v1.ResourceName]int64
}
// NewResource creates a Resource from ResourceList // NewResource creates a Resource from ResourceList
func NewResource(rl v1.ResourceList) *Resource { func NewResource(rl v1.ResourceList) *Resource {
r := &Resource{} return schedulertypes.NewResource(rl)
r.Add(rl)
return r
}
// Add adds ResourceList into Resource.
func (r *Resource) Add(rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
r.MilliCPU += rQuant.MilliValue()
case v1.ResourceMemory:
r.Memory += rQuant.Value()
case v1.ResourcePods:
r.AllowedPodNumber += int(rQuant.Value())
case v1.ResourceEphemeralStorage:
r.EphemeralStorage += rQuant.Value()
default:
if v1helper.IsScalarResourceName(rName) {
r.AddScalar(rName, rQuant.Value())
}
}
}
}
// ResourceList returns a resource list of this resource.
func (r *Resource) ResourceList() v1.ResourceList {
result := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
}
for rName, rQuant := range r.ScalarResources {
if v1helper.IsHugePageResourceName(rName) {
result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
} else {
result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
}
}
return result
}
// Clone returns a copy of this resource.
func (r *Resource) Clone() *Resource {
res := &Resource{
MilliCPU: r.MilliCPU,
Memory: r.Memory,
AllowedPodNumber: r.AllowedPodNumber,
EphemeralStorage: r.EphemeralStorage,
}
if r.ScalarResources != nil {
res.ScalarResources = make(map[v1.ResourceName]int64)
for k, v := range r.ScalarResources {
res.ScalarResources[k] = v
}
}
return res
}
// AddScalar adds a resource by a scalar value of this resource.
func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
r.SetScalar(name, r.ScalarResources[name]+quantity)
}
// SetScalar sets a resource by a scalar value of this resource.
func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
// Lazily allocate scalar resource map.
if r.ScalarResources == nil {
r.ScalarResources = map[v1.ResourceName]int64{}
}
r.ScalarResources[name] = quantity
}
// SetMaxResource compares with ResourceList and takes max value for each Resource.
func (r *Resource) SetMaxResource(rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuantity := range rl {
switch rName {
case v1.ResourceMemory:
if mem := rQuantity.Value(); mem > r.Memory {
r.Memory = mem
}
case v1.ResourceCPU:
if cpu := rQuantity.MilliValue(); cpu > r.MilliCPU {
r.MilliCPU = cpu
}
case v1.ResourceEphemeralStorage:
if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
r.EphemeralStorage = ephemeralStorage
}
default:
if v1helper.IsScalarResourceName(rName) {
value := rQuantity.Value()
if value > r.ScalarResources[rName] {
r.SetScalar(rName, value)
}
}
}
}
} }
// NewNodeInfo returns a ready to use empty NodeInfo object. // NewNodeInfo returns a ready to use empty NodeInfo object.
// If any pods are given in arguments, their information will be aggregated in // If any pods are given in arguments, their information will be aggregated in
// the returned object. // the returned object.
func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
ni := &NodeInfo{ return schedulertypes.NewNodeInfo(pods...)
requestedResource: &Resource{},
nonzeroRequest: &Resource{},
allocatableResource: &Resource{},
TransientInfo: NewTransientSchedulerInfo(),
generation: nextGeneration(),
usedPorts: make(HostPortInfo),
imageStates: make(map[string]*ImageStateSummary),
}
for _, pod := range pods {
ni.AddPod(pod)
}
return ni
}
// Node returns overall information about this node.
func (n *NodeInfo) Node() *v1.Node {
if n == nil {
return nil
}
return n.node
}
// Pods returns all pods scheduled (including assumed to be) on this node.
func (n *NodeInfo) Pods() []*v1.Pod {
if n == nil {
return nil
}
return n.pods
}
// SetPods sets all pods scheduled (including assumed to be) on this node.
func (n *NodeInfo) SetPods(pods []*v1.Pod) {
n.pods = pods
}
// UsedPorts returns used ports on this node.
func (n *NodeInfo) UsedPorts() HostPortInfo {
if n == nil {
return nil
}
return n.usedPorts
}
// SetUsedPorts sets the used ports on this node.
func (n *NodeInfo) SetUsedPorts(newUsedPorts HostPortInfo) {
n.usedPorts = newUsedPorts
}
// ImageStates returns the state information of all images.
func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary {
if n == nil {
return nil
}
return n.imageStates
}
// SetImageStates sets the state information of all images.
func (n *NodeInfo) SetImageStates(newImageStates map[string]*ImageStateSummary) {
n.imageStates = newImageStates
}
// PodsWithAffinity returns all pods with (anti)affinity constraints on this node.
func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
if n == nil {
return nil
}
return n.podsWithAffinity
}
// AllowedPodNumber returns the number of the allowed pods on this node.
func (n *NodeInfo) AllowedPodNumber() int {
if n == nil || n.allocatableResource == nil {
return 0
}
return n.allocatableResource.AllowedPodNumber
}
// Taints returns the taints list on this node.
func (n *NodeInfo) Taints() ([]v1.Taint, error) {
if n == nil {
return nil, nil
}
return n.taints, n.taintsErr
}
// SetTaints sets the taints list on this node.
func (n *NodeInfo) SetTaints(newTaints []v1.Taint) {
n.taints = newTaints
}
// RequestedResource returns aggregated resource request of pods on this node.
func (n *NodeInfo) RequestedResource() Resource {
if n == nil {
return emptyResource
}
return *n.requestedResource
}
// SetRequestedResource sets the aggregated resource request of pods on this node.
func (n *NodeInfo) SetRequestedResource(newResource *Resource) {
n.requestedResource = newResource
}
// NonZeroRequest returns aggregated nonzero resource request of pods on this node.
func (n *NodeInfo) NonZeroRequest() Resource {
if n == nil {
return emptyResource
}
return *n.nonzeroRequest
}
// SetNonZeroRequest sets the aggregated nonzero resource request of pods on this node.
func (n *NodeInfo) SetNonZeroRequest(newResource *Resource) {
n.nonzeroRequest = newResource
}
// AllocatableResource returns allocatable resources on a given node.
func (n *NodeInfo) AllocatableResource() Resource {
if n == nil {
return emptyResource
}
return *n.allocatableResource
}
// SetAllocatableResource sets the allocatableResource information of given node.
func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
n.allocatableResource = allocatableResource
n.generation = nextGeneration()
}
// GetGeneration returns the generation on this node.
func (n *NodeInfo) GetGeneration() int64 {
if n == nil {
return 0
}
return n.generation
}
// SetGeneration sets the generation on this node. This is for testing only.
func (n *NodeInfo) SetGeneration(newGeneration int64) {
n.generation = newGeneration
}
// Clone returns a copy of this node.
func (n *NodeInfo) Clone() *NodeInfo {
clone := &NodeInfo{
node: n.node,
requestedResource: n.requestedResource.Clone(),
nonzeroRequest: n.nonzeroRequest.Clone(),
allocatableResource: n.allocatableResource.Clone(),
taintsErr: n.taintsErr,
TransientInfo: n.TransientInfo,
memoryPressureCondition: n.memoryPressureCondition,
diskPressureCondition: n.diskPressureCondition,
pidPressureCondition: n.pidPressureCondition,
usedPorts: make(HostPortInfo),
imageStates: n.imageStates,
generation: n.generation,
}
if len(n.pods) > 0 {
clone.pods = append([]*v1.Pod(nil), n.pods...)
}
if len(n.usedPorts) > 0 {
// HostPortInfo is a map-in-map struct
// make sure it's deep copied
for ip, portMap := range n.usedPorts {
clone.usedPorts[ip] = make(map[ProtocolPort]struct{})
for protocolPort, v := range portMap {
clone.usedPorts[ip][protocolPort] = v
}
}
}
if len(n.podsWithAffinity) > 0 {
clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...)
}
if len(n.taints) > 0 {
clone.taints = append([]v1.Taint(nil), n.taints...)
}
return clone
}
// VolumeLimits returns volume limits associated with the node
func (n *NodeInfo) VolumeLimits() map[v1.ResourceName]int64 {
volumeLimits := map[v1.ResourceName]int64{}
for k, v := range n.AllocatableResource().ScalarResources {
if v1helper.IsAttachableVolumeResourceName(k) {
volumeLimits[k] = v
}
}
return volumeLimits
}
// String returns a human-readable representation of this NodeInfo.
func (n *NodeInfo) String() string {
podKeys := make([]string, len(n.pods))
for i, pod := range n.pods {
podKeys[i] = pod.Name
}
return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}",
podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts, n.allocatableResource)
}
func hasPodAffinityConstraints(pod *v1.Pod) bool {
affinity := pod.Spec.Affinity
return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil)
}
// AddPod adds pod information to this NodeInfo.
func (n *NodeInfo) AddPod(pod *v1.Pod) {
res, non0CPU, non0Mem := calculateResource(pod)
n.requestedResource.MilliCPU += res.MilliCPU
n.requestedResource.Memory += res.Memory
n.requestedResource.EphemeralStorage += res.EphemeralStorage
if n.requestedResource.ScalarResources == nil && len(res.ScalarResources) > 0 {
n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
}
for rName, rQuant := range res.ScalarResources {
n.requestedResource.ScalarResources[rName] += rQuant
}
n.nonzeroRequest.MilliCPU += non0CPU
n.nonzeroRequest.Memory += non0Mem
n.pods = append(n.pods, pod)
if hasPodAffinityConstraints(pod) {
n.podsWithAffinity = append(n.podsWithAffinity, pod)
}
// Consume ports when a pod is added.
n.UpdateUsedPorts(pod, true)
n.generation = nextGeneration()
}
// RemovePod subtracts pod information from this NodeInfo.
func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
k1, err := GetPodKey(pod)
if err != nil {
return err
}
for i := range n.podsWithAffinity {
k2, err := GetPodKey(n.podsWithAffinity[i])
if err != nil {
klog.Errorf("Cannot get pod key, err: %v", err)
continue
}
if k1 == k2 {
// delete the element
n.podsWithAffinity[i] = n.podsWithAffinity[len(n.podsWithAffinity)-1]
n.podsWithAffinity = n.podsWithAffinity[:len(n.podsWithAffinity)-1]
break
}
}
for i := range n.pods {
k2, err := GetPodKey(n.pods[i])
if err != nil {
klog.Errorf("Cannot get pod key, err: %v", err)
continue
}
if k1 == k2 {
// delete the element
n.pods[i] = n.pods[len(n.pods)-1]
n.pods = n.pods[:len(n.pods)-1]
// reduce the resource data
res, non0CPU, non0Mem := calculateResource(pod)
n.requestedResource.MilliCPU -= res.MilliCPU
n.requestedResource.Memory -= res.Memory
n.requestedResource.EphemeralStorage -= res.EphemeralStorage
if len(res.ScalarResources) > 0 && n.requestedResource.ScalarResources == nil {
n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
}
for rName, rQuant := range res.ScalarResources {
n.requestedResource.ScalarResources[rName] -= rQuant
}
n.nonzeroRequest.MilliCPU -= non0CPU
n.nonzeroRequest.Memory -= non0Mem
// Release ports when a pod is removed.
n.UpdateUsedPorts(pod, false)
n.generation = nextGeneration()
n.resetSlicesIfEmpty()
return nil
}
}
return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
}
// resets the slices to nil so that we can do DeepEqual in unit tests.
func (n *NodeInfo) resetSlicesIfEmpty() {
if len(n.podsWithAffinity) == 0 {
n.podsWithAffinity = nil
}
if len(n.pods) == 0 {
n.pods = nil
}
}
func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
resPtr := &res
for _, c := range pod.Spec.Containers {
resPtr.Add(c.Resources.Requests)
non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
non0CPU += non0CPUReq
non0Mem += non0MemReq
// No non-zero resources for GPUs or opaque resources.
}
// If Overhead is being utilized, add to the total requests for the pod
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
resPtr.Add(pod.Spec.Overhead)
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
}
if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
non0Mem += pod.Spec.Overhead.Memory().Value()
}
}
return
}
// UpdateUsedPorts updates the UsedPorts of NodeInfo.
func (n *NodeInfo) UpdateUsedPorts(pod *v1.Pod, add bool) {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
podPort := &container.Ports[k]
if add {
n.usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
} else {
n.usedPorts.Remove(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
}
}
}
}
// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) error {
n.node = node
n.allocatableResource = NewResource(node.Status.Allocatable)
n.taints = node.Spec.Taints
for i := range node.Status.Conditions {
cond := &node.Status.Conditions[i]
switch cond.Type {
case v1.NodeMemoryPressure:
n.memoryPressureCondition = cond.Status
case v1.NodeDiskPressure:
n.diskPressureCondition = cond.Status
case v1.NodePIDPressure:
n.pidPressureCondition = cond.Status
default:
// We ignore other conditions.
}
}
n.TransientInfo = NewTransientSchedulerInfo()
n.generation = nextGeneration()
return nil
}
// FilterOutPods receives a list of pods and filters out those whose node names
// are equal to the node of this NodeInfo, but are not found in the pods of this NodeInfo.
//
// Preemption logic simulates removal of pods on a node by removing them from the
// corresponding NodeInfo. In order for the simulation to work, we call this method
// on the pods returned from SchedulerCache, so that predicate functions see
// only the pods that are not removed from the NodeInfo.
func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
node := n.Node()
if node == nil {
return pods
}
filtered := make([]*v1.Pod, 0, len(pods))
for _, p := range pods {
if p.Spec.NodeName != node.Name {
filtered = append(filtered, p)
continue
}
// If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo.
podKey, err := GetPodKey(p)
if err != nil {
continue
}
for _, np := range n.Pods() {
npodkey, _ := GetPodKey(np)
if npodkey == podKey {
filtered = append(filtered, p)
break
}
}
}
return filtered
}
// GetPodKey returns the string key of a pod.
func GetPodKey(pod *v1.Pod) (string, error) {
uid := string(pod.UID)
if len(uid) == 0 {
return "", errors.New("Cannot get cache key for pod with empty UID")
}
return uid, nil
}
// Filter implements the PodFilter interface. It returns false only if the pod's node name
// matches NodeInfo.node and the pod is not found in the pods list; otherwise it
// returns true.
func (n *NodeInfo) Filter(pod *v1.Pod) bool {
if pod.Spec.NodeName != n.node.Name {
return true
}
for _, p := range n.pods {
if p.Name == pod.Name && p.Namespace == pod.Namespace {
return true
}
}
return false
} }
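
The hunk above reduces the old nodeinfo package to thin forwarders, so existing importers keep compiling while the implementation moves to pkg/scheduler/types. A minimal sketch of that forwarding pattern, assuming a single alias file; the exact set of aliases and wrappers in this commit may differ:

// Forwarding sketch (hypothetical file): the old import path re-exports the moved types.
package nodeinfo

import (
	v1 "k8s.io/api/core/v1"

	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

// Resource and NodeInfo remain usable under the old package name via type aliases.
type (
	Resource = schedulertypes.Resource
	NodeInfo = schedulertypes.NodeInfo
)

// Constructors simply delegate to the new package.
func NewResource(rl v1.ResourceList) *Resource { return schedulertypes.NewResource(rl) }

func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { return schedulertypes.NewNodeInfo(pods...) }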


@ -58,9 +58,9 @@ import (
internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache" internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake" fakecache "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake"
internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue" internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
"k8s.io/kubernetes/pkg/scheduler/profile" "k8s.io/kubernetes/pkg/scheduler/profile"
st "k8s.io/kubernetes/pkg/scheduler/testing" st "k8s.io/kubernetes/pkg/scheduler/testing"
schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
) )
type fakePodConditionUpdater struct{} type fakePodConditionUpdater struct{}
@ -401,7 +401,7 @@ func (s *fakeNodeSelector) Name() string {
return "FakeNodeSelector" return "FakeNodeSelector"
} }
func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status { func (s *fakeNodeSelector) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
if nodeInfo.Node().Name != s.NodeName { if nodeInfo.Node().Name != s.NodeName {
return framework.NewStatus(framework.UnschedulableAndUnresolvable) return framework.NewStatus(framework.UnschedulableAndUnresolvable)
} }
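
For plugin authors the change is mechanical: Filter (and the other extension points that take a NodeInfo) now accept *schedulertypes.NodeInfo instead of *nodeinfo.NodeInfo. A hedged sketch of a no-op filter against the renamed package; the plugin name and nil-node handling are illustrative, not part of this commit:

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"

	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

// noopFilter admits every node that exists; it only demonstrates the post-rename signature.
type noopFilter struct{}

func (noopFilter) Name() string { return "NoopFilter" }

// Filter now takes *schedulertypes.NodeInfo instead of the old *nodeinfo.NodeInfo.
func (noopFilter) Filter(_ context.Context, _ *framework.CycleState, _ *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
	if nodeInfo.Node() == nil {
		return framework.NewStatus(framework.UnschedulableAndUnresolvable)
	}
	return nil
}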

pkg/scheduler/types/BUILD (new file, 49 lines added)

@ -0,0 +1,49 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"host_ports.go",
"node_info.go",
],
importpath = "k8s.io/kubernetes/pkg/scheduler/types",
visibility = ["//visibility:public"],
deps = [
"//pkg/apis/core/v1/helper:go_default_library",
"//pkg/features:go_default_library",
"//pkg/scheduler/util:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"host_ports_test.go",
"node_info_test.go",
],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package nodeinfo package types
import ( import (
"k8s.io/api/core/v1" "k8s.io/api/core/v1"


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package nodeinfo package types
import ( import (
"testing" "testing"


@ -0,0 +1,691 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package types
import (
"errors"
"fmt"
"sync"
"sync/atomic"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/features"
schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)
var (
emptyResource = Resource{}
generation int64
)
// ImageStateSummary provides summarized information about the state of an image.
type ImageStateSummary struct {
// Size of the image
Size int64
// Used to track how many nodes have this image
NumNodes int
}
// NodeInfo is node level aggregated information.
type NodeInfo struct {
// Overall node information.
node *v1.Node
pods []*v1.Pod
podsWithAffinity []*v1.Pod
usedPorts HostPortInfo
// Total requested resources of all pods on this node. This includes assumed
// pods, which scheduler has sent for binding, but may not be scheduled yet.
requestedResource *Resource
// Total requested resources of all pods on this node with a minimum value
// applied to each container's CPU and memory requests. This does not reflect
// the actual resource requests for this node, but is used to avoid scheduling
// many zero-request pods onto one node.
nonzeroRequest *Resource
// We store allocatableResource (which is Node.Status.Allocatable.*) explicitly
// as int64, to avoid conversions and repeated map accesses.
allocatableResource *Resource
// Cached taints of the node for faster lookup.
taints []v1.Taint
taintsErr error
// imageStates holds the entry of an image if and only if this image is on the node. The entry can be used for
// checking an image's existence and advanced usage (e.g., image locality scheduling policy) based on the image
// state information.
imageStates map[string]*ImageStateSummary
// TransientInfo holds the information pertaining to a scheduling cycle. This will be destructed at the end of
// scheduling cycle.
// TODO: @ravig. Remove this once we have a clear approach for message passing across predicates and priorities.
TransientInfo *TransientSchedulerInfo
// Cached conditions of node for faster lookup.
memoryPressureCondition v1.ConditionStatus
diskPressureCondition v1.ConditionStatus
pidPressureCondition v1.ConditionStatus
// Whenever NodeInfo changes, generation is bumped.
// This is used to avoid cloning it if the object didn't change.
generation int64
}
// initializeNodeTransientInfo initializes transient information pertaining to the node.
func initializeNodeTransientInfo() nodeTransientInfo {
return nodeTransientInfo{AllocatableVolumesCount: 0, RequestedVolumes: 0}
}
// nextGeneration: Let's make sure history never forgets the name...
// Increments the generation number monotonically ensuring that generation numbers never collide.
// Collision of the generation numbers would be particularly problematic if a node was deleted and
// added back with the same name. See issue#63262.
func nextGeneration() int64 {
return atomic.AddInt64(&generation, 1)
}
// nodeTransientInfo contains transient node information while scheduling.
type nodeTransientInfo struct {
// AllocatableVolumesCount contains number of volumes that could be attached to node.
AllocatableVolumesCount int
// Requested number of volumes on a particular node.
RequestedVolumes int
}
// TransientSchedulerInfo is a transient structure which is destructed at the end of each scheduling cycle.
// It consists of items that are valid for a scheduling cycle and is used for message passing across predicates and
// priorities. Some examples which could be used as fields are number of volumes being used on node, current utilization
// on node etc.
// IMPORTANT NOTE: Make sure that each field in this structure is documented along with usage. Expand this structure
// only when absolutely needed as this data structure will be created and destroyed during every scheduling cycle.
type TransientSchedulerInfo struct {
TransientLock sync.Mutex
// TransNodeInfo holds the transient node information. NodeName is the key here.
TransNodeInfo nodeTransientInfo
}
// NewTransientSchedulerInfo returns a new scheduler transient structure with initialized values.
func NewTransientSchedulerInfo() *TransientSchedulerInfo {
tsi := &TransientSchedulerInfo{
TransNodeInfo: initializeNodeTransientInfo(),
}
return tsi
}
// ResetTransientSchedulerInfo resets the TransientSchedulerInfo.
func (transientSchedInfo *TransientSchedulerInfo) ResetTransientSchedulerInfo() {
transientSchedInfo.TransientLock.Lock()
defer transientSchedInfo.TransientLock.Unlock()
// Reset TransientNodeInfo.
transientSchedInfo.TransNodeInfo.AllocatableVolumesCount = 0
transientSchedInfo.TransNodeInfo.RequestedVolumes = 0
}
// Resource is a collection of compute resources.
type Resource struct {
MilliCPU int64
Memory int64
EphemeralStorage int64
// We store allowedPodNumber (which is Node.Status.Allocatable.Pods().Value())
// explicitly as int, to avoid conversions and improve performance.
AllowedPodNumber int
// ScalarResources
ScalarResources map[v1.ResourceName]int64
}
// NewResource creates a Resource from ResourceList
func NewResource(rl v1.ResourceList) *Resource {
r := &Resource{}
r.Add(rl)
return r
}
// Add adds ResourceList into Resource.
func (r *Resource) Add(rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuant := range rl {
switch rName {
case v1.ResourceCPU:
r.MilliCPU += rQuant.MilliValue()
case v1.ResourceMemory:
r.Memory += rQuant.Value()
case v1.ResourcePods:
r.AllowedPodNumber += int(rQuant.Value())
case v1.ResourceEphemeralStorage:
r.EphemeralStorage += rQuant.Value()
default:
if v1helper.IsScalarResourceName(rName) {
r.AddScalar(rName, rQuant.Value())
}
}
}
}
// ResourceList returns a resource list of this resource.
func (r *Resource) ResourceList() v1.ResourceList {
result := v1.ResourceList{
v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),
v1.ResourcePods: *resource.NewQuantity(int64(r.AllowedPodNumber), resource.BinarySI),
v1.ResourceEphemeralStorage: *resource.NewQuantity(r.EphemeralStorage, resource.BinarySI),
}
for rName, rQuant := range r.ScalarResources {
if v1helper.IsHugePageResourceName(rName) {
result[rName] = *resource.NewQuantity(rQuant, resource.BinarySI)
} else {
result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
}
}
return result
}
// Clone returns a copy of this resource.
func (r *Resource) Clone() *Resource {
res := &Resource{
MilliCPU: r.MilliCPU,
Memory: r.Memory,
AllowedPodNumber: r.AllowedPodNumber,
EphemeralStorage: r.EphemeralStorage,
}
if r.ScalarResources != nil {
res.ScalarResources = make(map[v1.ResourceName]int64)
for k, v := range r.ScalarResources {
res.ScalarResources[k] = v
}
}
return res
}
// AddScalar adds a resource by a scalar value of this resource.
func (r *Resource) AddScalar(name v1.ResourceName, quantity int64) {
r.SetScalar(name, r.ScalarResources[name]+quantity)
}
// SetScalar sets a resource by a scalar value of this resource.
func (r *Resource) SetScalar(name v1.ResourceName, quantity int64) {
// Lazily allocate scalar resource map.
if r.ScalarResources == nil {
r.ScalarResources = map[v1.ResourceName]int64{}
}
r.ScalarResources[name] = quantity
}
// SetMaxResource compares with ResourceList and takes max value for each Resource.
func (r *Resource) SetMaxResource(rl v1.ResourceList) {
if r == nil {
return
}
for rName, rQuantity := range rl {
switch rName {
case v1.ResourceMemory:
if mem := rQuantity.Value(); mem > r.Memory {
r.Memory = mem
}
case v1.ResourceCPU:
if cpu := rQuantity.MilliValue(); cpu > r.MilliCPU {
r.MilliCPU = cpu
}
case v1.ResourceEphemeralStorage:
if ephemeralStorage := rQuantity.Value(); ephemeralStorage > r.EphemeralStorage {
r.EphemeralStorage = ephemeralStorage
}
default:
if v1helper.IsScalarResourceName(rName) {
value := rQuantity.Value()
if value > r.ScalarResources[rName] {
r.SetScalar(rName, value)
}
}
}
}
}
// NewNodeInfo returns a ready to use empty NodeInfo object.
// If any pods are given in arguments, their information will be aggregated in
// the returned object.
func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
ni := &NodeInfo{
requestedResource: &Resource{},
nonzeroRequest: &Resource{},
allocatableResource: &Resource{},
TransientInfo: NewTransientSchedulerInfo(),
generation: nextGeneration(),
usedPorts: make(HostPortInfo),
imageStates: make(map[string]*ImageStateSummary),
}
for _, pod := range pods {
ni.AddPod(pod)
}
return ni
}
// Node returns overall information about this node.
func (n *NodeInfo) Node() *v1.Node {
if n == nil {
return nil
}
return n.node
}
// Pods returns all pods scheduled (including assumed to be) on this node.
func (n *NodeInfo) Pods() []*v1.Pod {
if n == nil {
return nil
}
return n.pods
}
// SetPods sets all pods scheduled (including assumed to be) on this node.
func (n *NodeInfo) SetPods(pods []*v1.Pod) {
n.pods = pods
}
// UsedPorts returns used ports on this node.
func (n *NodeInfo) UsedPorts() HostPortInfo {
if n == nil {
return nil
}
return n.usedPorts
}
// SetUsedPorts sets the used ports on this node.
func (n *NodeInfo) SetUsedPorts(newUsedPorts HostPortInfo) {
n.usedPorts = newUsedPorts
}
// ImageStates returns the state information of all images.
func (n *NodeInfo) ImageStates() map[string]*ImageStateSummary {
if n == nil {
return nil
}
return n.imageStates
}
// SetImageStates sets the state information of all images.
func (n *NodeInfo) SetImageStates(newImageStates map[string]*ImageStateSummary) {
n.imageStates = newImageStates
}
// PodsWithAffinity returns all pods with (anti)affinity constraints on this node.
func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
if n == nil {
return nil
}
return n.podsWithAffinity
}
// AllowedPodNumber returns the number of the allowed pods on this node.
func (n *NodeInfo) AllowedPodNumber() int {
if n == nil || n.allocatableResource == nil {
return 0
}
return n.allocatableResource.AllowedPodNumber
}
// Taints returns the taints list on this node.
func (n *NodeInfo) Taints() ([]v1.Taint, error) {
if n == nil {
return nil, nil
}
return n.taints, n.taintsErr
}
// SetTaints sets the taints list on this node.
func (n *NodeInfo) SetTaints(newTaints []v1.Taint) {
n.taints = newTaints
}
// RequestedResource returns aggregated resource request of pods on this node.
func (n *NodeInfo) RequestedResource() Resource {
if n == nil {
return emptyResource
}
return *n.requestedResource
}
// SetRequestedResource sets the aggregated resource request of pods on this node.
func (n *NodeInfo) SetRequestedResource(newResource *Resource) {
n.requestedResource = newResource
}
// NonZeroRequest returns aggregated nonzero resource request of pods on this node.
func (n *NodeInfo) NonZeroRequest() Resource {
if n == nil {
return emptyResource
}
return *n.nonzeroRequest
}
// SetNonZeroRequest sets the aggregated nonzero resource request of pods on this node.
func (n *NodeInfo) SetNonZeroRequest(newResource *Resource) {
n.nonzeroRequest = newResource
}
// AllocatableResource returns allocatable resources on a given node.
func (n *NodeInfo) AllocatableResource() Resource {
if n == nil {
return emptyResource
}
return *n.allocatableResource
}
// SetAllocatableResource sets the allocatableResource information of given node.
func (n *NodeInfo) SetAllocatableResource(allocatableResource *Resource) {
n.allocatableResource = allocatableResource
n.generation = nextGeneration()
}
// GetGeneration returns the generation on this node.
func (n *NodeInfo) GetGeneration() int64 {
if n == nil {
return 0
}
return n.generation
}
// SetGeneration sets the generation on this node. This is for testing only.
func (n *NodeInfo) SetGeneration(newGeneration int64) {
n.generation = newGeneration
}
// Clone returns a copy of this node.
func (n *NodeInfo) Clone() *NodeInfo {
clone := &NodeInfo{
node: n.node,
requestedResource: n.requestedResource.Clone(),
nonzeroRequest: n.nonzeroRequest.Clone(),
allocatableResource: n.allocatableResource.Clone(),
taintsErr: n.taintsErr,
TransientInfo: n.TransientInfo,
memoryPressureCondition: n.memoryPressureCondition,
diskPressureCondition: n.diskPressureCondition,
pidPressureCondition: n.pidPressureCondition,
usedPorts: make(HostPortInfo),
imageStates: n.imageStates,
generation: n.generation,
}
if len(n.pods) > 0 {
clone.pods = append([]*v1.Pod(nil), n.pods...)
}
if len(n.usedPorts) > 0 {
// HostPortInfo is a map-in-map struct
// make sure it's deep copied
for ip, portMap := range n.usedPorts {
clone.usedPorts[ip] = make(map[ProtocolPort]struct{})
for protocolPort, v := range portMap {
clone.usedPorts[ip][protocolPort] = v
}
}
}
if len(n.podsWithAffinity) > 0 {
clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...)
}
if len(n.taints) > 0 {
clone.taints = append([]v1.Taint(nil), n.taints...)
}
return clone
}
// VolumeLimits returns volume limits associated with the node
func (n *NodeInfo) VolumeLimits() map[v1.ResourceName]int64 {
volumeLimits := map[v1.ResourceName]int64{}
for k, v := range n.AllocatableResource().ScalarResources {
if v1helper.IsAttachableVolumeResourceName(k) {
volumeLimits[k] = v
}
}
return volumeLimits
}
// String returns a human-readable representation of this NodeInfo.
func (n *NodeInfo) String() string {
podKeys := make([]string, len(n.pods))
for i, pod := range n.pods {
podKeys[i] = pod.Name
}
return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v, UsedPort: %#v, AllocatableResource:%#v}",
podKeys, n.requestedResource, n.nonzeroRequest, n.usedPorts, n.allocatableResource)
}
func hasPodAffinityConstraints(pod *v1.Pod) bool {
affinity := pod.Spec.Affinity
return affinity != nil && (affinity.PodAffinity != nil || affinity.PodAntiAffinity != nil)
}
// AddPod adds pod information to this NodeInfo.
func (n *NodeInfo) AddPod(pod *v1.Pod) {
res, non0CPU, non0Mem := calculateResource(pod)
n.requestedResource.MilliCPU += res.MilliCPU
n.requestedResource.Memory += res.Memory
n.requestedResource.EphemeralStorage += res.EphemeralStorage
if n.requestedResource.ScalarResources == nil && len(res.ScalarResources) > 0 {
n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
}
for rName, rQuant := range res.ScalarResources {
n.requestedResource.ScalarResources[rName] += rQuant
}
n.nonzeroRequest.MilliCPU += non0CPU
n.nonzeroRequest.Memory += non0Mem
n.pods = append(n.pods, pod)
if hasPodAffinityConstraints(pod) {
n.podsWithAffinity = append(n.podsWithAffinity, pod)
}
// Consume ports when a pod is added.
n.UpdateUsedPorts(pod, true)
n.generation = nextGeneration()
}
// RemovePod subtracts pod information from this NodeInfo.
func (n *NodeInfo) RemovePod(pod *v1.Pod) error {
k1, err := GetPodKey(pod)
if err != nil {
return err
}
for i := range n.podsWithAffinity {
k2, err := GetPodKey(n.podsWithAffinity[i])
if err != nil {
klog.Errorf("Cannot get pod key, err: %v", err)
continue
}
if k1 == k2 {
// delete the element
n.podsWithAffinity[i] = n.podsWithAffinity[len(n.podsWithAffinity)-1]
n.podsWithAffinity = n.podsWithAffinity[:len(n.podsWithAffinity)-1]
break
}
}
for i := range n.pods {
k2, err := GetPodKey(n.pods[i])
if err != nil {
klog.Errorf("Cannot get pod key, err: %v", err)
continue
}
if k1 == k2 {
// delete the element
n.pods[i] = n.pods[len(n.pods)-1]
n.pods = n.pods[:len(n.pods)-1]
// reduce the resource data
res, non0CPU, non0Mem := calculateResource(pod)
n.requestedResource.MilliCPU -= res.MilliCPU
n.requestedResource.Memory -= res.Memory
n.requestedResource.EphemeralStorage -= res.EphemeralStorage
if len(res.ScalarResources) > 0 && n.requestedResource.ScalarResources == nil {
n.requestedResource.ScalarResources = map[v1.ResourceName]int64{}
}
for rName, rQuant := range res.ScalarResources {
n.requestedResource.ScalarResources[rName] -= rQuant
}
n.nonzeroRequest.MilliCPU -= non0CPU
n.nonzeroRequest.Memory -= non0Mem
// Release ports when a pod is removed.
n.UpdateUsedPorts(pod, false)
n.generation = nextGeneration()
n.resetSlicesIfEmpty()
return nil
}
}
return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
}
// resets the slices to nil so that we can do DeepEqual in unit tests.
func (n *NodeInfo) resetSlicesIfEmpty() {
if len(n.podsWithAffinity) == 0 {
n.podsWithAffinity = nil
}
if len(n.pods) == 0 {
n.pods = nil
}
}
func calculateResource(pod *v1.Pod) (res Resource, non0CPU int64, non0Mem int64) {
resPtr := &res
for _, c := range pod.Spec.Containers {
resPtr.Add(c.Resources.Requests)
non0CPUReq, non0MemReq := schedutil.GetNonzeroRequests(&c.Resources.Requests)
non0CPU += non0CPUReq
non0Mem += non0MemReq
// No non-zero resources for GPUs or opaque resources.
}
// If Overhead is being utilized, add to the total requests for the pod
if pod.Spec.Overhead != nil && utilfeature.DefaultFeatureGate.Enabled(features.PodOverhead) {
resPtr.Add(pod.Spec.Overhead)
if _, found := pod.Spec.Overhead[v1.ResourceCPU]; found {
non0CPU += pod.Spec.Overhead.Cpu().MilliValue()
}
if _, found := pod.Spec.Overhead[v1.ResourceMemory]; found {
non0Mem += pod.Spec.Overhead.Memory().Value()
}
}
return
}
// UpdateUsedPorts updates the UsedPorts of NodeInfo.
func (n *NodeInfo) UpdateUsedPorts(pod *v1.Pod, add bool) {
for j := range pod.Spec.Containers {
container := &pod.Spec.Containers[j]
for k := range container.Ports {
podPort := &container.Ports[k]
if add {
n.usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
} else {
n.usedPorts.Remove(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
}
}
}
}
// SetNode sets the overall node information.
func (n *NodeInfo) SetNode(node *v1.Node) error {
n.node = node
n.allocatableResource = NewResource(node.Status.Allocatable)
n.taints = node.Spec.Taints
for i := range node.Status.Conditions {
cond := &node.Status.Conditions[i]
switch cond.Type {
case v1.NodeMemoryPressure:
n.memoryPressureCondition = cond.Status
case v1.NodeDiskPressure:
n.diskPressureCondition = cond.Status
case v1.NodePIDPressure:
n.pidPressureCondition = cond.Status
default:
// We ignore other conditions.
}
}
n.TransientInfo = NewTransientSchedulerInfo()
n.generation = nextGeneration()
return nil
}
// FilterOutPods receives a list of pods and filters out those whose node names
// are equal to the node of this NodeInfo, but are not found in the pods of this NodeInfo.
//
// Preemption logic simulates removal of pods on a node by removing them from the
// corresponding NodeInfo. In order for the simulation to work, we call this method
// on the pods returned from SchedulerCache, so that predicate functions see
// only the pods that are not removed from the NodeInfo.
func (n *NodeInfo) FilterOutPods(pods []*v1.Pod) []*v1.Pod {
node := n.Node()
if node == nil {
return pods
}
filtered := make([]*v1.Pod, 0, len(pods))
for _, p := range pods {
if p.Spec.NodeName != node.Name {
filtered = append(filtered, p)
continue
}
// If pod is on the given node, add it to 'filtered' only if it is present in nodeInfo.
podKey, err := GetPodKey(p)
if err != nil {
continue
}
for _, np := range n.Pods() {
npodkey, _ := GetPodKey(np)
if npodkey == podKey {
filtered = append(filtered, p)
break
}
}
}
return filtered
}
// GetPodKey returns the string key of a pod.
func GetPodKey(pod *v1.Pod) (string, error) {
uid := string(pod.UID)
if len(uid) == 0 {
return "", errors.New("Cannot get cache key for pod with empty UID")
}
return uid, nil
}
// Filter implements the PodFilter interface. It returns false only if the pod's node name
// matches NodeInfo.node and the pod is not found in the pods list; otherwise it
// returns true.
func (n *NodeInfo) Filter(pod *v1.Pod) bool {
if pod.Spec.NodeName != n.node.Name {
return true
}
for _, p := range n.pods {
if p.Name == pod.Name && p.Namespace == pod.Namespace {
return true
}
}
return false
}
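
Taken together, the file above is an essentially verbatim move of the old nodeinfo implementation under the new package name. A small usage sketch of the exported surface; the node, pod, and printed values are illustrative only:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
)

func main() {
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-a"},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("4"),
				v1.ResourceMemory: resource.MustParse("8Gi"),
				v1.ResourcePods:   resource.MustParse("110"),
			},
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "web", UID: "uid-1"},
		Spec: v1.PodSpec{
			NodeName: "node-a",
			Containers: []v1.Container{{
				Name: "app",
				Resources: v1.ResourceRequirements{
					Requests: v1.ResourceList{
						v1.ResourceCPU:    resource.MustParse("500m"),
						v1.ResourceMemory: resource.MustParse("256Mi"),
					},
				},
			}},
		},
	}

	ni := schedulertypes.NewNodeInfo(pod) // aggregates the pod's requests via AddPod
	ni.SetNode(node)                      // caches allocatable resources, taints and conditions

	req := ni.RequestedResource()
	fmt.Printf("requested: %dm CPU, %d bytes memory\n", req.MilliCPU, req.Memory)
	fmt.Printf("allowed pods: %d\n", ni.AllowedPodNumber())

	// RemovePod reverses the accounting done by AddPod; pods are matched by UID.
	_ = ni.RemovePod(pod)
}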


@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
*/ */
package nodeinfo package types
import ( import (
"fmt" "fmt"


@ -36,7 +36,7 @@ go_library(
"//pkg/controller/replicaset:go_default_library", "//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library", "//pkg/controller/replication:go_default_library",
"//pkg/master/ports:go_default_library", "//pkg/master/ports:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/apps/v1:go_default_library", "//staging/src/k8s.io/api/apps/v1:go_default_library",
"//staging/src/k8s.io/api/autoscaling/v1:go_default_library", "//staging/src/k8s.io/api/autoscaling/v1:go_default_library",
"//staging/src/k8s.io/api/batch/v1:go_default_library", "//staging/src/k8s.io/api/batch/v1:go_default_library",


@ -36,7 +36,7 @@ import (
podutil "k8s.io/kubernetes/pkg/api/v1/pod" podutil "k8s.io/kubernetes/pkg/api/v1/pod"
extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions" extensionsinternal "k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/controller/daemon" "k8s.io/kubernetes/pkg/controller/daemon"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/test/e2e/framework" "k8s.io/kubernetes/test/e2e/framework"
e2enode "k8s.io/kubernetes/test/e2e/framework/node" e2enode "k8s.io/kubernetes/test/e2e/framework/node"
e2eresource "k8s.io/kubernetes/test/e2e/framework/resource" e2eresource "k8s.io/kubernetes/test/e2e/framework/resource"
@ -688,7 +688,7 @@ func checkAtLeastOneNewPod(c clientset.Interface, ns string, label map[string]st
// canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node // canScheduleOnNode checks if a given DaemonSet can schedule pods on the given node
func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool { func canScheduleOnNode(node v1.Node, ds *appsv1.DaemonSet) bool {
newPod := daemon.NewPod(ds, node.Name) newPod := daemon.NewPod(ds, node.Name)
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
nodeInfo.SetNode(&node) nodeInfo.SetNode(&node)
taints, err := nodeInfo.Taints() taints, err := nodeInfo.Taints()
if err != nil { if err != nil {
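
The e2e helper above only swaps the NodeInfo import; the schedulability check that follows the truncated hunk boils down to matching the node's taints against the DaemonSet pod's tolerations (newPod.Spec.Tolerations). A sketch of that check, assuming the ToleratesTaint helper on v1.Toleration and ignoring the test's extra handling of non-blocking taints:

// canTolerateAll reports whether every node taint is tolerated by at least one toleration,
// e.g. canTolerateAll(taints, newPod.Spec.Tolerations).
func canTolerateAll(taints []v1.Taint, tolerations []v1.Toleration) bool {
	for i := range taints {
		tolerated := false
		for j := range tolerations {
			if tolerations[j].ToleratesTaint(&taints[i]) {
				tolerated = true
				break
			}
		}
		if !tolerated {
			return false
		}
	}
	return true
}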


@ -191,6 +191,7 @@
"k8s.io/kubernetes/pkg/scheduler/listers", "k8s.io/kubernetes/pkg/scheduler/listers",
"k8s.io/kubernetes/pkg/scheduler/metrics", "k8s.io/kubernetes/pkg/scheduler/metrics",
"k8s.io/kubernetes/pkg/scheduler/nodeinfo", "k8s.io/kubernetes/pkg/scheduler/nodeinfo",
"k8s.io/kubernetes/pkg/scheduler/types",
"k8s.io/kubernetes/pkg/scheduler/util", "k8s.io/kubernetes/pkg/scheduler/util",
"k8s.io/kubernetes/pkg/scheduler/volumebinder", "k8s.io/kubernetes/pkg/scheduler/volumebinder",
"k8s.io/kubernetes/pkg/security/apparmor", "k8s.io/kubernetes/pkg/security/apparmor",


@ -10,7 +10,7 @@ go_library(
importpath = "k8s.io/kubernetes/test/e2e/framework/node", importpath = "k8s.io/kubernetes/test/e2e/framework/node",
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/fields:go_default_library",


@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/sets"
clientset "k8s.io/client-go/kubernetes" clientset "k8s.io/client-go/kubernetes"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
e2elog "k8s.io/kubernetes/test/e2e/framework/log" e2elog "k8s.io/kubernetes/test/e2e/framework/log"
"k8s.io/kubernetes/test/e2e/system" "k8s.io/kubernetes/test/e2e/system"
) )
@ -391,7 +391,7 @@ func isNodeUntaintedWithNonblocking(node *v1.Node, nonblockingTaints string) boo
}, },
} }
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
// Simple lookup for nonblocking taints based on comma-delimited list. // Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{} nonblockingTaintsMap := map[string]struct{}{}


@ -44,7 +44,7 @@ go_library(
"//pkg/kubeapiserver:go_default_library", "//pkg/kubeapiserver:go_default_library",
"//pkg/kubelet/client:go_default_library", "//pkg/kubelet/client:go_default_library",
"//pkg/master:go_default_library", "//pkg/master:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library", "//pkg/scheduler/types:go_default_library",
"//pkg/util/env:go_default_library", "//pkg/util/env:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",


@ -34,7 +34,7 @@ import (
"k8s.io/klog" "k8s.io/klog"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle" nodectlr "k8s.io/kubernetes/pkg/controller/nodelifecycle"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
testutils "k8s.io/kubernetes/test/utils" testutils "k8s.io/kubernetes/test/utils"
) )
@ -250,7 +250,7 @@ func isNodeUntainted(node *v1.Node) bool {
}, },
} }
nodeInfo := schedulernodeinfo.NewNodeInfo() nodeInfo := schedulertypes.NewNodeInfo()
// Simple lookup for nonblocking taints based on comma-delimited list. // Simple lookup for nonblocking taints based on comma-delimited list.
nonblockingTaintsMap := map[string]struct{}{} nonblockingTaintsMap := map[string]struct{}{}


@ -31,9 +31,9 @@ go_test(
"//pkg/scheduler/apis/config:go_default_library", "//pkg/scheduler/apis/config:go_default_library",
"//pkg/scheduler/framework/plugins/defaultbinder:go_default_library", "//pkg/scheduler/framework/plugins/defaultbinder:go_default_library",
"//pkg/scheduler/framework/v1alpha1:go_default_library", "//pkg/scheduler/framework/v1alpha1:go_default_library",
"//pkg/scheduler/nodeinfo:go_default_library",
"//pkg/scheduler/profile:go_default_library", "//pkg/scheduler/profile:go_default_library",
"//pkg/scheduler/testing:go_default_library", "//pkg/scheduler/testing:go_default_library",
"//pkg/scheduler/types:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library", "//plugin/pkg/admission/podtolerationrestriction/apis/podtolerationrestriction:go_default_library",
"//plugin/pkg/admission/priority:go_default_library", "//plugin/pkg/admission/priority:go_default_library",


@ -32,7 +32,7 @@ import (
schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder" "k8s.io/kubernetes/pkg/scheduler/framework/plugins/defaultbinder"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
testutils "k8s.io/kubernetes/test/integration/util" testutils "k8s.io/kubernetes/test/integration/util"
) )
@ -214,7 +214,7 @@ func (fp *FilterPlugin) reset() {
// Filter is a test function that returns an error or nil, depending on the // Filter is a test function that returns an error or nil, depending on the
// value of "failFilter". // value of "failFilter".
func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status { func (fp *FilterPlugin) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
fp.numFilterCalled++ fp.numFilterCalled++
if fp.failFilter { if fp.failFilter {


@ -43,7 +43,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler" "k8s.io/kubernetes/pkg/scheduler"
schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" schedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1" framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo" schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
"k8s.io/kubernetes/plugin/pkg/admission/priority" "k8s.io/kubernetes/plugin/pkg/admission/priority"
testutils "k8s.io/kubernetes/test/integration/util" testutils "k8s.io/kubernetes/test/integration/util"
utils "k8s.io/kubernetes/test/utils" utils "k8s.io/kubernetes/test/utils"
@ -84,7 +84,7 @@ func (fp *tokenFilter) Name() string {
} }
func (fp *tokenFilter) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod, func (fp *tokenFilter) Filter(ctx context.Context, state *framework.CycleState, pod *v1.Pod,
nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status { nodeInfo *schedulertypes.NodeInfo) *framework.Status {
if fp.Tokens > 0 { if fp.Tokens > 0 {
fp.Tokens-- fp.Tokens--
return nil return nil
@ -101,13 +101,13 @@ func (fp *tokenFilter) PreFilter(ctx context.Context, state *framework.CycleStat
} }
func (fp *tokenFilter) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, func (fp *tokenFilter) AddPod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
podToAdd *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status { podToAdd *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
fp.Tokens-- fp.Tokens--
return nil return nil
} }
func (fp *tokenFilter) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod, func (fp *tokenFilter) RemovePod(ctx context.Context, state *framework.CycleState, podToSchedule *v1.Pod,
podToRemove *v1.Pod, nodeInfo *schedulernodeinfo.NodeInfo) *framework.Status { podToRemove *v1.Pod, nodeInfo *schedulertypes.NodeInfo) *framework.Status {
fp.Tokens++ fp.Tokens++
return nil return nil
} }