move nodeinfo type to framework pkg

Abdullah Gharaibeh
2020-04-06 21:40:15 -04:00
parent 87966c39f1
commit a5d8172715
98 changed files with 837 additions and 1053 deletions
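
Every hunk below applies the same mechanical rename: references to the old schedulertypes package become references to framework. A minimal before/after sketch of the pattern, using only the import paths and calls visible in the hunks (the example function is illustrative, not part of the commit):

// Before: NodeInfo and its helper types lived in a dedicated types package:
//   import schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
//   ni := schedulertypes.NewNodeInfo(pods...)

// After: the same API is exported from the framework package.
import (
    v1 "k8s.io/api/core/v1"
    framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

func example(pods ...*v1.Pod) *framework.NodeInfo {
    return framework.NewNodeInfo(pods...)
}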


@@ -31,11 +31,11 @@ import (
 	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/features"
-	schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
+	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
 	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

-func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulertypes.NodeInfo) error {
+func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *framework.NodeInfo) error {
 	if (actual == nil) != (expected == nil) {
 		return errors.New("one of the actual or expected is nil and the other is not")
 	}
@@ -70,21 +70,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfoBuilder {
 	return b
 }

-func (b *hostPortInfoBuilder) build() schedulertypes.HostPortInfo {
-	res := make(schedulertypes.HostPortInfo)
+func (b *hostPortInfoBuilder) build() framework.HostPortInfo {
+	res := make(framework.HostPortInfo)
 	for _, param := range b.inputs {
 		res.Add(param.ip, param.protocol, param.port)
 	}
 	return res
 }

-func newNodeInfo(requestedResource *schedulertypes.Resource,
-	nonzeroRequest *schedulertypes.Resource,
+func newNodeInfo(requestedResource *framework.Resource,
+	nonzeroRequest *framework.Resource,
 	pods []*v1.Pod,
-	usedPorts schedulertypes.HostPortInfo,
-	imageStates map[string]*schedulertypes.ImageStateSummary,
-) *schedulertypes.NodeInfo {
-	nodeInfo := schedulertypes.NewNodeInfo(pods...)
+	usedPorts framework.HostPortInfo,
+	imageStates map[string]*framework.ImageStateSummary,
+) *framework.NodeInfo {
+	nodeInfo := framework.NewNodeInfo(pods...)
 	nodeInfo.SetRequestedResource(requestedResource)
 	nodeInfo.SetNonZeroRequest(nonzeroRequest)
 	nodeInfo.SetUsedPorts(usedPorts)
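
For reference, a sketch of how the test tables below invoke this helper; the values are copied from the first TestAssumePodScheduled case, so only the variable name "want" is new:

want := newNodeInfo(
    &framework.Resource{MilliCPU: 100, Memory: 500}, // requested resources
    &framework.Resource{MilliCPU: 100, Memory: 500}, // non-zero requests
    []*v1.Pod{testPods[0]},
    newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
    make(map[string]*framework.ImageStateSummary), // no image state recorded
)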
@@ -112,98 +112,98 @@ func TestAssumePodScheduled(t *testing.T) {
 	tests := []struct {
 		pods      []*v1.Pod
-		wNodeInfo *schedulertypes.NodeInfo
+		wNodeInfo *framework.NodeInfo
 	}{{
 		pods: []*v1.Pod{testPods[0]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{testPods[0]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}, {
 		pods: []*v1.Pod{testPods[1], testPods[2]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 300,
 				Memory:   1524,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 300,
 				Memory:   1524,
 			},
 			[]*v1.Pod{testPods[1], testPods[2]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}, { // test non-zero request
 		pods: []*v1.Pod{testPods[3]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 0,
 				Memory:   0,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: schedutil.DefaultMilliCPURequest,
 				Memory:   schedutil.DefaultMemoryRequest,
 			},
 			[]*v1.Pod{testPods[3]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}, {
 		pods: []*v1.Pod{testPods[4]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU:        100,
 				Memory:          500,
 				ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{testPods[4]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}, {
 		pods: []*v1.Pod{testPods[4], testPods[5]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU:        300,
 				Memory:          1524,
 				ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 300,
 				Memory:   1524,
 			},
 			[]*v1.Pod{testPods[4], testPods[5]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}, {
 		pods: []*v1.Pod{testPods[6]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{testPods[6]},
 			newHostPortInfoBuilder().build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	},
 	}
@@ -263,13 +263,13 @@ func TestExpirePod(t *testing.T) {
 		pods        []*testExpirePodStruct
 		cleanupTime time.Time
-		wNodeInfo *schedulertypes.NodeInfo
+		wNodeInfo *framework.NodeInfo
 	}{{ // assumed pod would expires
 		pods: []*testExpirePodStruct{
 			{pod: testPods[0], finishBind: true, assumedTime: now},
 		},
 		cleanupTime: now.Add(2 * ttl),
-		wNodeInfo: schedulertypes.NewNodeInfo(),
+		wNodeInfo: framework.NewNodeInfo(),
 	}, { // first one would expire, second and third would not.
 		pods: []*testExpirePodStruct{
 			{pod: testPods[0], finishBind: true, assumedTime: now},
@@ -278,18 +278,18 @@ func TestExpirePod(t *testing.T) {
 		},
 		cleanupTime: now.Add(2 * ttl),
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 400,
 				Memory:   2048,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 400,
 				Memory:   2048,
 			},
 			// Order gets altered when removing pods.
 			[]*v1.Pod{testPods[2], testPods[1]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}}
@@ -336,22 +336,22 @@ func TestAddPodWillConfirm(t *testing.T) {
 		podsToAssume []*v1.Pod
 		podsToAdd    []*v1.Pod
-		wNodeInfo *schedulertypes.NodeInfo
+		wNodeInfo *framework.NodeInfo
 	}{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
 		podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
 		podsToAdd:    []*v1.Pod{testPods[0]},
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{testPods[0]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}}
@@ -438,25 +438,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
 		podsToAdd    []*v1.Pod
 		podsToUpdate [][]*v1.Pod
-		wNodeInfo map[string]*schedulertypes.NodeInfo
+		wNodeInfo map[string]*framework.NodeInfo
 	}{{
 		podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
 		podsToAdd:    []*v1.Pod{addedPod.DeepCopy()},
 		podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
-		wNodeInfo: map[string]*schedulertypes.NodeInfo{
+		wNodeInfo: map[string]*framework.NodeInfo{
 			"assumed-node": nil,
 			"actual-node": newNodeInfo(
-				&schedulertypes.Resource{
+				&framework.Resource{
 					MilliCPU: 200,
 					Memory:   500,
 				},
-				&schedulertypes.Resource{
+				&framework.Resource{
 					MilliCPU: 200,
 					Memory:   500,
 				},
 				[]*v1.Pod{updatedPod.DeepCopy()},
 				newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
-				make(map[string]*schedulertypes.ImageStateSummary),
+				make(map[string]*framework.ImageStateSummary),
 			),
 		},
 	}}
@@ -499,21 +499,21 @@ func TestAddPodAfterExpiration(t *testing.T) {
 	tests := []struct {
 		pod *v1.Pod
-		wNodeInfo *schedulertypes.NodeInfo
+		wNodeInfo *framework.NodeInfo
 	}{{
 		pod: basePod,
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{basePod},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}}
@@ -555,34 +555,34 @@ func TestUpdatePod(t *testing.T) {
 		podsToAdd    []*v1.Pod
 		podsToUpdate []*v1.Pod
-		wNodeInfo []*schedulertypes.NodeInfo
+		wNodeInfo []*framework.NodeInfo
 	}{{ // add a pod and then update it twice
 		podsToAdd:    []*v1.Pod{testPods[0]},
 		podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
-		wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
-			&schedulertypes.Resource{
+		wNodeInfo: []*framework.NodeInfo{newNodeInfo(
+			&framework.Resource{
 				MilliCPU: 200,
 				Memory:   1024,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 200,
 				Memory:   1024,
 			},
 			[]*v1.Pod{testPods[1]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		), newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{testPods[0]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		)},
 	}}
@@ -686,35 +686,35 @@ func TestExpireAddUpdatePod(t *testing.T) {
 		podsToAdd    []*v1.Pod
 		podsToUpdate []*v1.Pod
-		wNodeInfo []*schedulertypes.NodeInfo
+		wNodeInfo []*framework.NodeInfo
 	}{{ // Pod is assumed, expired, and added. Then it would be updated twice.
 		podsToAssume: []*v1.Pod{testPods[0]},
 		podsToAdd:    []*v1.Pod{testPods[0]},
 		podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
-		wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
-			&schedulertypes.Resource{
+		wNodeInfo: []*framework.NodeInfo{newNodeInfo(
+			&framework.Resource{
 				MilliCPU: 200,
 				Memory:   1024,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 200,
 				Memory:   1024,
 			},
 			[]*v1.Pod{testPods[1]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		), newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{testPods[0]},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		)},
 	}}
@@ -780,21 +780,21 @@ func TestEphemeralStorageResource(t *testing.T) {
 	podE := makePodWithEphemeralStorage(nodeName, "500")
 	tests := []struct {
 		pod *v1.Pod
-		wNodeInfo *schedulertypes.NodeInfo
+		wNodeInfo *framework.NodeInfo
 	}{
 		{
 			pod: podE,
 			wNodeInfo: newNodeInfo(
-				&schedulertypes.Resource{
+				&framework.Resource{
 					EphemeralStorage: 500,
 				},
-				&schedulertypes.Resource{
+				&framework.Resource{
 					MilliCPU: schedutil.DefaultMilliCPURequest,
 					Memory:   schedutil.DefaultMemoryRequest,
 				},
 				[]*v1.Pod{podE},
-				schedulertypes.HostPortInfo{},
-				make(map[string]*schedulertypes.ImageStateSummary),
+				framework.HostPortInfo{},
+				make(map[string]*framework.ImageStateSummary),
 			),
 		},
 	}
@@ -827,7 +827,7 @@ func TestRemovePod(t *testing.T) {
 	tests := []struct {
 		nodes []*v1.Node
 		pod   *v1.Pod
-		wNodeInfo *schedulertypes.NodeInfo
+		wNodeInfo *framework.NodeInfo
 	}{{
 		nodes: []*v1.Node{
 			{
@@ -839,17 +839,17 @@ func TestRemovePod(t *testing.T) {
 		},
 		pod: basePod,
 		wNodeInfo: newNodeInfo(
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
-			&schedulertypes.Resource{
+			&framework.Resource{
 				MilliCPU: 100,
 				Memory:   500,
 			},
 			[]*v1.Pod{basePod},
 			newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-			make(map[string]*schedulertypes.ImageStateSummary),
+			make(map[string]*framework.ImageStateSummary),
 		),
 	}}
@@ -930,7 +930,7 @@ func TestForgetPod(t *testing.T) {
 // getResourceRequest returns the resource request of all containers in Pods;
 // excluding initContainers.
 func getResourceRequest(pod *v1.Pod) v1.ResourceList {
-	result := &schedulertypes.Resource{}
+	result := &framework.Resource{}
 	for _, container := range pod.Spec.Containers {
 		result.Add(container.Resources.Requests)
 	}
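
The hunk cuts off before getResourceRequest returns; presumably the elided tail converts the accumulated framework.Resource back into the v1.ResourceList the signature promises. A hedged sketch of such a conversion, covering only the two scalar fields these tests exercise (toResourceList is a hypothetical helper, not part of this diff):

import (
    v1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/resource"
)

// toResourceList maps the accumulated milliCPU and memory back to quantities.
func toResourceList(r *framework.Resource) v1.ResourceList {
    return v1.ResourceList{
        v1.ResourceCPU:    *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
        v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),
    }
}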
@@ -939,13 +939,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
 }

 // buildNodeInfo creates a NodeInfo by simulating node operations in cache.
-func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulertypes.NodeInfo {
-	expected := schedulertypes.NewNodeInfo()
+func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *framework.NodeInfo {
+	expected := framework.NewNodeInfo()

 	// Simulate SetNode.
 	expected.SetNode(node)
-	expected.SetAllocatableResource(schedulertypes.NewResource(node.Status.Allocatable))
+	expected.SetAllocatableResource(framework.NewResource(node.Status.Allocatable))
 	expected.SetTaints(node.Spec.Taints)
 	expected.SetGeneration(expected.GetGeneration() + 1)
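
A minimal sketch of the simulation pattern, assuming only the setter and getter names shown in the hunk above: the test rebuilds the expected NodeInfo outside the cache and bumps the generation by hand to mirror the cache's own bookkeeping (expectedNodeInfo is an illustrative name):

func expectedNodeInfo(node *v1.Node) *framework.NodeInfo {
    ni := framework.NewNodeInfo()
    ni.SetNode(node)
    ni.SetAllocatableResource(framework.NewResource(node.Status.Allocatable))
    ni.SetTaints(node.Spec.Taints)
    ni.SetGeneration(ni.GetGeneration() + 1) // mirror the cache's bump for SetNode
    return ni
}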
@@ -1533,8 +1533,8 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot) error {
 		return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList))
 	}

-	expectedNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
-	expectedHavePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
+	expectedNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
+	expectedHavePodsWithAffinityNodeInfoList := make([]*framework.NodeInfo, 0, cache.nodeTree.numNodes)
 	for i := 0; i < cache.nodeTree.numNodes; i++ {
 		nodeName := cache.nodeTree.next()
 		if n := snapshot.nodeInfoMap[nodeName]; n != nil {