move scheduler nodeinfo to pkg/scheduler/types
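This is a mechanical package move: every reference to the NodeInfo helpers under k8s.io/kubernetes/pkg/scheduler/nodeinfo is re-pointed at k8s.io/kubernetes/pkg/scheduler/types, and the import alias changes from schedulernodeinfo to schedulertypes; the call sites themselves keep the same shape. A minimal sketch of what a caller looks like after the move (the package clause and the helper name buildExpectedNodeInfo are illustrative only, assuming the relocated package keeps the same NodeInfo API that this test file exercises):

    // Illustrative sketch, not part of the commit. The package name mirrors the
    // white-box test package used by cache_test.go.
    package cache

    import (
        v1 "k8s.io/api/core/v1"

        // Before this commit: schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
        schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
    )

    // buildExpectedNodeInfo (hypothetical) mirrors the newNodeInfo helper in the
    // diff below: only the import path and alias differ from the pre-move code.
    func buildExpectedNodeInfo(pods ...*v1.Pod) *schedulertypes.NodeInfo {
        ni := schedulertypes.NewNodeInfo(pods...)
        ni.SetRequestedResource(&schedulertypes.Resource{MilliCPU: 100, Memory: 500})
        ni.SetNonZeroRequest(&schedulertypes.Resource{MilliCPU: 100, Memory: 500})
        ni.SetUsedPorts(make(schedulertypes.HostPortInfo))
        return ni
    }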
160 pkg/scheduler/internal/cache/cache_test.go
@@ -31,11 +31,11 @@ import (
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
     "k8s.io/kubernetes/pkg/features"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    schedulertypes "k8s.io/kubernetes/pkg/scheduler/types"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
 )

-func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulernodeinfo.NodeInfo) error {
+func deepEqualWithoutGeneration(actual *nodeInfoListItem, expected *schedulertypes.NodeInfo) error {
     if (actual == nil) != (expected == nil) {
         return errors.New("one of the actual or expected is nil and the other is not")
     }
@@ -70,21 +70,21 @@ func (b *hostPortInfoBuilder) add(protocol, ip string, port int32) *hostPortInfo
     return b
 }

-func (b *hostPortInfoBuilder) build() schedulernodeinfo.HostPortInfo {
-    res := make(schedulernodeinfo.HostPortInfo)
+func (b *hostPortInfoBuilder) build() schedulertypes.HostPortInfo {
+    res := make(schedulertypes.HostPortInfo)
     for _, param := range b.inputs {
         res.Add(param.ip, param.protocol, param.port)
     }
     return res
 }

-func newNodeInfo(requestedResource *schedulernodeinfo.Resource,
-    nonzeroRequest *schedulernodeinfo.Resource,
+func newNodeInfo(requestedResource *schedulertypes.Resource,
+    nonzeroRequest *schedulertypes.Resource,
     pods []*v1.Pod,
-    usedPorts schedulernodeinfo.HostPortInfo,
-    imageStates map[string]*schedulernodeinfo.ImageStateSummary,
-) *schedulernodeinfo.NodeInfo {
-    nodeInfo := schedulernodeinfo.NewNodeInfo(pods...)
+    usedPorts schedulertypes.HostPortInfo,
+    imageStates map[string]*schedulertypes.ImageStateSummary,
+) *schedulertypes.NodeInfo {
+    nodeInfo := schedulertypes.NewNodeInfo(pods...)
     nodeInfo.SetRequestedResource(requestedResource)
     nodeInfo.SetNonZeroRequest(nonzeroRequest)
     nodeInfo.SetUsedPorts(usedPorts)
@@ -112,98 +112,98 @@ func TestAssumePodScheduled(t *testing.T) {
     tests := []struct {
         pods []*v1.Pod

-        wNodeInfo *schedulernodeinfo.NodeInfo
+        wNodeInfo *schedulertypes.NodeInfo
     }{{
         pods: []*v1.Pod{testPods[0]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{testPods[0]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }, {
         pods: []*v1.Pod{testPods[1], testPods[2]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 300,
                 Memory: 1524,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 300,
                 Memory: 1524,
             },
             []*v1.Pod{testPods[1], testPods[2]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }, { // test non-zero request
         pods: []*v1.Pod{testPods[3]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 0,
                 Memory: 0,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: schedutil.DefaultMilliCPURequest,
                 Memory: schedutil.DefaultMemoryRequest,
             },
             []*v1.Pod{testPods[3]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }, {
         pods: []*v1.Pod{testPods[4]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
                 ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 3},
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{testPods[4]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }, {
         pods: []*v1.Pod{testPods[4], testPods[5]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 300,
                 Memory: 1524,
                 ScalarResources: map[v1.ResourceName]int64{"example.com/foo": 8},
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 300,
                 Memory: 1524,
             },
             []*v1.Pod{testPods[4], testPods[5]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).add("TCP", "127.0.0.1", 8080).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }, {
         pods: []*v1.Pod{testPods[6]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{testPods[6]},
             newHostPortInfoBuilder().build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     },
     }
@@ -263,13 +263,13 @@ func TestExpirePod(t *testing.T) {
         pods []*testExpirePodStruct
         cleanupTime time.Time

-        wNodeInfo *schedulernodeinfo.NodeInfo
+        wNodeInfo *schedulertypes.NodeInfo
     }{{ // assumed pod would expires
         pods: []*testExpirePodStruct{
             {pod: testPods[0], finishBind: true, assumedTime: now},
         },
         cleanupTime: now.Add(2 * ttl),
-        wNodeInfo: schedulernodeinfo.NewNodeInfo(),
+        wNodeInfo: schedulertypes.NewNodeInfo(),
     }, { // first one would expire, second and third would not.
         pods: []*testExpirePodStruct{
             {pod: testPods[0], finishBind: true, assumedTime: now},
@@ -278,18 +278,18 @@ func TestExpirePod(t *testing.T) {
         },
         cleanupTime: now.Add(2 * ttl),
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 400,
                 Memory: 2048,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 400,
                 Memory: 2048,
             },
             // Order gets altered when removing pods.
             []*v1.Pod{testPods[2], testPods[1]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }}

@@ -336,22 +336,22 @@ func TestAddPodWillConfirm(t *testing.T) {
         podsToAssume []*v1.Pod
         podsToAdd []*v1.Pod

-        wNodeInfo *schedulernodeinfo.NodeInfo
+        wNodeInfo *schedulertypes.NodeInfo
     }{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
         podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
         podsToAdd: []*v1.Pod{testPods[0]},
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{testPods[0]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }}

@@ -438,25 +438,25 @@ func TestAddPodWillReplaceAssumed(t *testing.T) {
         podsToAdd []*v1.Pod
         podsToUpdate [][]*v1.Pod

-        wNodeInfo map[string]*schedulernodeinfo.NodeInfo
+        wNodeInfo map[string]*schedulertypes.NodeInfo
     }{{
         podsToAssume: []*v1.Pod{assumedPod.DeepCopy()},
         podsToAdd: []*v1.Pod{addedPod.DeepCopy()},
         podsToUpdate: [][]*v1.Pod{{addedPod.DeepCopy(), updatedPod.DeepCopy()}},
-        wNodeInfo: map[string]*schedulernodeinfo.NodeInfo{
+        wNodeInfo: map[string]*schedulertypes.NodeInfo{
             "assumed-node": nil,
             "actual-node": newNodeInfo(
-                &schedulernodeinfo.Resource{
+                &schedulertypes.Resource{
                     MilliCPU: 200,
                     Memory: 500,
                 },
-                &schedulernodeinfo.Resource{
+                &schedulertypes.Resource{
                     MilliCPU: 200,
                     Memory: 500,
                 },
                 []*v1.Pod{updatedPod.DeepCopy()},
                 newHostPortInfoBuilder().add("TCP", "0.0.0.0", 90).build(),
-                make(map[string]*schedulernodeinfo.ImageStateSummary),
+                make(map[string]*schedulertypes.ImageStateSummary),
             ),
         },
     }}
@@ -499,21 +499,21 @@ func TestAddPodAfterExpiration(t *testing.T) {
     tests := []struct {
         pod *v1.Pod

-        wNodeInfo *schedulernodeinfo.NodeInfo
+        wNodeInfo *schedulertypes.NodeInfo
     }{{
         pod: basePod,
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{basePod},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }}

@@ -555,34 +555,34 @@ func TestUpdatePod(t *testing.T) {
         podsToAdd []*v1.Pod
         podsToUpdate []*v1.Pod

-        wNodeInfo []*schedulernodeinfo.NodeInfo
+        wNodeInfo []*schedulertypes.NodeInfo
     }{{ // add a pod and then update it twice
         podsToAdd: []*v1.Pod{testPods[0]},
         podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
-        wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo(
-            &schedulernodeinfo.Resource{
+        wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
+            &schedulertypes.Resource{
                 MilliCPU: 200,
                 Memory: 1024,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 200,
                 Memory: 1024,
             },
             []*v1.Pod{testPods[1]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ), newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{testPods[0]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         )},
     }}

@@ -686,35 +686,35 @@ func TestExpireAddUpdatePod(t *testing.T) {
         podsToAdd []*v1.Pod
         podsToUpdate []*v1.Pod

-        wNodeInfo []*schedulernodeinfo.NodeInfo
+        wNodeInfo []*schedulertypes.NodeInfo
     }{{ // Pod is assumed, expired, and added. Then it would be updated twice.
         podsToAssume: []*v1.Pod{testPods[0]},
         podsToAdd: []*v1.Pod{testPods[0]},
         podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
-        wNodeInfo: []*schedulernodeinfo.NodeInfo{newNodeInfo(
-            &schedulernodeinfo.Resource{
+        wNodeInfo: []*schedulertypes.NodeInfo{newNodeInfo(
+            &schedulertypes.Resource{
                 MilliCPU: 200,
                 Memory: 1024,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 200,
                 Memory: 1024,
             },
             []*v1.Pod{testPods[1]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 8080).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ), newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{testPods[0]},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         )},
     }}

@@ -780,21 +780,21 @@ func TestEphemeralStorageResource(t *testing.T) {
     podE := makePodWithEphemeralStorage(nodeName, "500")
     tests := []struct {
         pod *v1.Pod
-        wNodeInfo *schedulernodeinfo.NodeInfo
+        wNodeInfo *schedulertypes.NodeInfo
     }{
         {
             pod: podE,
             wNodeInfo: newNodeInfo(
-                &schedulernodeinfo.Resource{
+                &schedulertypes.Resource{
                     EphemeralStorage: 500,
                 },
-                &schedulernodeinfo.Resource{
+                &schedulertypes.Resource{
                     MilliCPU: schedutil.DefaultMilliCPURequest,
                     Memory: schedutil.DefaultMemoryRequest,
                 },
                 []*v1.Pod{podE},
-                schedulernodeinfo.HostPortInfo{},
-                make(map[string]*schedulernodeinfo.ImageStateSummary),
+                schedulertypes.HostPortInfo{},
+                make(map[string]*schedulertypes.ImageStateSummary),
             ),
         },
     }
@@ -827,7 +827,7 @@ func TestRemovePod(t *testing.T) {
     tests := []struct {
         nodes []*v1.Node
         pod *v1.Pod
-        wNodeInfo *schedulernodeinfo.NodeInfo
+        wNodeInfo *schedulertypes.NodeInfo
     }{{
         nodes: []*v1.Node{
             {
@@ -839,17 +839,17 @@ func TestRemovePod(t *testing.T) {
         },
         pod: basePod,
         wNodeInfo: newNodeInfo(
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
-            &schedulernodeinfo.Resource{
+            &schedulertypes.Resource{
                 MilliCPU: 100,
                 Memory: 500,
             },
             []*v1.Pod{basePod},
             newHostPortInfoBuilder().add("TCP", "127.0.0.1", 80).build(),
-            make(map[string]*schedulernodeinfo.ImageStateSummary),
+            make(map[string]*schedulertypes.ImageStateSummary),
         ),
     }}

@@ -930,7 +930,7 @@ func TestForgetPod(t *testing.T) {
 // getResourceRequest returns the resource request of all containers in Pods;
 // excluding initContainers.
 func getResourceRequest(pod *v1.Pod) v1.ResourceList {
-    result := &schedulernodeinfo.Resource{}
+    result := &schedulertypes.Resource{}
     for _, container := range pod.Spec.Containers {
         result.Add(container.Resources.Requests)
     }
@@ -939,13 +939,13 @@ func getResourceRequest(pod *v1.Pod) v1.ResourceList {
 }

 // buildNodeInfo creates a NodeInfo by simulating node operations in cache.
-func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulernodeinfo.NodeInfo {
-    expected := schedulernodeinfo.NewNodeInfo()
+func buildNodeInfo(node *v1.Node, pods []*v1.Pod) *schedulertypes.NodeInfo {
+    expected := schedulertypes.NewNodeInfo()

     // Simulate SetNode.
     expected.SetNode(node)

-    expected.SetAllocatableResource(schedulernodeinfo.NewResource(node.Status.Allocatable))
+    expected.SetAllocatableResource(schedulertypes.NewResource(node.Status.Allocatable))
     expected.SetTaints(node.Spec.Taints)
     expected.SetGeneration(expected.GetGeneration() + 1)

@@ -1156,7 +1156,7 @@ func TestNodeOperators(t *testing.T) {
         }
         got, found = cache.nodes[node.Name]
         if !found {
-            t.Errorf("Failed to find node %v in schedulernodeinfo after UpdateNode.", node.Name)
+            t.Errorf("Failed to find node %v in schedulertypes after UpdateNode.", node.Name)
         }
         if got.info.GetGeneration() <= expected.GetGeneration() {
             t.Errorf("Generation is not incremented. got: %v, expected: %v", got.info.GetGeneration(), expected.GetGeneration())
@@ -1164,7 +1164,7 @@ func TestNodeOperators(t *testing.T) {
         expected.SetGeneration(got.info.GetGeneration())

         if !reflect.DeepEqual(got.info, expected) {
-            t.Errorf("Failed to update node in schedulernodeinfo:\n got: %+v \nexpected: %+v", got, expected)
+            t.Errorf("Failed to update node in schedulertypes:\n got: %+v \nexpected: %+v", got, expected)
         }
         // Check nodeTree after update
         if cache.nodeTree.numNodes != 1 || cache.nodeTree.next() != node.Name {
@@ -1533,8 +1533,8 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *Snapshot)
         return fmt.Errorf("unexpected number of nodes in NodeInfoList. Expected: %v, got: %v", len(cache.nodes), len(snapshot.nodeInfoList))
     }

-    expectedNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
-    expectedHavePodsWithAffinityNodeInfoList := make([]*schedulernodeinfo.NodeInfo, 0, cache.nodeTree.numNodes)
+    expectedNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
+    expectedHavePodsWithAffinityNodeInfoList := make([]*schedulertypes.NodeInfo, 0, cache.nodeTree.numNodes)
     for i := 0; i < cache.nodeTree.numNodes; i++ {
         nodeName := cache.nodeTree.next()
         if n := snapshot.nodeInfoMap[nodeName]; n != nil {