Rename DefaultPodTopologySpread plugin #91994
pkg/scheduler/framework/plugins/selectorspread/BUILD (new file, 52 lines)
@@ -0,0 +1,52 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

go_library(
    name = "go_default_library",
    srcs = ["selector_spread.go"],
    importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/selectorspread",
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/scheduler/framework/plugins/helper:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/util/node:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
    ],
)

go_test(
    name = "go_default_test",
    srcs = [
        "selector_spread_perf_test.go",
        "selector_spread_test.go",
    ],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/framework/runtime:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/internal/cache:go_default_library",
        "//pkg/scheduler/internal/parallelize:go_default_library",
        "//pkg/scheduler/testing:go_default_library",
        "//staging/src/k8s.io/api/apps/v1:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
pkg/scheduler/framework/plugins/selectorspread/selector_spread.go (new file, 217 lines)
@@ -0,0 +1,217 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package selectorspread

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/helper"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	utilnode "k8s.io/kubernetes/pkg/util/node"
)

// SelectorSpread is a plugin that calculates selector spread priority.
type SelectorSpread struct {
	handle framework.FrameworkHandle
}

var _ framework.PreScorePlugin = &SelectorSpread{}
var _ framework.ScorePlugin = &SelectorSpread{}

const (
	// Name is the name of the plugin used in the plugin registry and configurations.
	Name = "SelectorSpread"
	// preScoreStateKey is the key in CycleState to SelectorSpread pre-computed data for Scoring.
	preScoreStateKey = "PreScore" + Name

	// When zone information is present, give 2/3 of the weighting to zone spreading, 1/3 to node spreading.
	// TODO: Any way to justify this weighting?
	zoneWeighting float64 = 2.0 / 3.0
)

// Name returns the name of the plugin. It is used in logs, etc.
func (pl *SelectorSpread) Name() string {
	return Name
}

// preScoreState is computed at PreScore and used at Score.
type preScoreState struct {
	selector labels.Selector
}

// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *preScoreState) Clone() framework.StateData {
	return s
}

// skipSelectorSpread returns true if the pod's TopologySpreadConstraints are specified.
// Note that this doesn't take into account default constraints defined for
// the PodTopologySpread plugin.
func skipSelectorSpread(pod *v1.Pod) bool {
	return len(pod.Spec.TopologySpreadConstraints) != 0
}

// Score is invoked at the Score extension point.
// The "score" returned by this function is the number of matching pods on `nodeName`;
// it is normalized later.
func (pl *SelectorSpread) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
	if skipSelectorSpread(pod) {
		return 0, nil
	}

	c, err := state.Read(preScoreStateKey)
	if err != nil {
		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("Error reading %q from cycleState: %v", preScoreStateKey, err))
	}

	s, ok := c.(*preScoreState)
	if !ok {
		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("cannot convert %+v to selectorspread.preScoreState", c))
	}

	nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
	if err != nil {
		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
	}

	count := countMatchingPods(pod.Namespace, s.selector, nodeInfo)
	return int64(count), nil
}

// NormalizeScore is invoked after scoring all nodes.
// For this plugin, it calculates the score of each node based on the number
// of existing matching pods on the node. Where zone information is present
// on the nodes, it favors nodes in zones with fewer existing matching pods.
func (pl *SelectorSpread) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
	if skipSelectorSpread(pod) {
		return nil
	}

	countsByZone := make(map[string]int64, 10)
	maxCountByZone := int64(0)
	maxCountByNodeName := int64(0)

	for i := range scores {
		if scores[i].Score > maxCountByNodeName {
			maxCountByNodeName = scores[i].Score
		}
		nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(scores[i].Name)
		if err != nil {
			return framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", scores[i].Name, err))
		}
		zoneID := utilnode.GetZoneKey(nodeInfo.Node())
		if zoneID == "" {
			continue
		}
		countsByZone[zoneID] += scores[i].Score
	}

	for zoneID := range countsByZone {
		if countsByZone[zoneID] > maxCountByZone {
			maxCountByZone = countsByZone[zoneID]
		}
	}

	haveZones := len(countsByZone) != 0

	maxCountByNodeNameFloat64 := float64(maxCountByNodeName)
	maxCountByZoneFloat64 := float64(maxCountByZone)
	MaxNodeScoreFloat64 := float64(framework.MaxNodeScore)

	for i := range scores {
		// initializing to the default/max node score of maxPriority
		fScore := MaxNodeScoreFloat64
		if maxCountByNodeName > 0 {
			fScore = MaxNodeScoreFloat64 * (float64(maxCountByNodeName-scores[i].Score) / maxCountByNodeNameFloat64)
		}
		// If there is zone information present, incorporate it
		if haveZones {
			nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(scores[i].Name)
			if err != nil {
				return framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", scores[i].Name, err))
			}

			zoneID := utilnode.GetZoneKey(nodeInfo.Node())
			if zoneID != "" {
				zoneScore := MaxNodeScoreFloat64
				if maxCountByZone > 0 {
					zoneScore = MaxNodeScoreFloat64 * (float64(maxCountByZone-countsByZone[zoneID]) / maxCountByZoneFloat64)
				}
				fScore = (fScore * (1.0 - zoneWeighting)) + (zoneWeighting * zoneScore)
			}
		}
		scores[i].Score = int64(fScore)
	}
	return nil
}

// ScoreExtensions of the Score plugin.
func (pl *SelectorSpread) ScoreExtensions() framework.ScoreExtensions {
	return pl
}

// PreScore builds and writes cycle state used by Score and NormalizeScore.
func (pl *SelectorSpread) PreScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
	if skipSelectorSpread(pod) {
		return nil
	}
	var selector labels.Selector
	informerFactory := pl.handle.SharedInformerFactory()
	selector = helper.DefaultSelector(
		pod,
		informerFactory.Core().V1().Services().Lister(),
		informerFactory.Core().V1().ReplicationControllers().Lister(),
		informerFactory.Apps().V1().ReplicaSets().Lister(),
		informerFactory.Apps().V1().StatefulSets().Lister(),
	)
	state := &preScoreState{
		selector: selector,
	}
	cycleState.Write(preScoreStateKey, state)
	return nil
}

// New initializes a new plugin and returns it.
func New(_ runtime.Object, handle framework.FrameworkHandle) (framework.Plugin, error) {
	return &SelectorSpread{
		handle: handle,
	}, nil
}

// countMatchingPods counts pods based on namespace and matching all selectors.
func countMatchingPods(namespace string, selector labels.Selector, nodeInfo *framework.NodeInfo) int {
	if len(nodeInfo.Pods) == 0 || selector.Empty() {
		return 0
	}
	count := 0
	for _, p := range nodeInfo.Pods {
		// Ignore pods being deleted for spreading purposes
		// Similar to how it is done for SelectorSpreadPriority
		if namespace == p.Pod.Namespace && p.Pod.DeletionTimestamp == nil {
			if selector.Matches(labels.Set(p.Pod.Labels)) {
				count++
			}
		}
	}
	return count
}
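A note for reviewers (not part of the diff): the arithmetic in NormalizeScore above is easier to follow with concrete numbers. The following self-contained sketch mirrors how the per-node and per-zone counts are blended with the 2/3 zoneWeighting; the node names, counts, and zones are made-up illustrative values, and maxNodeScore stands in for framework.MaxNodeScore (100).

package main

import "fmt"

const (
	maxNodeScore  = 100.0     // stands in for framework.MaxNodeScore
	zoneWeighting = 2.0 / 3.0 // same constant as in the plugin
)

func main() {
	// Hypothetical raw Score outputs (matching-pod counts) and zone layout.
	names := []string{"nodeA", "nodeB", "nodeC"}
	counts := map[string]int64{"nodeA": 2, "nodeB": 1, "nodeC": 0}
	zones := map[string]string{"nodeA": "zone1", "nodeB": "zone1", "nodeC": "zone2"}

	// Aggregate the counts the same way NormalizeScore does.
	var maxByNode, maxByZone int64
	countsByZone := map[string]int64{}
	for _, n := range names {
		if counts[n] > maxByNode {
			maxByNode = counts[n]
		}
		countsByZone[zones[n]] += counts[n]
	}
	for _, c := range countsByZone {
		if c > maxByZone {
			maxByZone = c
		}
	}

	// Blend the node-level and zone-level scores with zoneWeighting.
	for _, n := range names {
		nodeScore := maxNodeScore
		if maxByNode > 0 {
			nodeScore = maxNodeScore * float64(maxByNode-counts[n]) / float64(maxByNode)
		}
		zoneScore := maxNodeScore
		if maxByZone > 0 {
			zoneScore = maxNodeScore * float64(maxByZone-countsByZone[zones[n]]) / float64(maxByZone)
		}
		final := nodeScore*(1.0-zoneWeighting) + zoneWeighting*zoneScore
		fmt.Printf("%s: node=%.0f zone=%.0f final=%d\n", n, nodeScore, zoneScore, int64(final))
	}
}

With these made-up inputs, nodeC (no matching pods on the node or in its zone) keeps the full score of 100, nodeB ends up around 16 because its zone is already crowded, and nodeA drops to 0.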
pkg/scheduler/framework/plugins/selectorspread/selector_spread_perf_test.go (new file, 95 lines)
@@ -0,0 +1,95 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package selectorspread

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
	"k8s.io/kubernetes/pkg/scheduler/internal/parallelize"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
)

var (
	tests = []struct {
		name            string
		existingPodsNum int
		allNodesNum     int
	}{
		{
			name:            "100nodes",
			existingPodsNum: 1000,
			allNodesNum:     100,
		},
		{
			name:            "1000nodes",
			existingPodsNum: 10000,
			allNodesNum:     1000,
		},
	}
)

func BenchmarkTestSelectorSpreadPriority(b *testing.B) {
	for _, tt := range tests {
		b.Run(tt.name, func(b *testing.B) {
			pod := st.MakePod().Name("p").Label("foo", "").Obj()
			existingPods, allNodes, filteredNodes := st.MakeNodesAndPodsForEvenPodsSpread(pod.Labels, tt.existingPodsNum, tt.allNodesNum, tt.allNodesNum)
			snapshot := cache.NewSnapshot(existingPods, allNodes)
			client := fake.NewSimpleClientset(
				&v1.Service{Spec: v1.ServiceSpec{Selector: map[string]string{"foo": ""}}},
			)
			ctx := context.Background()
			informerFactory := informers.NewSharedInformerFactory(client, 0)
			_ = informerFactory.Core().V1().Services().Lister()
			informerFactory.Start(ctx.Done())
			caches := informerFactory.WaitForCacheSync(ctx.Done())
			for _, synced := range caches {
				if !synced {
					b.Errorf("error waiting for informer cache sync")
				}
			}
			fh, _ := runtime.NewFramework(nil, nil, nil, runtime.WithSnapshotSharedLister(snapshot), runtime.WithInformerFactory(informerFactory))
			plugin := &SelectorSpread{handle: fh}
			b.ResetTimer()

			for i := 0; i < b.N; i++ {
				state := framework.NewCycleState()
				status := plugin.PreScore(ctx, state, pod, allNodes)
				if !status.IsSuccess() {
					b.Fatalf("unexpected error: %v", status)
				}
				gotList := make(framework.NodeScoreList, len(filteredNodes))
				scoreNode := func(i int) {
					n := filteredNodes[i]
					score, _ := plugin.Score(ctx, state, pod, n.Name)
					gotList[i] = framework.NodeScore{Name: n.Name, Score: score}
				}
				parallelize.Until(ctx, len(filteredNodes), scoreNode)
				status = plugin.NormalizeScore(ctx, state, pod, gotList)
				if !status.IsSuccess() {
					b.Fatal(status)
				}
			}
		})
	}
}
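A note for reviewers (not part of the diff): the benchmark above can be run on its own with the standard Go tooling. Assuming a Kubernetes source checkout, something like the following should work; the -run='^$' filter skips the unit tests so only the benchmark executes.

	go test -run='^$' -bench=BenchmarkTestSelectorSpreadPriority ./pkg/scheduler/framework/plugins/selectorspread/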
pkg/scheduler/framework/plugins/selectorspread/selector_spread_test.go (new file, 727 lines)
@@ -0,0 +1,727 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package selectorspread

import (
	"context"
	"fmt"
	"reflect"
	"sort"
	"testing"

	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	frameworkruntime "k8s.io/kubernetes/pkg/scheduler/framework/runtime"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	"k8s.io/kubernetes/pkg/scheduler/internal/cache"
)

func controllerRef(kind, name, uid string) []metav1.OwnerReference {
	// TODO: Uncomment the code below once ControllerRef is implemented.
	return nil
	//trueVar := true
	//return []metav1.OwnerReference{
	//	{Kind: kind, Name: name, UID: types.UID(uid), Controller: &trueVar},
	//}
}

func TestSelectorSpreadScore(t *testing.T) {
	labels1 := map[string]string{
		"foo": "bar",
		"baz": "blah",
	}
	labels2 := map[string]string{
		"bar": "foo",
		"baz": "blah",
	}
	zone1Spec := v1.PodSpec{
		NodeName: "machine1",
	}
	zone2Spec := v1.PodSpec{
		NodeName: "machine2",
	}
	tests := []struct {
		pod          *v1.Pod
		pods         []*v1.Pod
		nodes        []string
		rcs          []*v1.ReplicationController
		rss          []*apps.ReplicaSet
		services     []*v1.Service
		sss          []*apps.StatefulSet
		expectedList framework.NodeScoreList
		name         string
	}{
		{
			pod:          new(v1.Pod),
			nodes:        []string{"machine1", "machine2"},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "nothing scheduled",
		},
		{
			pod:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods:         []*v1.Pod{{Spec: zone1Spec}},
			nodes:        []string{"machine1", "machine2"},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "no services",
		},
		{
			pod:          &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods:         []*v1.Pod{{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}}},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "different services",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			name:         "two pods, one service pod",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			name:         "five pods, one service pod in no namespace",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: metav1.NamespaceDefault}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			name:         "four pods, one service pod in default namespace",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: metav1.NamespaceDefault}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: metav1.ObjectMeta{Namespace: "ns1"}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: 0}},
			name:         "five pods, one service pod in specific namespace",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "three pods, two service pods on different machines",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 50}, {Name: "machine2", Score: 0}},
			name:         "four pods, three service pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			name:         "service with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			},
			nodes:    []string{"machine1", "machine2"},
			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels1,
			// so we assume we want to spread pod2 and pod3 and not pod1.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "service with partial pod label matches with service and replication controller",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			},
			nodes:    []string{"machine1", "machine2"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "service with partial pod label matches with service and replica set",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "service with partial pod label matches with service and stateful set",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			},
			nodes:    []string{"machine1", "machine2"},
			rcs:      []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			// Taken together, the Service and Replication Controller should match no pods.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "disjoined service and replication controller matches no pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			},
			nodes:    []string{"machine1", "machine2"},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			rss:      []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "disjoined service and replica set matches no pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes:        []string{"machine1", "machine2"},
			services:     []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: framework.MaxNodeScore}, {Name: "machine2", Score: framework.MaxNodeScore}},
			name:         "disjoined service and stateful set matches no pods",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			rcs:   []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
			// Both Nodes have one pod from the given RC, hence both get 0 score.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "Replication controller with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "Replica set with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "StatefulSet with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
			},
			nodes:        []string{"machine1", "machine2"},
			rcs:          []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			name:         "Another replication controller with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			rss:   []*apps.ReplicaSet{{Spec: apps.ReplicaSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			name:         "Another replication set with partial pod label matches",
		},
		{
			pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes: []string{"machine1", "machine2"},
			sss:   []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			// We use StatefulSet, instead of ReplicationController. The result should be exactly as above.
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 50}},
			name:         "Another stateful set with partial pod label matches",
		},
		{
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Labels:          labels1,
					OwnerReferences: controllerRef("StatefulSet", "name", "abc123"),
				},
				Spec: v1.PodSpec{
					TopologySpreadConstraints: []v1.TopologySpreadConstraint{
						{
							MaxSkew:           1,
							TopologyKey:       "foo",
							WhenUnsatisfiable: v1.DoNotSchedule,
						},
					},
				},
			},
			pods: []*v1.Pod{
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone1Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
				{Spec: zone2Spec, ObjectMeta: metav1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("StatefulSet", "name", "abc123")}},
			},
			nodes:        []string{"machine1", "machine2"},
			sss:          []*apps.StatefulSet{{Spec: apps.StatefulSetSpec{Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
			expectedList: []framework.NodeScore{{Name: "machine1", Score: 0}, {Name: "machine2", Score: 0}},
			name:         "Another stateful set with TopologySpreadConstraints set in pod",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			nodes := makeNodeList(test.nodes)
			snapshot := cache.NewSnapshot(test.pods, nodes)
			ctx := context.Background()
			informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
			if err != nil {
				t.Errorf("error creating informerFactory: %+v", err)
			}
			fh, err := frameworkruntime.NewFramework(nil, nil, nil, frameworkruntime.WithSnapshotSharedLister(snapshot), frameworkruntime.WithInformerFactory(informerFactory))
			if err != nil {
				t.Errorf("error creating new framework handle: %+v", err)
			}

			state := framework.NewCycleState()

			plugin := &SelectorSpread{
				handle: fh,
			}

			status := plugin.PreScore(ctx, state, test.pod, nodes)
			if !status.IsSuccess() {
				t.Fatalf("unexpected error: %v", status)
			}

			var gotList framework.NodeScoreList
			for _, nodeName := range test.nodes {
				score, status := plugin.Score(ctx, state, test.pod, nodeName)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
			}

			status = plugin.ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
			if !status.IsSuccess() {
				t.Errorf("unexpected error: %v", status)
			}

			if !reflect.DeepEqual(test.expectedList, gotList) {
				t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList)
			}
		})
	}
}

func buildPod(nodeName string, labels map[string]string, ownerRefs []metav1.OwnerReference) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs},
		Spec:       v1.PodSpec{NodeName: nodeName},
	}
}

func TestZoneSelectorSpreadPriority(t *testing.T) {
	labels1 := map[string]string{
		"label1": "l1",
		"baz":    "blah",
	}
	labels2 := map[string]string{
		"label2": "l2",
		"baz":    "blah",
	}

	const nodeMachine1Zone1 = "machine1.zone1"
	const nodeMachine1Zone2 = "machine1.zone2"
	const nodeMachine2Zone2 = "machine2.zone2"
	const nodeMachine1Zone3 = "machine1.zone3"
	const nodeMachine2Zone3 = "machine2.zone3"
	const nodeMachine3Zone3 = "machine3.zone3"

	buildNodeLabels := func(failureDomain string) map[string]string {
		labels := map[string]string{
			v1.LabelZoneFailureDomain: failureDomain,
		}
		return labels
	}
	labeledNodes := map[string]map[string]string{
		nodeMachine1Zone1: buildNodeLabels("zone1"),
		nodeMachine1Zone2: buildNodeLabels("zone2"),
		nodeMachine2Zone2: buildNodeLabels("zone2"),
		nodeMachine1Zone3: buildNodeLabels("zone3"),
		nodeMachine2Zone3: buildNodeLabels("zone3"),
		nodeMachine3Zone3: buildNodeLabels("zone3"),
	}

	tests := []struct {
		pod          *v1.Pod
		pods         []*v1.Pod
		rcs          []*v1.ReplicationController
		rss          []*apps.ReplicaSet
		services     []*v1.Service
		sss          []*apps.StatefulSet
		expectedList framework.NodeScoreList
		name         string
	}{
		{
			pod: new(v1.Pod),
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
			},
			name: "nothing scheduled",
		},
		{
			pod:  buildPod("", labels1, nil),
			pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
			},
			name: "no services",
		},
		{
			pod:      buildPod("", labels1, nil),
			pods:     []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
			},
			name: "different services",
		},
		{
			pod: buildPod("", labels1, nil),
			pods: []*v1.Pod{
				buildPod(nodeMachine1Zone1, labels2, nil),
				buildPod(nodeMachine1Zone2, labels2, nil),
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone2, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
			},
			name: "two pods, 0 matching",
		},
		{
			pod: buildPod("", labels1, nil),
			pods: []*v1.Pod{
				buildPod(nodeMachine1Zone1, labels2, nil),
				buildPod(nodeMachine1Zone2, labels1, nil),
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: 0},  // Already have pod on machine
				{Name: nodeMachine2Zone2, Score: 33}, // Already have pod in zone
				{Name: nodeMachine1Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine2Zone3, Score: framework.MaxNodeScore},
				{Name: nodeMachine3Zone3, Score: framework.MaxNodeScore},
			},
			name: "two pods, 1 matching (in z2)",
		},
		{
			pod: buildPod("", labels1, nil),
			pods: []*v1.Pod{
				buildPod(nodeMachine1Zone1, labels2, nil),
				buildPod(nodeMachine1Zone2, labels1, nil),
				buildPod(nodeMachine2Zone2, labels1, nil),
				buildPod(nodeMachine1Zone3, labels2, nil),
				buildPod(nodeMachine2Zone3, labels1, nil),
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore},
				{Name: nodeMachine1Zone2, Score: 0},  // Pod on node
				{Name: nodeMachine2Zone2, Score: 0},  // Pod on node
				{Name: nodeMachine1Zone3, Score: 66}, // Pod in zone
				{Name: nodeMachine2Zone3, Score: 33}, // Pod on node
				{Name: nodeMachine3Zone3, Score: 66}, // Pod in zone
			},
			name: "five pods, 3 matching (z2=2, z3=1)",
		},
		{
			pod: buildPod("", labels1, nil),
			pods: []*v1.Pod{
				buildPod(nodeMachine1Zone1, labels1, nil),
				buildPod(nodeMachine1Zone2, labels1, nil),
				buildPod(nodeMachine2Zone2, labels2, nil),
				buildPod(nodeMachine1Zone3, labels1, nil),
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: 0},  // Pod on node
				{Name: nodeMachine1Zone2, Score: 0},  // Pod on node
				{Name: nodeMachine2Zone2, Score: 33}, // Pod in zone
				{Name: nodeMachine1Zone3, Score: 0},  // Pod on node
				{Name: nodeMachine2Zone3, Score: 33}, // Pod in zone
				{Name: nodeMachine3Zone3, Score: 33}, // Pod in zone
			},
			name: "four pods, 3 matching (z1=1, z2=1, z3=1)",
		},
		{
			pod: buildPod("", labels1, nil),
			pods: []*v1.Pod{
				buildPod(nodeMachine1Zone1, labels1, nil),
				buildPod(nodeMachine1Zone2, labels1, nil),
				buildPod(nodeMachine2Zone2, labels1, nil),
				buildPod(nodeMachine2Zone2, labels2, nil),
				buildPod(nodeMachine1Zone3, labels1, nil),
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				{Name: nodeMachine1Zone1, Score: 33}, // Pod on node
				{Name: nodeMachine1Zone2, Score: 0},  // Pod on node
				{Name: nodeMachine2Zone2, Score: 0},  // Pod in zone
				{Name: nodeMachine1Zone3, Score: 33}, // Pod on node
				{Name: nodeMachine2Zone3, Score: 66}, // Pod in zone
				{Name: nodeMachine3Zone3, Score: 66}, // Pod in zone
			},
			name: "five pods, 4 matching (z1=1, z2=2, z3=1)",
		},
		{
			pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")),
			pods: []*v1.Pod{
				buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
				buildPod(nodeMachine1Zone2, labels1, controllerRef("ReplicationController", "name", "abc123")),
				buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
			},
			rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}},
			expectedList: []framework.NodeScore{
				// Note that because we put two pods on the same node (nodeMachine1Zone3),
				// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
				// However they kind of make sense; zone1 is still most-highly favored.
				// zone3 is in general least favored, and m1.z3 particularly low priority.
				// We would probably prefer to see a bigger gap between putting a second
				// pod on m1.z2 and putting a pod on m2.z2, but the ordering is correct.
				// This is also consistent with what we have already.
				{Name: nodeMachine1Zone1, Score: framework.MaxNodeScore}, // No pods in zone
				{Name: nodeMachine1Zone2, Score: 50},                     // Pod on node
				{Name: nodeMachine2Zone2, Score: 66},                     // Pod in zone
				{Name: nodeMachine1Zone3, Score: 0},                      // Two pods on node
				{Name: nodeMachine2Zone3, Score: 33},                     // Pod in zone
				{Name: nodeMachine3Zone3, Score: 33},                     // Pod in zone
			},
			name: "Replication controller spreading (z1=0, z2=1, z3=2)",
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			nodes := makeLabeledNodeList(labeledNodes)
			snapshot := cache.NewSnapshot(test.pods, nodes)
			ctx := context.Background()
			informerFactory, err := populateAndStartInformers(ctx, test.rcs, test.rss, test.services, test.sss)
			if err != nil {
				t.Errorf("error creating informerFactory: %+v", err)
			}
			fh, err := frameworkruntime.NewFramework(nil, nil, nil, frameworkruntime.WithSnapshotSharedLister(snapshot), frameworkruntime.WithInformerFactory(informerFactory))
			if err != nil {
				t.Errorf("error creating new framework handle: %+v", err)
			}

			plugin := &SelectorSpread{
				handle: fh,
			}

			state := framework.NewCycleState()
			status := plugin.PreScore(ctx, state, test.pod, nodes)
			if !status.IsSuccess() {
				t.Fatalf("unexpected error: %v", status)
			}

			var gotList framework.NodeScoreList
			for _, n := range nodes {
				nodeName := n.ObjectMeta.Name
				score, status := plugin.Score(ctx, state, test.pod, nodeName)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
			}

			status = plugin.ScoreExtensions().NormalizeScore(ctx, state, test.pod, gotList)
			if !status.IsSuccess() {
				t.Errorf("unexpected error: %v", status)
			}

			sortNodeScoreList(test.expectedList)
			sortNodeScoreList(gotList)
			if !reflect.DeepEqual(test.expectedList, gotList) {
				t.Errorf("expected:\n\t%+v,\ngot:\n\t%+v", test.expectedList, gotList)
			}
		})
	}
}

func populateAndStartInformers(ctx context.Context, rcs []*v1.ReplicationController, rss []*apps.ReplicaSet, services []*v1.Service, sss []*apps.StatefulSet) (informers.SharedInformerFactory, error) {
	objects := make([]runtime.Object, 0, len(rcs)+len(rss)+len(services)+len(sss))
	for _, rc := range rcs {
		objects = append(objects, rc.DeepCopyObject())
	}
	for _, rs := range rss {
		objects = append(objects, rs.DeepCopyObject())
	}
	for _, service := range services {
		objects = append(objects, service.DeepCopyObject())
	}
	for _, ss := range sss {
		objects = append(objects, ss.DeepCopyObject())
	}
	client := clientsetfake.NewSimpleClientset(objects...)
	informerFactory := informers.NewSharedInformerFactory(client, 0)

	// Because we use an informer factory, we need to request the specific informers we want before calling Start().
	_ = informerFactory.Core().V1().Services().Lister()
	_ = informerFactory.Core().V1().ReplicationControllers().Lister()
	_ = informerFactory.Apps().V1().ReplicaSets().Lister()
	_ = informerFactory.Apps().V1().StatefulSets().Lister()
	informerFactory.Start(ctx.Done())
	caches := informerFactory.WaitForCacheSync(ctx.Done())
	for _, synced := range caches {
		if !synced {
			return nil, fmt.Errorf("error waiting for informer cache sync")
		}
	}
	return informerFactory, nil
}

func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
	nodes := make([]*v1.Node, 0, len(nodeMap))
	for nodeName, labels := range nodeMap {
		nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName, Labels: labels}})
	}
	return nodes
}

func makeNodeList(nodeNames []string) []*v1.Node {
	nodes := make([]*v1.Node, 0, len(nodeNames))
	for _, nodeName := range nodeNames {
		nodes = append(nodes, &v1.Node{ObjectMeta: metav1.ObjectMeta{Name: nodeName}})
	}
	return nodes
}

func sortNodeScoreList(out framework.NodeScoreList) {
	sort.Slice(out, func(i, j int) bool {
		if out[i].Score == out[j].Score {
			return out[i].Name < out[j].Name
		}
		return out[i].Score < out[j].Score
	})
}