Move service affinity predicate logic to its plugin.
@@ -223,6 +223,7 @@ func NewDefaultConfigProducerRegistry() *ConfigProducerRegistry {
		func(args ConfigProducerArgs) (plugins config.Plugins, pluginConfig []config.PluginConfig) {
			plugins.Filter = appendToPluginSet(plugins.Filter, serviceaffinity.Name, nil)
			pluginConfig = append(pluginConfig, makePluginConfig(serviceaffinity.Name, args.ServiceAffinityArgs))
			plugins.PreFilter = appendToPluginSet(plugins.PreFilter, serviceaffinity.Name, nil)
			return
		})
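For orientation, a minimal, self-contained sketch (hypothetical mini types, not the real scheduler config API) of what the registration above accomplishes: the same plugin name is now enabled at both the PreFilter and Filter extension points.

	package main

	import "fmt"

	type Plugin struct{ Name string }
	type PluginSet struct{ Enabled []Plugin }
	type Plugins struct{ PreFilter, Filter PluginSet }

	// appendToPluginSet mimics the helper used above: it enables a named plugin in a set.
	func appendToPluginSet(set PluginSet, name string) PluginSet {
		set.Enabled = append(set.Enabled, Plugin{Name: name})
		return set
	}

	func main() {
		var plugins Plugins
		plugins.Filter = appendToPluginSet(plugins.Filter, "ServiceAffinity")
		plugins.PreFilter = appendToPluginSet(plugins.PreFilter, "ServiceAffinity")
		fmt.Printf("%+v\n", plugins)
	}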
@@ -10,9 +10,13 @@ go_library(
        "//pkg/scheduler/algorithm/priorities:go_default_library",
        "//pkg/scheduler/framework/plugins/migration:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/listers:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)
@@ -21,7 +25,6 @@ go_test(
    srcs = ["service_affinity_test.go"],
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithm/priorities:go_default_library",
        "//pkg/scheduler/framework/plugins/migration:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
@@ -20,17 +20,27 @@ import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	corelisters "k8s.io/client-go/listers/core/v1"
	"k8s.io/klog"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
)

// Name is the name of the plugin used in the plugin registry and configurations.
const Name = "ServiceAffinity"
const (
	// Name is the name of the plugin used in the plugin registry and configurations.
	Name = "ServiceAffinity"

	// preFilterStateKey is the key in CycleState to ServiceAffinity pre-computed data.
	// Using the name of the plugin will likely help us avoid collisions with other plugins.
	preFilterStateKey = "PreFilter" + Name
)

// Args holds the args that are used to configure the plugin.
type Args struct {
@@ -42,39 +52,58 @@ type Args struct {
	AntiAffinityLabelsPreference []string `json:"antiAffinityLabelsPreference,omitempty"`
}

// preFilterState computed at PreFilter and used at Filter.
type preFilterState struct {
	matchingPodList     []*v1.Pod
	matchingPodServices []*v1.Service
}

// Clone the prefilter state.
func (s *preFilterState) Clone() framework.StateData {
	if s == nil {
		return nil
	}

	copy := preFilterState{}
	copy.matchingPodServices = append([]*v1.Service(nil),
		s.matchingPodServices...)
	copy.matchingPodList = append([]*v1.Pod(nil),
		s.matchingPodList...)

	return &copy
}
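Clone above copies only the slice headers; the *v1.Pod and *v1.Service pointers are shared deliberately. A self-contained sketch (hypothetical mini types) of why that is enough for later add/remove-style mutations:

	package main

	import "fmt"

	type pod struct{ name string }

	type state struct{ matching []*pod }

	// clone copies the slice into fresh backing storage, so appends or
	// deletions on the clone never touch the original; the pods themselves
	// are shared rather than deep-copied.
	func (s *state) clone() *state {
		c := &state{}
		c.matching = append([]*pod(nil), s.matching...)
		return c
	}

	func main() {
		orig := &state{matching: []*pod{{name: "p1"}}}
		cp := orig.clone()
		cp.matching = append(cp.matching, &pod{name: "p2"})
		fmt.Println(len(orig.matching), len(cp.matching)) // 1 2
	}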
// New initializes a new plugin and returns it.
func New(plArgs *runtime.Unknown, handle framework.FrameworkHandle) (framework.Plugin, error) {
	args := &Args{}
	if err := framework.DecodeInto(plArgs, args); err != nil {
	args := Args{}
	if err := framework.DecodeInto(plArgs, &args); err != nil {
		return nil, err
	}
	informerFactory := handle.SharedInformerFactory()
	nodeInfoLister := handle.SnapshotSharedLister().NodeInfos()
	podLister := handle.SnapshotSharedLister().Pods()
	serviceLister := informerFactory.Core().V1().Services().Lister()

	fitPredicate, predicateMetadataProducer := predicates.NewServiceAffinityPredicate(nodeInfoLister, podLister, serviceLister, args.AffinityLabels)
	// Once we generate the predicate we should also Register the Precomputation
	predicates.RegisterPredicateMetadataProducer(predicates.CheckServiceAffinityPred, predicateMetadataProducer)

	priorityMapFunction, priorityReduceFunction := priorities.NewServiceAntiAffinityPriority(podLister, serviceLister, args.AntiAffinityLabelsPreference)

	return &ServiceAffinity{
		handle:                 handle,
		predicate:              fitPredicate,
		sharedLister:           handle.SnapshotSharedLister(),
		serviceLister:          serviceLister,
		priorityMapFunction:    priorityMapFunction,
		priorityReduceFunction: priorityReduceFunction,
		args:                   args,
	}, nil
}

// ServiceAffinity is a plugin that checks service affinity.
type ServiceAffinity struct {
	handle                 framework.FrameworkHandle
	predicate              predicates.FitPredicate
	args                   Args
	sharedLister           schedulerlisters.SharedLister
	serviceLister          corelisters.ServiceLister
	priorityMapFunction    priorities.PriorityMapFunction
	priorityReduceFunction priorities.PriorityReduceFunction
}

var _ framework.PreFilterPlugin = &ServiceAffinity{}
var _ framework.FilterPlugin = &ServiceAffinity{}
var _ framework.ScorePlugin = &ServiceAffinity{}
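New decodes the raw plugin configuration into the typed Args before wiring up the listers. A rough stand-in for that step, using plain encoding/json in place of framework.DecodeInto (whose exact mechanics are not shown in this diff); the affinityLabels JSON tag is assumed from the field's usage in New:

	package main

	import (
		"encoding/json"
		"fmt"
	)

	// Args approximates the plugin's Args struct; the affinityLabels tag is an assumption.
	type Args struct {
		AffinityLabels               []string `json:"affinityLabels,omitempty"`
		AntiAffinityLabelsPreference []string `json:"antiAffinityLabelsPreference,omitempty"`
	}

	func main() {
		raw := []byte(`{"affinityLabels":["region"],"antiAffinityLabelsPreference":["zone"]}`)
		args := Args{}
		if err := json.Unmarshal(raw, &args); err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", args)
	}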
@@ -83,19 +112,184 @@ func (pl *ServiceAffinity) Name() string {
	return Name
}

// Filter invoked at the filter extension point.
func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	meta, ok := migration.CovertStateRefToPredMeta(migration.PredicateMetadata(cycleState))
	if !ok {
		return framework.NewStatus(framework.Error, "looking up Metadata")
func (pl *ServiceAffinity) createPreFilterState(pod *v1.Pod) (*preFilterState, error) {
	if pod == nil {
		return nil, fmt.Errorf("a pod is required to calculate service affinity preFilterState")
	}
	_, reasons, err := pl.predicate(pod, meta, nodeInfo)
	return migration.PredicateResultToFrameworkStatus(reasons, err)
	// Store services which match the pod.
	matchingPodServices, err := schedulerlisters.GetPodServices(pl.serviceLister, pod)
	if err != nil {
		return nil, fmt.Errorf("listing pod services: %v", err.Error())
	}
	selector := predicates.CreateSelectorFromLabels(pod.Labels)
	allMatches, err := pl.sharedLister.Pods().List(selector)
	if err != nil {
		return nil, fmt.Errorf("listing pods: %v", err.Error())
	}

	// consider only the pods that belong to the same namespace
	matchingPodList := predicates.FilterPodsByNamespace(allMatches, pod.Namespace)

	return &preFilterState{
		matchingPodList:     matchingPodList,
		matchingPodServices: matchingPodServices,
	}, nil
}

// PreFilter invoked at the prefilter extension point.
func (pl *ServiceAffinity) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) *framework.Status {
	s, err := pl.createPreFilterState(pod)
	if err != nil {
		return framework.NewStatus(framework.Error, fmt.Sprintf("could not create preFilterState: %v", err))
	}
	cycleState.Write(preFilterStateKey, s)
	return nil
}
// PreFilterExtensions returns prefilter extensions, pod add and remove.
func (pl *ServiceAffinity) PreFilterExtensions() framework.PreFilterExtensions {
	return pl
}

// AddPod from pre-computed data in cycleState.
func (pl *ServiceAffinity) AddPod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToAdd *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	s, err := getPreFilterState(cycleState)
	if err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}

	// If addedPod is in the same namespace as the pod, update the list
	// of matching pods if applicable.
	if s == nil || podToAdd.Namespace != podToSchedule.Namespace {
		return nil
	}

	selector := predicates.CreateSelectorFromLabels(podToSchedule.Labels)
	if selector.Matches(labels.Set(podToAdd.Labels)) {
		s.matchingPodList = append(s.matchingPodList, podToAdd)
	}

	return nil
}

// RemovePod from pre-computed data in cycleState.
func (pl *ServiceAffinity) RemovePod(ctx context.Context, cycleState *framework.CycleState, podToSchedule *v1.Pod, podToRemove *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	s, err := getPreFilterState(cycleState)
	if err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}

	if s == nil ||
		len(s.matchingPodList) == 0 ||
		podToRemove.Namespace != s.matchingPodList[0].Namespace {
		return nil
	}

	for i, pod := range s.matchingPodList {
		if pod.Name == podToRemove.Name && pod.Namespace == podToRemove.Namespace {
			s.matchingPodList = append(s.matchingPodList[:i], s.matchingPodList[i+1:]...)
			break
		}
	}

	return nil
}

func getPreFilterState(cycleState *framework.CycleState) (*preFilterState, error) {
	c, err := cycleState.Read(preFilterStateKey)
	if err != nil {
		// The metadata wasn't pre-computed in prefilter. We ignore the error for now since
		// Filter is able to handle that by computing it again.
		klog.Error(fmt.Sprintf("reading %q from cycleState: %v", preFilterStateKey, err))
		return nil, nil
	}

	if c == nil {
		return nil, nil
	}

	s, ok := c.(*preFilterState)
	if !ok {
		return nil, fmt.Errorf("%+v convert to serviceaffinity.preFilterState error", c)
	}
	return s, nil
}
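AddPod and RemovePod exist so the scheduler can cheaply adjust the pre-computed matching list while it evaluates hypothetical changes (for example, removing a preemption victim or adding a nominated pod) instead of rebuilding the state from the listers. A self-contained sketch of that incremental bookkeeping (hypothetical mini types):

	package main

	import "fmt"

	type pod struct{ name, namespace string }

	type state struct{ matching []*pod }

	// addPod appends p if it belongs to the same namespace as the pod being scheduled.
	func (s *state) addPod(schedNS string, p *pod) {
		if p.namespace == schedNS {
			s.matching = append(s.matching, p)
		}
	}

	// removePod deletes p from the matching list, if present.
	func (s *state) removePod(p *pod) {
		for i, m := range s.matching {
			if m.name == p.name && m.namespace == p.namespace {
				s.matching = append(s.matching[:i], s.matching[i+1:]...)
				return
			}
		}
	}

	func main() {
		s := &state{}
		v := &pod{name: "victim", namespace: "default"}
		s.addPod("default", v)
		fmt.Println(len(s.matching)) // 1
		s.removePod(v)
		fmt.Println(len(s.matching)) // 0
	}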
// Filter matches nodes in such a way as to force that
// ServiceAffinity.labels are homogeneous for pods that are scheduled to a node.
// (i.e. it returns true IFF this pod can be added to this node such that all other pods in
// the same service are running on nodes with the exact same ServiceAffinity.label values).
//
// For example:
// If the first pod of a service was scheduled to a node with label "region=foo",
// all subsequent pods belonging to the same service will be scheduled on
// nodes with the same "region=foo" label.
//
// Details:
//
// If the service affinity labels are a subset of the pod's node selector,
// the pod has all information necessary to check affinity, and its node selector is sufficient to calculate
// the match.
// Otherwise:
// Create an "implicit selector" which guarantees pods will land on nodes with similar values
// for the affinity labels.
//
// To do this, we "reverse engineer" a selector by introspecting existing pods running under the same service+namespace.
// These backfilled labels in the selector "L" are defined like so:
// - L is a label that the ServiceAffinity object needs as a matching constraint.
// - L is not defined in the pod itself already.
// - and SOME pod, from a service, in the same namespace, ALREADY scheduled onto a node, has a matching value.
func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
	if len(pl.args.AffinityLabels) == 0 {
		return nil
	}

	node := nodeInfo.Node()
	if node == nil {
		return framework.NewStatus(framework.Error, "node not found")
	}

	s, err := getPreFilterState(cycleState)
	if err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}
	if s == nil {
		// Make the filter resilient in case preFilterState is missing.
		s, err = pl.createPreFilterState(pod)
		if err != nil {
			return framework.NewStatus(framework.Error, fmt.Sprintf("could not create preFilterState: %v", err))
		}
	}

	pods, services := s.matchingPodList, s.matchingPodServices
	filteredPods := nodeInfo.FilterOutPods(pods)
	// check if the pod being scheduled has the affinity labels specified in its NodeSelector
	affinityLabels := predicates.FindLabelsInSet(pl.args.AffinityLabels, labels.Set(pod.Spec.NodeSelector))
	// Step 1: If we don't have all constraints, introspect nodes to find the missing constraints.
	if len(pl.args.AffinityLabels) > len(affinityLabels) {
		if len(services) > 0 {
			if len(filteredPods) > 0 {
				nodeWithAffinityLabels, err := pl.sharedLister.NodeInfos().Get(filteredPods[0].Spec.NodeName)
				if err != nil {
					return framework.NewStatus(framework.Error, "node not found")
				}
				predicates.AddUnsetLabelsToMap(affinityLabels, pl.args.AffinityLabels, labels.Set(nodeWithAffinityLabels.Node().Labels))
			}
		}
	}
	// Step 2: Finally complete the affinity predicate based on whatever set of labels we were able to find.
	if predicates.CreateSelectorFromLabels(affinityLabels).Matches(labels.Set(node.Labels)) {
		return nil
	}

	return migration.PredicateResultToFrameworkStatus([]predicates.PredicateFailureReason{predicates.ErrServiceAffinityViolated}, nil)
}
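To make the two steps of Filter concrete, a self-contained walk-through with AffinityLabels = ["region"] (plain maps stand in for the scheduler's label helpers; the data is hypothetical): the pending pod pins nothing itself, so the constraint is backfilled from the node of an already-running service pod, and a candidate node in a different region then fails the match.

	package main

	import "fmt"

	func main() {
		affinityLabels := []string{"region"}
		podNodeSelector := map[string]string{}                        // the pending pod's nodeSelector is empty
		scheduledPodsNodeLabels := map[string]string{"region": "foo"} // labels of the node running a pod of the same service

		// Step 1: backfill affinity labels that the pod's node selector does not pin.
		selector := map[string]string{}
		for _, l := range affinityLabels {
			if v, ok := podNodeSelector[l]; ok {
				selector[l] = v
			} else if v, ok := scheduledPodsNodeLabels[l]; ok {
				selector[l] = v
			}
		}

		// Step 2: a candidate node passes only if its labels match the backfilled selector.
		candidate := map[string]string{"region": "bar"}
		match := true
		for k, v := range selector {
			if candidate[k] != v {
				match = false
			}
		}
		fmt.Println(selector, "matches candidate:", match) // map[region:foo] matches candidate: false
	}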
// Score invoked at the Score extension point.
func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
	nodeInfo, err := pl.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
	nodeInfo, err := pl.sharedLister.NodeInfos().Get(nodeName)
	if err != nil {
		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
	}
@@ -107,7 +301,7 @@ func (pl *ServiceAffinity) Score(ctx context.Context, state *framework.CycleStat
// NormalizeScore invoked after scoring all nodes.
func (pl *ServiceAffinity) NormalizeScore(ctx context.Context, _ *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
	// Note that priorityReduceFunction doesn't use priority metadata, hence passing nil here.
	err := pl.priorityReduceFunction(pod, nil, pl.handle.SnapshotSharedLister(), scores)
	err := pl.priorityReduceFunction(pod, nil, pl.sharedLister, scores)
	return migration.ErrorToFrameworkStatus(err)
}
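Score and NormalizeScore follow the old priority map/reduce split: the map function rates each node independently, and the reduce step rescales all scores to the framework's range. A self-contained sketch of the normalization half (hypothetical values and helper; it mirrors the shape, not the exact formula, of the reduce function produced by NewServiceAntiAffinityPriority):

	package main

	import "fmt"

	type nodeScore struct {
		name  string
		score int64
	}

	// normalize rescales raw counts so the least-loaded node gets maxScore:
	// fewer matching pods means a higher final score, which spreads a
	// service's pods across the anti-affinity domain.
	func normalize(scores []nodeScore, maxScore int64) {
		var max int64
		for _, s := range scores {
			if s.score > max {
				max = s.score
			}
		}
		if max == 0 {
			return
		}
		for i := range scores {
			scores[i].score = maxScore * (max - scores[i].score) / max
		}
	}

	func main() {
		scores := []nodeScore{{"nodeA", 3}, {"nodeB", 0}, {"nodeC", 1}}
		normalize(scores, 100)
		fmt.Println(scores) // [{nodeA 0} {nodeB 100} {nodeC 66}]
	}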
@@ -25,7 +25,6 @@ import (
	apps "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
@@ -165,18 +164,18 @@ func TestServiceAffinity(t *testing.T) {
	nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
	snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))

	predicate, precompute := predicates.NewServiceAffinityPredicate(snapshot.NodeInfos(), snapshot.Pods(), fakelisters.ServiceLister(test.services), test.labels)
	predicates.RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)

	p := &ServiceAffinity{
		predicate:     predicate,
		sharedLister:  snapshot,
		serviceLister: fakelisters.ServiceLister(test.services),
		args: Args{
			AffinityLabels: test.labels,
		},
	}

	factory := &predicates.MetadataProducerFactory{}
	meta := factory.GetPredicateMetadata(test.pod, snapshot)
	state := framework.NewCycleState()
	state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})

	if s := p.PreFilter(context.Background(), state, test.pod); !s.IsSuccess() {
		t.Errorf("PreFilter failed: %v", s.Message())
	}
	status := p.Filter(context.Background(), state, test.pod, snapshot.NodeInfoMap[test.node.Name])
	if status.Code() != test.res {
		t.Errorf("Status mismatch. got: %v, want: %v", status.Code(), test.res)
@@ -391,12 +390,12 @@ func TestServiceAffinityScore(t *testing.T) {
	t.Run(test.name, func(t *testing.T) {
		nodes := makeLabeledNodeList(test.nodes)
		snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, nodes))
		fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
		serviceLister := fakelisters.ServiceLister(test.services)
		priorityMapFunction, priorityReduceFunction := priorities.NewServiceAntiAffinityPriority(snapshot.Pods(), serviceLister, test.labels)

		p := &ServiceAffinity{
			handle:                 fh,
			sharedLister:           snapshot,
			serviceLister:          serviceLister,
			priorityMapFunction:    priorityMapFunction,
			priorityReduceFunction: priorityReduceFunction,
		}
@@ -434,6 +433,160 @@ func TestServiceAffinityScore(t *testing.T) {
	}
}

func TestPreFilterStateAddRemovePod(t *testing.T) {
	var label1 = map[string]string{
		"region": "r1",
		"zone":   "z11",
	}
	var label2 = map[string]string{
		"region": "r1",
		"zone":   "z12",
	}
	var label3 = map[string]string{
		"region": "r2",
		"zone":   "z21",
	}
	selector1 := map[string]string{"foo": "bar"}

	tests := []struct {
		name         string
		pendingPod   *v1.Pod
		addedPod     *v1.Pod
		existingPods []*v1.Pod
		nodes        []*v1.Node
		services     []*v1.Service
	}{
		{
			name: "no anti-affinity or service affinity exist",
			pendingPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
			},
			existingPods: []*v1.Pod{
				{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
					Spec: v1.PodSpec{NodeName: "nodeA"},
				},
				{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
					Spec: v1.PodSpec{NodeName: "nodeC"},
				},
			},
			addedPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "addedPod", Labels: selector1},
				Spec:       v1.PodSpec{NodeName: "nodeB"},
			},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: label3}},
			},
		},
		{
			name: "metadata service-affinity data are updated correctly after adding and removing a pod",
			pendingPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "pending", Labels: selector1},
			},
			existingPods: []*v1.Pod{
				{ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: selector1},
					Spec: v1.PodSpec{NodeName: "nodeA"},
				},
				{ObjectMeta: metav1.ObjectMeta{Name: "p2"},
					Spec: v1.PodSpec{NodeName: "nodeC"},
				},
			},
			addedPod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{Name: "addedPod", Labels: selector1},
				Spec:       v1.PodSpec{NodeName: "nodeB"},
			},
			services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector1}}},
			nodes: []*v1.Node{
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: label1}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: label2}},
				{ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: label3}},
			},
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			// getState creates the plugin's preFilterState given the list of pods.
			getState := func(pods []*v1.Pod) (*ServiceAffinity, *framework.CycleState, *preFilterState, *nodeinfosnapshot.Snapshot) {
				snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))

				p := &ServiceAffinity{
					sharedLister:  snapshot,
					serviceLister: fakelisters.ServiceLister(test.services),
				}
				cycleState := framework.NewCycleState()
				preFilterStatus := p.PreFilter(context.Background(), cycleState, test.pendingPod)
				if !preFilterStatus.IsSuccess() {
					t.Errorf("prefilter failed with status: %v", preFilterStatus)
				}

				plState, err := getPreFilterState(cycleState)
				if err != nil {
					t.Errorf("failed to get preFilterState from cycleState: %v", err)
				}

				return p, cycleState, plState, snapshot
			}

			sortState := func(plState *preFilterState) *preFilterState {
				sort.SliceStable(plState.matchingPodList, func(i, j int) bool {
					return plState.matchingPodList[i].Name < plState.matchingPodList[j].Name
				})
				sort.SliceStable(plState.matchingPodServices, func(i, j int) bool {
					return plState.matchingPodServices[i].Name < plState.matchingPodServices[j].Name
				})
				return plState
			}

			// allPodsState is the state produced when all pods, including test.addedPod, are given to prefilter.
			_, _, plStateAllPods, _ := getState(append(test.existingPods, test.addedPod))

			// state is produced for test.existingPods (without test.addedPod).
			ipa, state, plState, snapshot := getState(test.existingPods)
			// clone the state so that we can compare it later when performing Remove.
			plStateOriginal, _ := plState.Clone().(*preFilterState)

			// Add test.addedPod to the state and verify it is equal to allPodsState.
			if err := ipa.AddPod(context.Background(), state, test.pendingPod, test.addedPod, snapshot.NodeInfoMap[test.addedPod.Spec.NodeName]); err != nil {
				t.Errorf("error adding pod to preFilterState: %v", err)
			}

			if !reflect.DeepEqual(sortState(plStateAllPods), sortState(plState)) {
				t.Errorf("State is not equal, got: %v, want: %v", plState, plStateAllPods)
			}

			// Remove the added pod and make sure the state is equal to the original state.
			if err := ipa.RemovePod(context.Background(), state, test.pendingPod, test.addedPod, snapshot.NodeInfoMap[test.addedPod.Spec.NodeName]); err != nil {
				t.Errorf("error removing pod from preFilterState: %v", err)
			}
			if !reflect.DeepEqual(sortState(plStateOriginal), sortState(plState)) {
				t.Errorf("State is not equal, got: %v, want: %v", plState, plStateOriginal)
			}
		})
	}
}
func TestPreFilterStateClone(t *testing.T) {
	source := &preFilterState{
		matchingPodList: []*v1.Pod{
			{ObjectMeta: metav1.ObjectMeta{Name: "pod1"}},
			{ObjectMeta: metav1.ObjectMeta{Name: "pod2"}},
		},
		matchingPodServices: []*v1.Service{
			{ObjectMeta: metav1.ObjectMeta{Name: "service1"}},
		},
	}

	clone := source.Clone()
	if clone == source {
		t.Errorf("Clone returned the exact same object!")
	}
	if !reflect.DeepEqual(clone, source) {
		t.Errorf("Copy is not equal to source!")
	}
}
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
	nodes := make([]*v1.Node, 0, len(nodeMap))
	for nodeName, labels := range nodeMap {