InterPodAffinity Priority as Score plugin
@@ -7,13 +7,18 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithm/priorities:go_default_library",
        "//pkg/scheduler/algorithm/priorities/util:go_default_library",
        "//pkg/scheduler/framework/plugins/migration:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/listers:go_default_library",
        "//pkg/scheduler/nodeinfo:go_default_library",
        "//pkg/scheduler/util:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
        "//vendor/k8s.io/klog:go_default_library",
    ],
)
@@ -24,14 +29,11 @@ go_test(
    embed = [":go_default_library"],
    deps = [
        "//pkg/scheduler/algorithm/predicates:go_default_library",
        "//pkg/scheduler/algorithm/priorities:go_default_library",
        "//pkg/scheduler/framework/plugins/migration:go_default_library",
        "//pkg/scheduler/framework/v1alpha1:go_default_library",
        "//pkg/scheduler/nodeinfo/snapshot:go_default_library",
        "//staging/src/k8s.io/api/core/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//staging/src/k8s.io/client-go/informers:go_default_library",
        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
    ],
)

@@ -19,36 +19,52 @@ package interpodaffinity
import (
	"context"
	"fmt"
	"sync"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog"

	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
	priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
	"k8s.io/kubernetes/pkg/scheduler/nodeinfo"
	schedutil "k8s.io/kubernetes/pkg/scheduler/util"
)

// InterPodAffinity is a plugin that checks inter-pod affinity.
type InterPodAffinity struct {
-	snapshotSharedLister schedulerlisters.SharedLister
-	podAffinityChecker   *predicates.PodAffinityChecker
+	sharedLister          schedulerlisters.SharedLister
+	podAffinityChecker    *predicates.PodAffinityChecker
+	hardPodAffinityWeight int32
+	sync.Mutex
}

// Args holds the args that are used to configure the plugin.
type Args struct {
	HardPodAffinityWeight int32 `json:"hardPodAffinityWeight,omitempty"`
}

var _ framework.PreFilterPlugin = &InterPodAffinity{}
var _ framework.FilterPlugin = &InterPodAffinity{}
var _ framework.PostFilterPlugin = &InterPodAffinity{}
var _ framework.ScorePlugin = &InterPodAffinity{}

const (
	// Name is the name of the plugin used in the plugin registry and configurations.
	Name = "InterPodAffinity"

-	// preFilterStateKey is the key in CycleState to InterPodAffinity pre-computed data.
+	// preFilterStateKey is the key in CycleState to InterPodAffinity pre-computed data for Filtering.
	// Using the name of the plugin will likely help us avoid collisions with other plugins.
	preFilterStateKey = "PreFilter" + Name

	// postFilterStateKey is the key in CycleState to InterPodAffinity pre-computed data for Scoring.
	postFilterStateKey = "PostFilter" + Name
)
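
The Args struct and its "hardPodAffinityWeight" knob above are decoded in New
(shown later in this diff). A minimal sketch of wiring the plugin up with a
non-default weight; the `handle` variable is a stand-in for a real
framework.FrameworkHandle:

	args := &runtime.Unknown{Raw: []byte(`{"hardPodAffinityWeight": 10}`)}
	plugin, err := New(args, handle)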

// preFilterState computed at PreFilter and used at Filter.
@@ -75,10 +91,10 @@ func (pl *InterPodAffinity) PreFilter(ctx context.Context, cycleState *framework
	var allNodes []*nodeinfo.NodeInfo
	var havePodsWithAffinityNodes []*nodeinfo.NodeInfo
	var err error
-	if allNodes, err = pl.snapshotSharedLister.NodeInfos().List(); err != nil {
+	if allNodes, err = pl.sharedLister.NodeInfos().List(); err != nil {
		return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos: %v", err))
	}
-	if havePodsWithAffinityNodes, err = pl.snapshotSharedLister.NodeInfos().HavePodsWithAffinityList(); err != nil {
+	if havePodsWithAffinityNodes, err = pl.sharedLister.NodeInfos().HavePodsWithAffinityList(); err != nil {
		return framework.NewStatus(framework.Error, fmt.Sprintf("failed to list NodeInfos with pods with affinity: %v", err))
	}
	if meta, err = predicates.GetPodAffinityMetadata(pod, allNodes, havePodsWithAffinityNodes); err != nil {
@@ -143,25 +159,299 @@ func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.Cy
	return migration.PredicateResultToFrameworkStatus(reasons, err)
}

// A "processed" representation of v1.WeightedAffinityTerm.
type weightedAffinityTerm struct {
	namespaces  sets.String
	selector    labels.Selector
	weight      int32
	topologyKey string
}

// postFilterState computed at PostFilter and used at Score.
type postFilterState struct {
	topologyScore     map[string]map[string]int64
	affinityTerms     []*weightedAffinityTerm
	antiAffinityTerms []*weightedAffinityTerm
}

// Clone implements the mandatory Clone interface. We don't really copy the data since
// there is no need for that.
func (s *postFilterState) Clone() framework.StateData {
	return s
}

func newWeightedAffinityTerm(pod *v1.Pod, term *v1.PodAffinityTerm, weight int32) (*weightedAffinityTerm, error) {
	namespaces := priorityutil.GetNamespacesFromPodAffinityTerm(pod, term)
	selector, err := metav1.LabelSelectorAsSelector(term.LabelSelector)
	if err != nil {
		return nil, err
	}
	return &weightedAffinityTerm{namespaces: namespaces, selector: selector, topologyKey: term.TopologyKey, weight: weight}, nil
}

func getProcessedTerms(pod *v1.Pod, terms []v1.WeightedPodAffinityTerm) ([]*weightedAffinityTerm, error) {
	if terms == nil {
		return nil, nil
	}

	var processedTerms []*weightedAffinityTerm
	for i := range terms {
		p, err := newWeightedAffinityTerm(pod, &terms[i].PodAffinityTerm, terms[i].Weight)
		if err != nil {
			return nil, err
		}
		processedTerms = append(processedTerms, p)
	}
	return processedTerms, nil
}

func (pl *InterPodAffinity) processTerm(
	state *postFilterState,
	term *weightedAffinityTerm,
	podToCheck *v1.Pod,
	fixedNode *v1.Node,
	multiplier int,
) {
	if len(fixedNode.Labels) == 0 {
		return
	}

	match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, term.namespaces, term.selector)
	tpValue, tpValueExist := fixedNode.Labels[term.topologyKey]
	if match && tpValueExist {
		pl.Lock()
		if state.topologyScore[term.topologyKey] == nil {
			state.topologyScore[term.topologyKey] = make(map[string]int64)
		}
		state.topologyScore[term.topologyKey][tpValue] += int64(term.weight * int32(multiplier))
		pl.Unlock()
	}
}
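
To make processTerm's bookkeeping concrete, here is a toy walk-through with
invented terms and labels; only the accumulation rule is taken from the code
above:

	// A soft affinity term {topologyKey: "zone", weight: 5} matches a pod on a
	// node labeled zone=z1 (multiplier +1); a soft anti-affinity term with
	// weight 2 also matches there (multiplier -1).
	topologyScore := map[string]map[string]int64{}
	topologyScore["zone"] = make(map[string]int64)
	topologyScore["zone"]["z1"] += int64(5 * 1)  // 5
	topologyScore["zone"]["z1"] += int64(2 * -1) // 3
	// Score later credits 3 to every candidate node labeled zone=z1.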

func (pl *InterPodAffinity) processTerms(state *postFilterState, terms []*weightedAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) error {
	for _, term := range terms {
		pl.processTerm(state, term, podToCheck, fixedNode, multiplier)
	}
	return nil
}

func (pl *InterPodAffinity) processExistingPod(state *postFilterState, existingPod *v1.Pod, existingPodNodeInfo *nodeinfo.NodeInfo, incomingPod *v1.Pod) error {
	existingPodAffinity := existingPod.Spec.Affinity
	existingHasAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAffinity != nil
	existingHasAntiAffinityConstraints := existingPodAffinity != nil && existingPodAffinity.PodAntiAffinity != nil
	existingPodNode := existingPodNodeInfo.Node()

	// For every soft pod affinity term of <pod>, if <existingPod> matches the term,
	// increment <state.topologyScore> for every node in the cluster with the same <term.TopologyKey>
	// value as that of <existingPod>'s node by the term's weight.
	pl.processTerms(state, state.affinityTerms, existingPod, existingPodNode, 1)

	// For every soft pod anti-affinity term of <pod>, if <existingPod> matches the term,
	// decrement <state.topologyScore> for every node in the cluster with the same <term.TopologyKey>
	// value as that of <existingPod>'s node by the term's weight.
	pl.processTerms(state, state.antiAffinityTerms, existingPod, existingPodNode, -1)

	if existingHasAffinityConstraints {
		// For every hard pod affinity term of <existingPod>, if <pod> matches the term,
		// increment <state.topologyScore> for every node in the cluster with the same <term.TopologyKey>
		// value as that of <existingPod>'s node by the constant <pl.hardPodAffinityWeight>.
		if pl.hardPodAffinityWeight > 0 {
			terms := existingPodAffinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution
			// TODO: Uncomment this block when implementing RequiredDuringSchedulingRequiredDuringExecution.
			//if len(existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution) != 0 {
			//	terms = append(terms, existingPodAffinity.PodAffinity.RequiredDuringSchedulingRequiredDuringExecution...)
			//}
			for i := range terms {
				term := &terms[i]
				processedTerm, err := newWeightedAffinityTerm(existingPod, term, pl.hardPodAffinityWeight)
				if err != nil {
					return err
				}
				pl.processTerm(state, processedTerm, incomingPod, existingPodNode, 1)
			}
		}
		// For every soft pod affinity term of <existingPod>, if <pod> matches the term,
		// increment <state.topologyScore> for every node in the cluster with the same <term.TopologyKey>
		// value as that of <existingPod>'s node by the term's weight.
		terms, err := getProcessedTerms(existingPod, existingPodAffinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution)
		if err != nil {
			klog.Error(err)
			return nil
		}

		pl.processTerms(state, terms, incomingPod, existingPodNode, 1)
	}
	if existingHasAntiAffinityConstraints {
		// For every soft pod anti-affinity term of <existingPod>, if <pod> matches the term,
		// decrement <state.topologyScore> for every node in the cluster with the same <term.TopologyKey>
		// value as that of <existingPod>'s node by the term's weight.
		terms, err := getProcessedTerms(existingPod, existingPodAffinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution)
		if err != nil {
			return err
		}
		pl.processTerms(state, terms, incomingPod, existingPodNode, -1)
	}
	return nil
}

// PostFilter builds and writes cycle state used by Score and NormalizeScore.
func (pl *InterPodAffinity) PostFilter(
	pCtx context.Context,
	cycleState *framework.CycleState,
	pod *v1.Pod,
	nodes []*v1.Node,
	_ framework.NodeToStatusMap,
) *framework.Status {
	if len(nodes) == 0 {
		// No nodes to score.
		return nil
	}

	if pl.sharedLister == nil {
		return framework.NewStatus(framework.Error, "PostFilter with empty shared lister")
	}

	affinity := pod.Spec.Affinity
	hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil
	hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil

	// Unless the pod being scheduled has affinity terms, we only
	// need to process nodes hosting pods with affinity.
	allNodes, err := pl.sharedLister.NodeInfos().HavePodsWithAffinityList()
	if err != nil {
		return framework.NewStatus(framework.Error, fmt.Sprintf("get pods with affinity list error, err: %v", err))
	}
	if hasAffinityConstraints || hasAntiAffinityConstraints {
		allNodes, err = pl.sharedLister.NodeInfos().List()
		if err != nil {
			return framework.NewStatus(framework.Error, fmt.Sprintf("get all nodes from shared lister error, err: %v", err))
		}
	}

	var affinityTerms []*weightedAffinityTerm
	var antiAffinityTerms []*weightedAffinityTerm
	if hasAffinityConstraints {
		if affinityTerms, err = getProcessedTerms(pod, affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution); err != nil {
			klog.Error(err)
			return nil
		}
	}
	if hasAntiAffinityConstraints {
		if antiAffinityTerms, err = getProcessedTerms(pod, affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution); err != nil {
			klog.Error(err)
			return nil
		}
	}

	state := &postFilterState{
		topologyScore:     make(map[string]map[string]int64),
		affinityTerms:     affinityTerms,
		antiAffinityTerms: antiAffinityTerms,
	}

	errCh := schedutil.NewErrorChannel()
	ctx, cancel := context.WithCancel(pCtx)
	processNode := func(i int) {
		nodeInfo := allNodes[i]
		if nodeInfo.Node() == nil {
			return
		}
		// Unless the pod being scheduled has affinity terms, we only
		// need to process pods with affinity in the node.
		podsToProcess := nodeInfo.PodsWithAffinity()
		if hasAffinityConstraints || hasAntiAffinityConstraints {
			// We need to process all the pods.
			podsToProcess = nodeInfo.Pods()
		}

		for _, existingPod := range podsToProcess {
			if err := pl.processExistingPod(state, existingPod, nodeInfo, pod); err != nil {
				errCh.SendErrorWithCancel(err, cancel)
				return
			}
		}
	}
	workqueue.ParallelizeUntil(ctx, 16, len(allNodes), processNode)
	if err := errCh.ReceiveError(); err != nil {
		return framework.NewStatus(framework.Error, err.Error())
	}

	cycleState.Write(postFilterStateKey, state)
	return nil
}
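
PostFilter fans the per-node work out with client-go's ParallelizeUntil, and
threads cancel through the error channel so the first failure stops the
remaining workers. A standalone sketch of the same pattern, with the work
function left as a placeholder:

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	workqueue.ParallelizeUntil(ctx, 16, len(allNodes), func(i int) {
		// inspect allNodes[i]; on failure, send the error and cancel()
	})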

func getPostFilterState(cycleState *framework.CycleState) (*postFilterState, error) {
	c, err := cycleState.Read(postFilterStateKey)
	if err != nil {
		return nil, fmt.Errorf("error reading %q from cycleState: %v", postFilterStateKey, err)
	}

	s, ok := c.(*postFilterState)
	if !ok {
		return nil, fmt.Errorf("%+v convert to interpodaffinity.postFilterState error", c)
	}
	return s, nil
}

// Score invoked at the Score extension point.
// The "score" returned in this function is the matching number of pods on `nodeName`;
// it is normalized later.
-func (pl *InterPodAffinity) Score(ctx context.Context, state *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
-	nodeInfo, err := pl.snapshotSharedLister.NodeInfos().Get(nodeName)
+func (pl *InterPodAffinity) Score(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, nodeName string) (int64, *framework.Status) {
+	nodeInfo, err := pl.sharedLister.NodeInfos().Get(nodeName)
	if err != nil || nodeInfo.Node() == nil {
		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v, node is nil: %v", nodeName, err, nodeInfo.Node() == nil))
	}
	node := nodeInfo.Node()

	s, err := getPostFilterState(cycleState)
	if err != nil {
-		return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
+		return 0, framework.NewStatus(framework.Error, err.Error())
	}
	var score int64
	for tpKey, tpValues := range s.topologyScore {
		if v, exist := node.Labels[tpKey]; exist {
			score += tpValues[v]
		}
	}

-	meta := migration.PriorityMetadata(state)
-	s, err := priorities.CalculateInterPodAffinityPriorityMap(pod, meta, nodeInfo)
-	return s.Score, migration.ErrorToFrameworkStatus(err)
+	return score, nil
}
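
A worked example of the summation above, with invented labels and weights:

	// Node labels:   {zone: "z1", region: "r1"}
	// topologyScore: {"zone": {"z1": 10}, "region": {"r1": 5}, "rack": {"r9": 7}}
	// The node's labels select 10 (zone=z1) and 5 (region=r1); the "rack" entry
	// contributes nothing because the node carries no "rack" label. Score = 15.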

-// NormalizeScore invoked after scoring all nodes.
-func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, state *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
-	meta := migration.PriorityMetadata(state)
-	err := priorities.CalculateInterPodAffinityPriorityReduce(pod, meta, pl.snapshotSharedLister, scores)
-	return migration.ErrorToFrameworkStatus(err)
+// NormalizeScore normalizes the score for each filtered node.
+// The basic rule is: the bigger the raw score (the accumulated weight of
+// matched terms) is, the bigger the final normalized score will be.
+func (pl *InterPodAffinity) NormalizeScore(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod, scores framework.NodeScoreList) *framework.Status {
+	s, err := getPostFilterState(cycleState)
+	if err != nil {
+		return framework.NewStatus(framework.Error, err.Error())
+	}
+	if len(s.topologyScore) == 0 {
+		return nil
+	}
+
+	var maxCount, minCount int64
+	for i := range scores {
+		score := scores[i].Score
+		if score > maxCount {
+			maxCount = score
+		}
+		if score < minCount {
+			minCount = score
+		}
+	}
+
+	maxMinDiff := maxCount - minCount
+	for i := range scores {
+		fScore := float64(0)
+		if maxMinDiff > 0 {
+			fScore = float64(framework.MaxNodeScore) * (float64(scores[i].Score-minCount) / float64(maxMinDiff))
+		}
+
+		scores[i].Score = int64(fScore)
+	}
+
+	return nil
}
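
A quick numeric check of the min-max scaling, taking framework.MaxNodeScore to
be 100; note that anti-affinity can drive raw scores negative, which is why
minCount matters:

	// Raw scores {-5, 0, 15}: minCount = -5, maxCount = 15, maxMinDiff = 20.
	//   -5 -> 100 * ( 0/20) =   0
	//    0 -> 100 * ( 5/20) =  25
	//   15 -> 100 * (20/20) = 100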

// ScoreExtensions of the Score plugin.
@@ -170,12 +460,19 @@ func (pl *InterPodAffinity) ScoreExtensions() framework.ScoreExtensions {
}

// New initializes a new plugin and returns it.
-func New(_ *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
+func New(plArgs *runtime.Unknown, h framework.FrameworkHandle) (framework.Plugin, error) {
	if h.SnapshotSharedLister() == nil {
		return nil, fmt.Errorf("SnapshotSharedLister is nil")
	}

+	args := &Args{}
+	if err := framework.DecodeInto(plArgs, args); err != nil {
+		return nil, err
+	}
+
	return &InterPodAffinity{
-		snapshotSharedLister: h.SnapshotSharedLister(),
-		podAffinityChecker:   predicates.NewPodAffinityChecker(h.SnapshotSharedLister()),
+		sharedLister:          h.SnapshotSharedLister(),
+		podAffinityChecker:    predicates.NewPodAffinityChecker(h.SnapshotSharedLister()),
+		hardPodAffinityWeight: args.HardPodAffinityWeight,
	}, nil
}
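
For context, a factory with this signature is looked up by plugin name; a
hedged sketch of what a registry entry would look like, assuming the
framework.Registry map from the same framework package (not part of this
diff):

	registry := framework.Registry{
		interpodaffinity.Name: interpodaffinity.New,
	}
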
@@ -18,16 +18,14 @@ package interpodaffinity

import (
	"context"
	"fmt"
	"reflect"
	"testing"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/informers"
	clientsetfake "k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
	"k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
	nodeinfosnapshot "k8s.io/kubernetes/pkg/scheduler/nodeinfo/snapshot"
)
@@ -57,7 +55,7 @@ func createPodWithAffinityTerms(namespace, nodeName string, labels map[string]st

}

-func TestSingleNode(t *testing.T) {
+func TestRequiredAffinitySingleNode(t *testing.T) {
	podLabel := map[string]string{"service": "securityscan"}
	labels1 := map[string]string{
		"region": "r1",
@@ -783,8 +781,8 @@ func TestSingleNode(t *testing.T) {
		t.Run(test.name, func(t *testing.T) {
			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, []*v1.Node{test.node}))
			p := &InterPodAffinity{
-				snapshotSharedLister: snapshot,
-				podAffinityChecker:   predicates.NewPodAffinityChecker(snapshot),
+				sharedLister:       snapshot,
+				podAffinityChecker: predicates.NewPodAffinityChecker(snapshot),
			}
			state := framework.NewCycleState()
			preFilterStatus := p.PreFilter(context.Background(), state, test.pod)
@@ -799,7 +797,7 @@ func TestSingleNode(t *testing.T) {
	}
}

-func TestMultipleNodes(t *testing.T) {
+func TestRequiredAffinityMultipleNodes(t *testing.T) {
	podLabelA := map[string]string{
		"foo": "bar",
	}
@@ -1621,8 +1619,8 @@ func TestMultipleNodes(t *testing.T) {
		snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
		for indexNode, node := range test.nodes {
			p := &InterPodAffinity{
-				snapshotSharedLister: snapshot,
-				podAffinityChecker:   predicates.NewPodAffinityChecker(snapshot),
+				sharedLister:       snapshot,
+				podAffinityChecker: predicates.NewPodAffinityChecker(snapshot),
			}
			state := framework.NewCycleState()
			preFilterStatus := p.PreFilter(context.Background(), state, test.pod)
@@ -1638,7 +1636,7 @@ func TestMultipleNodes(t *testing.T) {
	}
}

-func TestInterPodAffinityPriority(t *testing.T) {
+func TestPreferredAffinity(t *testing.T) {
	labelRgChina := map[string]string{
		"region": "China",
	}
@@ -2127,35 +2125,27 @@ func TestInterPodAffinityPriority(t *testing.T) {
		t.Run(test.name, func(t *testing.T) {
			state := framework.NewCycleState()
			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
-			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))
+			p := &InterPodAffinity{
+				sharedLister:          snapshot,
+				podAffinityChecker:    predicates.NewPodAffinityChecker(snapshot),
+				hardPodAffinityWeight: 1,
+			}

-			client := clientsetfake.NewSimpleClientset()
-			informerFactory := informers.NewSharedInformerFactory(client, 0)
-
-			metaDataProducer := priorities.NewMetadataFactory(
-				informerFactory.Core().V1().Services().Lister(),
-				informerFactory.Core().V1().ReplicationControllers().Lister(),
-				informerFactory.Apps().V1().ReplicaSets().Lister(),
-				informerFactory.Apps().V1().StatefulSets().Lister(),
-				1,
-			)
-
-			metaData := metaDataProducer(test.pod, test.nodes, snapshot)
-
-			state.Write(migration.PrioritiesStateKey, &migration.PrioritiesStateData{Reference: metaData})
-
-			p, _ := New(nil, fh)
+			status := p.PostFilter(context.Background(), state, test.pod, test.nodes, nil)
+			if !status.IsSuccess() {
+				t.Errorf("unexpected error: %v", status)
+			}
			var gotList framework.NodeScoreList
			for _, n := range test.nodes {
				nodeName := n.ObjectMeta.Name
-				score, status := p.(framework.ScorePlugin).Score(context.Background(), state, test.pod, nodeName)
+				score, status := p.Score(context.Background(), state, test.pod, nodeName)
				if !status.IsSuccess() {
					t.Errorf("unexpected error: %v", status)
				}
				gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
			}

-			status := p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
+			status = p.ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
			if !status.IsSuccess() {
				t.Errorf("unexpected error: %v", status)
			}
@@ -2168,7 +2158,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
	}
}

-func TestHardPodAffinitySymmetricWeight(t *testing.T) {
+func TestPreferredAffinityWithHardPodAffinitySymmetricWeight(t *testing.T) {
	podLabelServiceS1 := map[string]string{
		"service": "S1",
	}
@@ -2244,22 +2234,12 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
			snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(test.pods, test.nodes))
			fh, _ := framework.NewFramework(nil, nil, nil, framework.WithSnapshotSharedLister(snapshot))

-			client := clientsetfake.NewSimpleClientset()
-			informerFactory := informers.NewSharedInformerFactory(client, 0)
-
-			metaDataProducer := priorities.NewMetadataFactory(
-				informerFactory.Core().V1().Services().Lister(),
-				informerFactory.Core().V1().ReplicationControllers().Lister(),
-				informerFactory.Apps().V1().ReplicaSets().Lister(),
-				informerFactory.Apps().V1().StatefulSets().Lister(),
-				test.hardPodAffinityWeight,
-			)
-
-			metaData := metaDataProducer(test.pod, test.nodes, snapshot)
-
-			state.Write(migration.PrioritiesStateKey, &migration.PrioritiesStateData{Reference: metaData})
-
-			p, _ := New(nil, fh)
+			args := &runtime.Unknown{Raw: []byte(fmt.Sprintf(`{"hardPodAffinityWeight":%d}`, test.hardPodAffinityWeight))}
+			p, _ := New(args, fh)
+			status := p.(framework.PostFilterPlugin).PostFilter(context.Background(), state, test.pod, test.nodes, nil)
+			if !status.IsSuccess() {
+				t.Errorf("unexpected error: %v", status)
+			}
			var gotList framework.NodeScoreList
			for _, n := range test.nodes {
				nodeName := n.ObjectMeta.Name
@@ -2270,7 +2250,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
				gotList = append(gotList, framework.NodeScore{Name: nodeName, Score: score})
			}

-			status := p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
+			status = p.(framework.ScorePlugin).ScoreExtensions().NormalizeScore(context.Background(), state, test.pod, gotList)
			if !status.IsSuccess() {
				t.Errorf("unexpected error: %v", status)
			}
@@ -2282,7 +2262,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
	}
}

-func TestStateAddRemovePod(t *testing.T) {
+func TestPreFilterStateAddRemovePod(t *testing.T) {
	var label1 = map[string]string{
		"region": "r1",
		"zone":   "z11",
@@ -2511,8 +2491,8 @@ func TestStateAddRemovePod(t *testing.T) {
		snapshot := nodeinfosnapshot.NewSnapshot(nodeinfosnapshot.CreateNodeInfoMap(pods, test.nodes))

		p := &InterPodAffinity{
-			snapshotSharedLister: snapshot,
-			podAffinityChecker:   predicates.NewPodAffinityChecker(snapshot),
+			sharedLister:       snapshot,
+			podAffinityChecker: predicates.NewPodAffinityChecker(snapshot),
		}
		cycleState := framework.NewCycleState()
		preFilterStatus := p.PreFilter(context.Background(), cycleState, test.pendingPod)