plugin/scheduler
@@ -19,39 +19,39 @@ package algorithm
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/apis/extensions"
"k8s.io/kubernetes/pkg/api/v1"
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
"k8s.io/kubernetes/pkg/labels"
)
// NodeLister interface represents anything that can list nodes for a scheduler.
type NodeLister interface {
// We explicitly return []*api.Node, instead of api.NodeList, to avoid
// We explicitly return []*v1.Node, instead of v1.NodeList, to avoid
// performing expensive copies that are unneded.
List() ([]*api.Node, error)
List() ([]*v1.Node, error)
}
// FakeNodeLister implements NodeLister on a []string for test purposes.
type FakeNodeLister []*api.Node
type FakeNodeLister []*v1.Node
// List returns nodes as a []string.
func (f FakeNodeLister) List() ([]*api.Node, error) {
func (f FakeNodeLister) List() ([]*v1.Node, error) {
return f, nil
}
// PodLister interface represents anything that can list pods for a scheduler.
type PodLister interface {
// We explicitly return []*api.Pod, instead of api.PodList, to avoid
// We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
// performing expensive copies that are unneded.
List(labels.Selector) ([]*api.Pod, error)
List(labels.Selector) ([]*v1.Pod, error)
}
// FakePodLister implements PodLister on an []api.Pods for test purposes.
type FakePodLister []*api.Pod
// FakePodLister implements PodLister on an []v1.Pods for test purposes.
type FakePodLister []*v1.Pod
// List returns []*api.Pod matching a query.
func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) {
// List returns []*v1.Pod matching a query.
func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
for _, pod := range f {
if s.Matches(labels.Set(pod.Labels)) {
selected = append(selected, pod)
@@ -63,21 +63,21 @@ func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error)
// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
type ServiceLister interface {
// Lists all the services
List(labels.Selector) ([]*api.Service, error)
List(labels.Selector) ([]*v1.Service, error)
// Gets the services for the given pod
GetPodServices(*api.Pod) ([]*api.Service, error)
GetPodServices(*v1.Pod) ([]*v1.Service, error)
}
// FakeServiceLister implements ServiceLister on []api.Service for test purposes.
type FakeServiceLister []*api.Service
// FakeServiceLister implements ServiceLister on []v1.Service for test purposes.
type FakeServiceLister []*v1.Service
// List returns api.ServiceList, the list of all services.
func (f FakeServiceLister) List(labels.Selector) ([]*api.Service, error) {
// List returns v1.ServiceList, the list of all services.
func (f FakeServiceLister) List(labels.Selector) ([]*v1.Service, error) {
return f, nil
}
// GetPodServices gets the services that have the selector that match the labels on the given pod.
func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service, err error) {
func (f FakeServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service, err error) {
var selector labels.Selector
for i := range f {
@@ -97,34 +97,34 @@ func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service
// ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler.
type ControllerLister interface {
// Lists all the replication controllers
List(labels.Selector) ([]*api.ReplicationController, error)
List(labels.Selector) ([]*v1.ReplicationController, error)
// Gets the services for the given pod
GetPodControllers(*api.Pod) ([]*api.ReplicationController, error)
GetPodControllers(*v1.Pod) ([]*v1.ReplicationController, error)
}
// EmptyControllerLister implements ControllerLister on []api.ReplicationController returning empty data
// EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data
type EmptyControllerLister struct{}
// List returns nil
func (f EmptyControllerLister) List(labels.Selector) ([]*api.ReplicationController, error) {
func (f EmptyControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
return nil, nil
}
// GetPodControllers returns nil
func (f EmptyControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) {
func (f EmptyControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
return nil, nil
}
// FakeControllerLister implements ControllerLister on []api.ReplicationController for test purposes.
type FakeControllerLister []*api.ReplicationController
// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
type FakeControllerLister []*v1.ReplicationController
// List returns []api.ReplicationController, the list of all ReplicationControllers.
func (f FakeControllerLister) List(labels.Selector) ([]*api.ReplicationController, error) {
// List returns []v1.ReplicationController, the list of all ReplicationControllers.
func (f FakeControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
return f, nil
}
// GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod
func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) {
func (f FakeControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
var selector labels.Selector
for i := range f {
@@ -147,14 +147,14 @@ func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []*ap
// ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler.
type ReplicaSetLister interface {
// Gets the replicasets for the given pod
GetPodReplicaSets(*api.Pod) ([]*extensions.ReplicaSet, error)
GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error)
}
// EmptyReplicaSetLister implements ReplicaSetLister on []extensions.ReplicaSet returning empty data
type EmptyReplicaSetLister struct{}
// GetPodReplicaSets returns nil
func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) {
func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
return nil, nil
}
@@ -162,7 +162,7 @@ func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extension
type FakeReplicaSetLister []*extensions.ReplicaSet
// GetPodReplicaSets gets the ReplicaSets that have the selector that match the labels on the given pod
func (f FakeReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) {
func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
var selector labels.Selector
for _, rs := range f {
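Editor's note: the lister conversion above only swaps the element type from `api.*` to `v1.*`; the fake implementations still filter by label selector exactly as before. A minimal usage sketch, assuming the import paths of this snapshot of the tree (pre-apimachinery layout) and hypothetical pod names:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/pkg/labels"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
)

func main() {
	// After the conversion, FakePodLister is just []*v1.Pod.
	pods := algorithm.FakePodLister{
		{ObjectMeta: v1.ObjectMeta{Name: "web-1", Labels: map[string]string{"app": "web"}}},
		{ObjectMeta: v1.ObjectMeta{Name: "db-1", Labels: map[string]string{"app": "db"}}},
	}

	// List applies the selector against each pod's labels.
	selected, err := pods.List(labels.SelectorFromSet(labels.Set{"app": "web"}))
	if err != nil {
		panic(err)
	}
	fmt.Println(len(selected)) // 1
}
```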
@@ -19,7 +19,7 @@ package predicates
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)
var (
@@ -46,13 +46,13 @@ var (
// hit and caused the unfitting failure.
type InsufficientResourceError struct {
// resourceName is the name of the resource that is insufficient
ResourceName api.ResourceName
ResourceName v1.ResourceName
requested int64
used int64
capacity int64
}
func NewInsufficientResourceError(resourceName api.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
return &InsufficientResourceError{
ResourceName: resourceName,
requested: requested,
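Editor's note: a hedged sketch of constructing this failure reason after the conversion, mirroring how PodFitsResources (further down) appends it. The package import path and the numbers are assumptions for illustration; only the constructor and the exported ResourceName field shown in the hunk above are taken from the diff:

```go
package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
)

func main() {
	// Hypothetical numbers: the pod asks for 500m CPU; the node has 4000m
	// allocatable, of which 3800m is already requested by other pods.
	reason := predicates.NewInsufficientResourceError(v1.ResourceCPU, 500, 3800, 4000)
	fmt.Println(reason.ResourceName) // cpu
}
```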
@@ -18,7 +18,7 @@ package predicates
import (
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)
@@ -35,7 +35,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Metada
}
// GetMetadata returns the predicateMetadata used which will be used by various predicates.
func (pfactory *PredicateMetadataFactory) GetMetadata(pod *api.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{} {
func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{} {
// If we cannot compute metadata, just return nil
if pod == nil {
return nil
@@ -24,8 +24,8 @@ import (
"time"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/client/cache"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/labels"
@@ -50,15 +50,15 @@ func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMeta
// Other types for predicate functions...
type NodeInfo interface {
GetNodeInfo(nodeID string) (*api.Node, error)
GetNodeInfo(nodeID string) (*v1.Node, error)
}
type PersistentVolumeInfo interface {
GetPersistentVolumeInfo(pvID string) (*api.PersistentVolume, error)
GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
}
type PersistentVolumeClaimInfo interface {
GetPersistentVolumeClaimInfo(namespace string, name string) (*api.PersistentVolumeClaim, error)
GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
}
// CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo
@@ -67,7 +67,7 @@ type CachedPersistentVolumeClaimInfo struct {
}
// GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name
func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*api.PersistentVolumeClaim, error) {
func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) {
return c.PersistentVolumeClaims(namespace).Get(name)
}
@@ -76,8 +76,8 @@ type CachedNodeInfo struct {
}
// GetNodeInfo returns cached data for the node 'id'.
func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
node, exists, err := c.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}})
func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
node, exists, err := c.Get(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: id}})
if err != nil {
return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
@@ -87,27 +87,27 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
return nil, fmt.Errorf("node '%v' not found", id)
}
return node.(*api.Node), nil
return node.(*v1.Node), nil
}
// Note that predicateMetdata and matchingPodAntiAffinityTerm need to be declared in the same file
// due to the way declarations are processed in predicate declaration unit tests.
type matchingPodAntiAffinityTerm struct {
term *api.PodAffinityTerm
node *api.Node
term *v1.PodAffinityTerm
node *v1.Node
}
type predicateMetadata struct {
pod *api.Pod
pod *v1.Pod
podBestEffort bool
podRequest *schedulercache.Resource
podPorts map[int]bool
matchingAntiAffinityTerms []matchingPodAntiAffinityTerm
serviceAffinityMatchingPodList []*api.Pod
serviceAffinityMatchingPodServices []*api.Service
serviceAffinityMatchingPodList []*v1.Pod
serviceAffinityMatchingPodServices []*v1.Service
}
func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
// fast path if there is no conflict checking targets.
if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil {
return false
@@ -151,7 +151,7 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
// - AWS EBS forbids any two pods mounting the same volume ID
// - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
// TODO: migrate this into some per-volume specific code?
func NoDiskConflict(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
for _, v := range pod.Spec.Volumes {
for _, ev := range nodeInfo.Pods() {
if isVolumeConflict(v, ev) {
@@ -172,8 +172,8 @@ type MaxPDVolumeCountChecker struct {
// VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps
type VolumeFilter struct {
// Filter normal volumes
FilterVolume func(vol *api.Volume) (id string, relevant bool)
FilterPersistentVolume func(pv *api.PersistentVolume) (id string, relevant bool)
FilterVolume func(vol *v1.Volume) (id string, relevant bool)
FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
}
// NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
@@ -194,7 +194,7 @@ func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo Pe
return c.predicate
}
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace string, filteredVolumes map[string]bool) error {
func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
for _, vol := range volumes {
if id, ok := c.filter.FilterVolume(&vol); ok {
filteredVolumes[id] = true
@@ -248,7 +248,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace
return nil
}
func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
@@ -293,14 +293,14 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, node
// EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
var EBSVolumeFilter VolumeFilter = VolumeFilter{
FilterVolume: func(vol *api.Volume) (string, bool) {
FilterVolume: func(vol *v1.Volume) (string, bool) {
if vol.AWSElasticBlockStore != nil {
return vol.AWSElasticBlockStore.VolumeID, true
}
return "", false
},
FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) {
FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
if pv.Spec.AWSElasticBlockStore != nil {
return pv.Spec.AWSElasticBlockStore.VolumeID, true
}
@@ -310,14 +310,14 @@ var EBSVolumeFilter VolumeFilter = VolumeFilter{
// GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
FilterVolume: func(vol *api.Volume) (string, bool) {
FilterVolume: func(vol *v1.Volume) (string, bool) {
if vol.GCEPersistentDisk != nil {
return vol.GCEPersistentDisk.PDName, true
}
return "", false
},
FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) {
FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
if pv.Spec.GCEPersistentDisk != nil {
return pv.Spec.GCEPersistentDisk.PDName, true
}
@@ -352,7 +352,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
return c.predicate
}
func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
// If a pod doesn't have any volume attached to it, the predicate will always be true.
// Thus we make a fast path for it, to avoid unnecessary computations in this case.
if len(pod.Spec.Volumes) == 0 {
@@ -427,22 +427,22 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *
return true, nil, nil
}
func GetResourceRequest(pod *api.Pod) *schedulercache.Resource {
func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
result := schedulercache.Resource{}
for _, container := range pod.Spec.Containers {
for rName, rQuantity := range container.Resources.Requests {
switch rName {
case api.ResourceMemory:
case v1.ResourceMemory:
result.Memory += rQuantity.Value()
case api.ResourceCPU:
case v1.ResourceCPU:
result.MilliCPU += rQuantity.MilliValue()
case api.ResourceNvidiaGPU:
case v1.ResourceNvidiaGPU:
result.NvidiaGPU += rQuantity.Value()
default:
if api.IsOpaqueIntResourceName(rName) {
if v1.IsOpaqueIntResourceName(rName) {
// Lazily allocate this map only if required.
if result.OpaqueIntResources == nil {
result.OpaqueIntResources = map[api.ResourceName]int64{}
result.OpaqueIntResources = map[v1.ResourceName]int64{}
}
result.OpaqueIntResources[rName] += rQuantity.Value()
}
@@ -453,23 +453,23 @@ func GetResourceRequest(pod *api.Pod) *schedulercache.Resource {
for _, container := range pod.Spec.InitContainers {
for rName, rQuantity := range container.Resources.Requests {
switch rName {
case api.ResourceMemory:
case v1.ResourceMemory:
if mem := rQuantity.Value(); mem > result.Memory {
result.Memory = mem
}
case api.ResourceCPU:
case v1.ResourceCPU:
if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
result.MilliCPU = cpu
}
case api.ResourceNvidiaGPU:
case v1.ResourceNvidiaGPU:
if gpu := rQuantity.Value(); gpu > result.NvidiaGPU {
result.NvidiaGPU = gpu
}
default:
if api.IsOpaqueIntResourceName(rName) {
if v1.IsOpaqueIntResourceName(rName) {
// Lazily allocate this map only if required.
if result.OpaqueIntResources == nil {
result.OpaqueIntResources = map[api.ResourceName]int64{}
result.OpaqueIntResources = map[v1.ResourceName]int64{}
}
value := rQuantity.Value()
if value > result.OpaqueIntResources[rName] {
@@ -482,11 +482,11 @@ func GetResourceRequest(pod *api.Pod) *schedulercache.Resource {
return &result
}
func podName(pod *api.Pod) string {
func podName(pod *v1.Pod) string {
return pod.Namespace + "/" + pod.Name
}
func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -495,7 +495,7 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
var predicateFails []algorithm.PredicateFailureReason
allowedPodNumber := nodeInfo.AllowedPodNumber()
if len(nodeInfo.Pods())+1 > allowedPodNumber {
predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
}
var podRequest *schedulercache.Resource
@@ -511,13 +511,13 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
allocatable := nodeInfo.AllocatableResource()
if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
}
if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
}
if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU))
predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU))
}
for rName, rQuant := range podRequest.OpaqueIntResources {
if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] {
@@ -536,9 +536,9 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
// nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
// terms are ORed, and an empty list of terms will match nothing.
func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSelectorTerm) bool {
func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
for _, req := range nodeSelectorTerms {
nodeSelector, err := api.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
nodeSelector, err := v1.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
if err != nil {
glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
return false
@@ -551,7 +551,7 @@ func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSe
}
// The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
// Check if node.Labels match pod.Spec.NodeSelector.
if len(pod.Spec.NodeSelector) > 0 {
selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
@@ -562,7 +562,7 @@ func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
// Parse required node affinity scheduling requirements
// and check if the current node match the requirements.
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
if err != nil {
glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
return false
@@ -603,7 +603,7 @@ func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
return nodeAffinityMatches
}
func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func PodSelectorMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -614,7 +614,7 @@ func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache
return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
}
func PodFitsHost(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func PodFitsHost(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
if len(pod.Spec.NodeName) == 0 {
return true, nil, nil
}
@@ -653,7 +653,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
// Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
// A node may have a label with "retiring" as key and the date as the value
// and it may be desirable to avoid scheduling new pods on this node
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -732,9 +732,9 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
//
// WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
// For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
func (s *ServiceAffinity) checkServiceAffinity(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var services []*api.Service
var pods []*api.Pod
func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var services []*v1.Service
var pods []*v1.Pod
if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
services = pm.serviceAffinityMatchingPodServices
pods = pm.serviceAffinityMatchingPodList
@@ -769,7 +769,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *api.Pod, meta interface{}, n
return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
}
func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var wantPorts map[int]bool
if predicateMeta, ok := meta.(*predicateMetadata); ok {
wantPorts = predicateMeta.podPorts
@@ -791,7 +791,7 @@ func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
return true, nil, nil
}
func GetUsedPorts(pods ...*api.Pod) map[int]bool {
func GetUsedPorts(pods ...*v1.Pod) map[int]bool {
ports := make(map[int]bool)
for _, pod := range pods {
for j := range pod.Spec.Containers {
@@ -821,7 +821,7 @@ func haveSame(a1, a2 []string) bool {
return false
}
func GeneralPredicates(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func GeneralPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
var predicateFails []algorithm.PredicateFailureReason
fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
if err != nil {
@@ -873,7 +873,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failu
return checker.InterPodAffinityMatches
}
func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -883,7 +883,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interfac
}
// Now check if <pod> requirements will be satisfied on this node.
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
if err != nil {
return false, nil, err
}
@@ -907,7 +907,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interfac
// First return value indicates whether a matching pod exists on a node that matches the topology key,
// while the second return value indicates whether a matching pod exists anywhere.
// TODO: Do we really need any pod matching, or all pods matching? I think the latter.
func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, term *api.PodAffinityTerm) (bool, bool, error) {
func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) {
matchingPodExists := false
for _, existingPod := range allPods {
match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, pod, term)
@@ -928,7 +928,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *api.Pod, allPods
return false, matchingPodExists, nil
}
func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityTerm) {
func getPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
if podAffinity != nil {
if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
@@ -941,7 +941,7 @@ func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityT
return terms
}
func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api.PodAffinityTerm) {
func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
if podAntiAffinity != nil {
if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
@@ -954,7 +954,7 @@ func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api.
return terms
}
func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) {
func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) {
allNodeNames := make([]string, 0, len(nodeInfoMap))
for name := range nodeInfoMap {
allNodeNames = append(allNodeNames, name)
@@ -985,7 +985,7 @@ func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedule
}
var nodeResult []matchingPodAntiAffinityTerm
for _, existingPod := range nodeInfo.PodsWithAffinity() {
affinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations)
affinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations)
if err != nil {
catchError(err)
return
@@ -1012,10 +1012,10 @@ func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedule
return result, firstError
}
func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods []*api.Pod) ([]matchingPodAntiAffinityTerm, error) {
func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) {
var result []matchingPodAntiAffinityTerm
for _, existingPod := range allPods {
affinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations)
affinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations)
if err != nil {
return nil, err
}
@@ -1040,7 +1040,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods
// Checks if scheduling the pod onto this node would break any anti-affinity
// rules indicated by the existing pods.
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *api.Pod, meta interface{}, node *api.Node) bool {
func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta interface{}, node *v1.Node) bool {
var matchingTerms []matchingPodAntiAffinityTerm
if predicateMeta, ok := meta.(*predicateMetadata); ok {
matchingTerms = predicateMeta.matchingAntiAffinityTerms
@@ -1072,7 +1072,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *api.Pod, met
}
// Checks if scheduling the pod onto this node would break any rules of this pod.
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *api.Pod, node *api.Node, affinity *api.Affinity) bool {
func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node *v1.Node, affinity *v1.Affinity) bool {
allPods, err := c.podLister.List(labels.Everything())
if err != nil {
return false
@@ -1118,18 +1118,18 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *api.Pod, nod
return true
}
func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
}
taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
taints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
if err != nil {
return false, nil, err
}
tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
tolerations, err := v1.GetTolerationsFromPodAnnotations(pod.Annotations)
if err != nil {
return false, nil, err
}
@@ -1140,7 +1140,7 @@ func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulerc
return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
}
func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint) bool {
func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint) bool {
// If the taint list is nil/empty, it is tolerated by all tolerations by default.
if len(taints) == 0 {
return true
@@ -1154,11 +1154,11 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint
for i := range taints {
taint := &taints[i]
// skip taints that have effect PreferNoSchedule, since it is for priorities
if taint.Effect == api.TaintEffectPreferNoSchedule {
if taint.Effect == v1.TaintEffectPreferNoSchedule {
continue
}
if !api.TaintToleratedByTolerations(taint, tolerations) {
if !v1.TaintToleratedByTolerations(taint, tolerations) {
return false
}
}
@@ -1167,13 +1167,13 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint
}
// Determine if a pod is scheduled with best-effort QoS
func isPodBestEffort(pod *api.Pod) bool {
func isPodBestEffort(pod *v1.Pod) bool {
return qos.GetPodQOS(pod) == qos.BestEffort
}
// CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
// reporting memory pressure condition.
func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -1194,7 +1194,7 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *
// is node under pressure?
for _, cond := range node.Status.Conditions {
if cond.Type == api.NodeMemoryPressure && cond.Status == api.ConditionTrue {
if cond.Type == v1.NodeMemoryPressure && cond.Status == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
}
}
@@ -1204,7 +1204,7 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *
// CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
// reporting disk pressure condition.
func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
node := nodeInfo.Node()
if node == nil {
return false, nil, fmt.Errorf("node not found")
@@ -1212,7 +1212,7 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *sc
// is node under pressure?
for _, cond := range node.Status.Conditions {
if cond.Type == api.NodeDiskPressure && cond.Status == api.ConditionTrue {
if cond.Type == v1.NodeDiskPressure && cond.Status == v1.ConditionTrue {
return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
}
}
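Editor's note: the hunks above preserve two pieces of logic worth calling out: GetResourceRequest sums the requests of regular containers but only takes the maximum over init containers (they run sequentially), and PodFitsResources then checks request + already-requested against the node's allocatable amount. A small self-contained sketch of that arithmetic, using made-up milli-CPU numbers instead of the real v1 types:

```go
package main

import "fmt"

// effectiveCPURequest mirrors the GetResourceRequest rule for one resource:
// regular containers are summed, and each init container only raises the
// result to its own request (init containers run one at a time).
func effectiveCPURequest(containers, initContainers []int64) int64 {
	var total int64
	for _, c := range containers {
		total += c
	}
	for _, ic := range initContainers {
		if ic > total {
			total = ic
		}
	}
	return total
}

// fits mirrors the PodFitsResources check for one resource:
// the pod fits if request + alreadyRequested <= allocatable.
func fits(request, alreadyRequested, allocatable int64) bool {
	return allocatable >= request+alreadyRequested
}

func main() {
	req := effectiveCPURequest([]int64{250, 250}, []int64{600}) // 600m init wins over 500m sum
	fmt.Println(req, fits(req, 3600, 4000))                     // 600 false
}
```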
File diff suppressed because it is too large
@@ -16,8 +16,10 @@ limitations under the License.
package predicates
import "k8s.io/kubernetes/pkg/labels"
import "k8s.io/kubernetes/pkg/api"
import (
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
)
// FindLabelsInSet gets as many key/value pairs as possible out of a label set.
func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string {
@@ -45,8 +47,8 @@ func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet la
}
// FilterPodsByNamespace filters pods outside a namespace from the given list.
func FilterPodsByNamespace(pods []*api.Pod, ns string) []*api.Pod {
filtered := []*api.Pod{}
func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod {
filtered := []*v1.Pod{}
for _, nsPod := range pods {
if nsPod.Namespace == ns {
filtered = append(filtered, nsPod)
@@ -19,7 +19,7 @@ package predicates
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
)
@@ -30,9 +30,9 @@ func ExampleFindLabelsInSet() {
labelSubset["label2"] = "value2"
// Lets make believe that these pods are on the cluster.
// Utility functions will inspect their labels, filter them, and so on.
nsPods := []*api.Pod{
nsPods := []*v1.Pod{
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "pod1",
Namespace: "ns1",
Labels: map[string]string{
@@ -43,14 +43,14 @@ func ExampleFindLabelsInSet() {
},
}, // first pod which will be used via the utilities
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "pod2",
Namespace: "ns1",
},
},
{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Name: "pod3ThatWeWontSee",
},
},
@@ -20,7 +20,7 @@ import (
"fmt"
"math"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -37,7 +37,7 @@ const (
// Also used in most/least_requested nad metadata.
// TODO: despaghettify it
func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource {
func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource {
result := &schedulercache.Resource{}
for i := range pod.Spec.Containers {
container := &pod.Spec.Containers[i]
@@ -48,7 +48,7 @@ func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource {
return result
}
func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -104,7 +104,7 @@ func fractionOfCapacity(requested, capacity int64) float64 {
// close the two metrics are to each other.
// Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
// "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
func BalancedResourceAllocationMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
func BalancedResourceAllocationMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
var nonZeroRequest *schedulercache.Resource
if priorityMeta, ok := meta.(*priorityMetadata); ok {
nonZeroRequest = priorityMeta.nonZeroRequest
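Editor's note: the comment in the hunk above states the whole scoring rule: score = 10 - abs(cpuFraction - memoryFraction) * 10, where each fraction is requested/allocatable. A standalone sketch of that formula, checked against one of the test cases below (node with 10000 millicores and 20000 memory units, 6000m CPU and 5000 memory requested); the zero-on-overcommit behavior is an assumption taken from the "requested resources exceed node capacity" test expectation:

```go
package main

import (
	"fmt"
	"math"
)

// balancedScore reproduces the formula quoted in the hunk above:
// score = 10 - abs(cpuFraction - memoryFraction) * 10.
func balancedScore(cpuRequested, cpuAllocatable, memRequested, memAllocatable int64) int {
	cpuFraction := float64(cpuRequested) / float64(cpuAllocatable)
	memFraction := float64(memRequested) / float64(memAllocatable)
	if cpuFraction >= 1 || memFraction >= 1 {
		// Assumed: an overcommitted resource scores 0, matching the test expectations.
		return 0
	}
	return int(10 - math.Abs(cpuFraction-memFraction)*10)
}

func main() {
	// CPU 6000/10000 = 60%, memory 5000/20000 = 25% -> 10 - 0.35*10 = 6.5, truncated to 6.
	fmt.Println(balancedScore(6000, 10000, 5000, 20000)) // 6
}
```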
@@ -20,8 +20,8 @@ import (
"reflect"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)
@@ -35,29 +35,29 @@ func TestBalancedResourceAllocation(t *testing.T) {
"bar": "foo",
"baz": "blah",
}
machine1Spec := api.PodSpec{
machine1Spec := v1.PodSpec{
NodeName: "machine1",
}
machine2Spec := api.PodSpec{
machine2Spec := v1.PodSpec{
NodeName: "machine2",
}
noResources := api.PodSpec{
Containers: []api.Container{},
noResources := v1.PodSpec{
Containers: []v1.Container{},
}
cpuOnly := api.PodSpec{
cpuOnly := v1.PodSpec{
NodeName: "machine1",
Containers: []api.Container{
Containers: []v1.Container{
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1000m"),
"memory": resource.MustParse("0"),
},
},
},
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2000m"),
"memory": resource.MustParse("0"),
},
@@ -67,20 +67,20 @@ func TestBalancedResourceAllocation(t *testing.T) {
}
cpuOnly2 := cpuOnly
cpuOnly2.NodeName = "machine2"
cpuAndMemory := api.PodSpec{
cpuAndMemory := v1.PodSpec{
NodeName: "machine2",
Containers: []api.Container{
Containers: []v1.Container{
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("1000m"),
"memory": resource.MustParse("2000"),
},
},
},
{
Resources: api.ResourceRequirements{
Requests: api.ResourceList{
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
"cpu": resource.MustParse("2000m"),
"memory": resource.MustParse("3000"),
},
@@ -89,9 +89,9 @@ func TestBalancedResourceAllocation(t *testing.T) {
},
}
tests := []struct {
pod *api.Pod
pods []*api.Pod
nodes []*api.Node
pod *v1.Pod
pods []*v1.Pod
nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
test string
}{
@@ -107,8 +107,8 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 0 / 10000 = 0%
Node2 Score: 10 - (0-0)*10 = 10
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "nothing scheduled, nothing requested",
},
@@ -124,8 +124,8 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 5000/10000 = 50%
Node2 Score: 10 - (0.5-0.5)*10 = 10
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 10}},
test: "nothing scheduled, resources requested, differently sized machines",
},
@@ -141,15 +141,15 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 0 / 10000 = 0%
Node2 Score: 10 - (0-0)*10 = 10
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
test: "no resources requested, pods scheduled",
pods: []*api.Pod{
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*v1.Pod{
{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
},
},
{
@@ -164,15 +164,15 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 5000 / 20000 = 25%
Node2 Score: 10 - (0.6-0.25)*10 = 6
*/
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}},
test: "no resources requested, pods scheduled with resources",
pods: []*api.Pod{
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []*v1.Pod{
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
{Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
{Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
},
},
{
@@ -187,11 +187,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 10000 / 20000 = 50%
Node2 Score: 10 - (0.6-0.5)*10 = 9
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}},
test: "resources requested, pods scheduled with resources",
pods: []*api.Pod{
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -208,11 +208,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction: 10000 / 50000 = 20%
Node2 Score: 10 - (0.6-0.2)*10 = 6
*/
pod: &api.Pod{Spec: cpuAndMemory},
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
pod: &v1.Pod{Spec: cpuAndMemory},
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}},
test: "resources requested, pods scheduled with resources, differently sized machines",
pods: []*api.Pod{
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -229,21 +229,21 @@ func TestBalancedResourceAllocation(t *testing.T) {
Memory Fraction 5000 / 10000 = 50%
Node2 Score: 0
*/
pod: &api.Pod{Spec: cpuOnly},
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
pod: &v1.Pod{Spec: cpuOnly},
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "requested resources exceed node capacity",
pods: []*api.Pod{
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
},
{
pod: &api.Pod{Spec: noResources},
nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
pod: &v1.Pod{Spec: noResources},
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
test: "zero node resources, pods scheduled with resources",
pods: []*api.Pod{
pods: []*v1.Pod{
{Spec: cpuOnly},
{Spec: cpuAndMemory},
},
@@ -19,7 +19,7 @@ package priorities
import (
"fmt"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)
@@ -29,7 +29,7 @@ import (
// based on the total size of those images.
// - If none of the images are present, this node will be given the lowest priority.
// - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority.
func ImageLocalityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -66,7 +66,7 @@ func calculateScoreFromSize(sumSize int64) int {
}
// checkContainerImageOnNode checks if a container image is present on a node and returns its size.
func checkContainerImageOnNode(node *api.Node, container *api.Container) int64 {
func checkContainerImageOnNode(node *v1.Node, container *v1.Container) int64 {
for _, image := range node.Status.Images {
for _, name := range image.Names {
if container.Image == name {
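Editor's note: the body of calculateScoreFromSize is not shown in this hunk, but the test file below spells out the normalization it applies: image data below roughly 23MB scores 0, every further ~97.7MB adds about one point, and the result is capped at 10 (the "if exceed limit, use limit" case). A hedged standalone sketch that reproduces the numbers the test comments expect; the constants and rounding are inferred from those comments, not copied from the suppressed source:

```go
package main

import "fmt"

const (
	minImgSize int64 = 23 * 1024 * 1024   // ~23MB floor, per the test comment "(250M-23M)/97.7M + 1 = 3"
	maxImgSize int64 = 1000 * 1024 * 1024 // ~1000MB cap implied by "if exceed limit, use limit"
)

// imageScore scales the total size of locally present images into a 0-10 score.
func imageScore(sumSize int64) int {
	if sumSize < minImgSize {
		return 0
	}
	score := int(int64(10)*(sumSize-minImgSize)/(maxImgSize-minImgSize)) + 1
	if score > 10 {
		score = 10
	}
	return score
}

func main() {
	mb := int64(1024 * 1024)
	// Matches the expected scores in the test cases below.
	fmt.Println(imageScore(40*mb), imageScore(180*mb), imageScore(250*mb)) // 1 2 3
}
```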
@@ -21,14 +21,14 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
func TestImageLocalityPriority(t *testing.T) {
|
||||
test_40_250 := api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
test_40_250 := v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: "gcr.io/40",
|
||||
},
|
||||
@@ -38,8 +38,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
test_40_140 := api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
test_40_140 := v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: "gcr.io/40",
|
||||
},
|
||||
@@ -49,8 +49,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
test_min_max := api.PodSpec{
|
||||
Containers: []api.Container{
|
||||
test_min_max := v1.PodSpec{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Image: "gcr.io/10",
|
||||
},
|
||||
@@ -60,8 +60,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
node_40_140_2000 := api.NodeStatus{
|
||||
Images: []api.ContainerImage{
|
||||
node_40_140_2000 := v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/40",
|
||||
@@ -86,8 +86,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
node_250_10 := api.NodeStatus{
|
||||
Images: []api.ContainerImage{
|
||||
node_250_10 := v1.NodeStatus{
|
||||
Images: []v1.ContainerImage{
|
||||
{
|
||||
Names: []string{
|
||||
"gcr.io/250",
|
||||
@@ -105,9 +105,9 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
@@ -121,8 +121,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
// Node2
|
||||
// Image: gcr.io/250 250MB
|
||||
// Score: (250M-23M)/97.7M + 1 = 3
|
||||
pod: &api.Pod{Spec: test_40_250},
|
||||
nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
|
||||
pod: &v1.Pod{Spec: test_40_250},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
|
||||
test: "two images spread on two nodes, prefer the larger image one",
|
||||
},
|
||||
@@ -136,8 +136,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
// Node2
|
||||
// Image: not present
|
||||
// Score: 0
|
||||
pod: &api.Pod{Spec: test_40_140},
|
||||
nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
|
||||
pod: &v1.Pod{Spec: test_40_140},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
|
||||
test: "two images on one node, prefer this node",
|
||||
},
|
||||
@@ -151,8 +151,8 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
// Node2
|
||||
// Image: gcr.io/10 10MB
|
||||
// Score: 10 < min score = 0
|
||||
pod: &api.Pod{Spec: test_min_max},
|
||||
nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
|
||||
pod: &v1.Pod{Spec: test_min_max},
|
||||
nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "if exceed limit, use limit",
|
||||
},
|
||||
@@ -174,9 +174,9 @@ func TestImageLocalityPriority(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func makeImageNode(node string, status api.NodeStatus) *api.Node {
|
||||
return &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: node},
|
||||
func makeImageNode(node string, status v1.NodeStatus) *v1.Node {
|
||||
return &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: node},
|
||||
Status: status,
|
||||
}
|
||||
}
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
|
||||
@@ -57,7 +57,7 @@ type podAffinityPriorityMap struct {
|
||||
sync.Mutex
|
||||
|
||||
// nodes contain all nodes that should be considered
|
||||
nodes []*api.Node
|
||||
nodes []*v1.Node
|
||||
// counts store the mapping from node name to so-far computed score of
|
||||
// the node.
|
||||
counts map[string]float64
|
||||
@@ -67,7 +67,7 @@ type podAffinityPriorityMap struct {
|
||||
firstError error
|
||||
}
|
||||
|
||||
func newPodAffinityPriorityMap(nodes []*api.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap {
|
||||
func newPodAffinityPriorityMap(nodes []*v1.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap {
|
||||
return &podAffinityPriorityMap{
|
||||
nodes: nodes,
|
||||
counts: make(map[string]float64, len(nodes)),
|
||||
@@ -83,7 +83,7 @@ func (p *podAffinityPriorityMap) setError(err error) {
|
||||
}
|
||||
}
|
||||
|
||||
func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, weight float64) {
|
||||
func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, weight float64) {
|
||||
match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, podDefiningAffinityTerm, term)
|
||||
if err != nil {
|
||||
p.setError(err)
|
||||
@@ -102,7 +102,7 @@ func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefin
|
||||
}
|
||||
}
|
||||
|
||||
func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, multiplier int) {
|
||||
func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) {
|
||||
for i := range terms {
|
||||
term := &terms[i]
|
||||
p.processTerm(&term.PodAffinityTerm, podDefiningAffinityTerm, podToCheck, fixedNode, float64(term.Weight*int32(multiplier)))
|
||||
@@ -114,8 +114,8 @@ func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTer
|
||||
// that node; the node(s) with the highest sum are the most preferred.
|
||||
// Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
|
||||
// symmetry need to be considered for hard requirements from podAffinity
|
||||
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
||||
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||
affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -134,12 +134,12 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
|
||||
// the node.
|
||||
pm := newPodAffinityPriorityMap(nodes, ipa.failureDomains)
|
||||
|
||||
processPod := func(existingPod *api.Pod) error {
|
||||
processPod := func(existingPod *v1.Pod) error {
|
||||
existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
existingPodAffinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations)
|
||||
existingPodAffinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
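CalculateInterPodAffinityPriority above accumulates a weighted count per node (positive for affinity matches, negative for anti-affinity matches) and then maps those counts onto the 0-10 HostPriority scale. The conversion itself is outside this hunk, but the expected scores in the tests below (for example 24 vs 12 weighted matches yielding 10 vs 5, and a purely negative count yielding 0) are consistent with a linear min/max normalization. A sketch of that final step; the helper name is an illustrative assumption:

package priorities

import schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"

// normalizeAffinityCountsSketch converts accumulated per-node weights into
// 0-10 scores: the lowest count (never above zero here) maps to 0 and the
// highest maps to 10, with everything else scaled linearly in between.
func normalizeAffinityCountsSketch(counts map[string]float64, nodeNames []string) schedulerapi.HostPriorityList {
	var minCount, maxCount float64 // both start at 0, so anti-affinity can only lower minCount
	for _, name := range nodeNames {
		if counts[name] > maxCount {
			maxCount = counts[name]
		}
		if counts[name] < minCount {
			minCount = counts[name]
		}
	}
	result := make(schedulerapi.HostPriorityList, 0, len(nodeNames))
	for _, name := range nodeNames {
		score := 0
		if maxCount > minCount {
			score = int(10 * (counts[name] - minCount) / (maxCount - minCount))
		}
		result = append(result, schedulerapi.HostPriority{Host: name, Score: score})
	}
	return result
}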
@@ -22,17 +22,17 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
type FakeNodeListInfo []*api.Node
|
||||
type FakeNodeListInfo []*v1.Node
|
||||
|
||||
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
|
||||
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
|
||||
for _, node := range nodes {
|
||||
if node.Name == nodeName {
|
||||
return node, nil
|
||||
@@ -66,7 +66,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}
|
||||
// considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity
|
||||
stayWithS1InRegion := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
@@ -85,7 +85,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}}`,
|
||||
}
|
||||
stayWithS2InRegion := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 6,
|
||||
@@ -104,7 +104,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}}`,
|
||||
}
|
||||
affinity3 := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
@@ -144,7 +144,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}}`,
|
||||
}
|
||||
hardAffinity := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
@@ -174,7 +174,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}}`,
|
||||
}
|
||||
awayFromS1InAz := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
@@ -194,7 +194,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}
|
||||
// to stay away from security S2 in any az.
|
||||
awayFromS2InAz := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
@@ -214,7 +214,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}
|
||||
// to stay with security S1 in same region, stay away from security S2 in any az.
|
||||
stayWithS1InRegionAwayFromS2InAz := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 8,
|
||||
@@ -250,18 +250,18 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "all machines are same priority as Affinity is nil",
|
||||
@@ -270,16 +270,16 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// the node(machine3) that doesn't have the label {"region": "whatever the value is"} (mismatching the topology key) but does have existing pods that match the labelSelector gets a low score
|
||||
// the node(machine2) that has the label {"region": "China"} (matching the topology key) but whose existing pods mismatch the labelSelector gets a low score
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
|
||||
@@ -290,14 +290,14 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// the node3(machine3) that has the label {"region": "India"}, matching the topology key but with a different label value, and doesn't have existing pods that match the labelSelector,
|
||||
// gets a low score.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegion}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegion}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
|
||||
@@ -307,37 +307,37 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score),
|
||||
// while all the nodes in regionIndia should get another same score(low score).
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
|
||||
test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
|
||||
},
|
||||
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
|
||||
@@ -345,29 +345,29 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not that the pod wants to run together with some existing pods,
|
||||
// but that the existing pods have the inter pod affinity preference while the pod to schedule satisfies the preference.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||
@@ -380,69 +380,69 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels.
|
||||
// But there are more pods on node1 that match the preference than on node2. Then, node1 gets a lower score than node2.
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
|
||||
},
|
||||
// Test the symmetry cases for anti affinity
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
|
||||
},
|
||||
// Test both affinity and anti-affinity
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
|
||||
@@ -452,22 +452,22 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// so that all the pods of an RC/service can stay in the same region while trying to keep apart from each other
|
||||
// machine-1, machine-3 and machine-4 are in ChinaRegion; the others, machine-2 and machine-5, are in IndiaRegion
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}},
|
||||
test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
|
||||
@@ -478,18 +478,18 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
// for Affinity symmetry, the weights are: 0, 0, 8, 0
|
||||
// for Anti Affinity symmetry, the weights are: 0, 0, 0, -5
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Annotations: awayFromS1InAz}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}},
|
||||
test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
|
||||
@@ -501,8 +501,8 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: algorithm.FakeNodeLister(test.nodes),
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
||||
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")},
|
||||
}
|
||||
list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
||||
if err != nil {
|
||||
@@ -528,7 +528,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
"az": "az1",
|
||||
}
|
||||
hardPodAffinity := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAffinity": {
|
||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
@@ -546,38 +546,38 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
|
||||
}}`,
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
hardPodAffinityWeight int
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
||||
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||
},
|
||||
hardPodAffinityWeight: 0,
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
@@ -613,7 +613,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
||||
"security": "S1",
|
||||
}
|
||||
antiAffinity1 := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"podAntiAffinity": {
|
||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||
"weight": 5,
|
||||
@@ -632,36 +632,36 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
||||
}}`,
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
failureDomains priorityutil.Topologies
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||
},
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
||||
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||
},
|
||||
failureDomains: priorityutil.Topologies{},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||
@@ -674,7 +674,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
||||
info: FakeNodeListInfo(test.nodes),
|
||||
nodeLister: algorithm.FakeNodeLister(test.nodes),
|
||||
podLister: algorithm.FakePodLister(test.pods),
|
||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
||||
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||
failureDomains: test.failureDomains,
|
||||
}
|
||||
list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
||||
|
@@ -19,7 +19,7 @@ package priorities
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
|
||||
// based on the minimum of the average of the fraction of requested to capacity.
|
||||
// Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2
|
||||
func LeastRequestedPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func LeastRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
var nonZeroRequest *schedulercache.Resource
|
||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||
nonZeroRequest = priorityMeta.nonZeroRequest
|
||||
@@ -59,7 +59,7 @@ func calculateUnusedScore(requested int64, capacity int64, node string) int64 {
|
||||
// Calculates host priority based on the amount of unused resources.
|
||||
// 'node' has information about the resources on the node.
|
||||
// 'pods' is a list of pods currently scheduled on the node.
|
||||
func calculateUnusedPriority(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func calculateUnusedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||
|
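The formula in the comment above, together with the calculateUnusedScore signature in this hunk and the worked examples in the tests below (e.g. ((10000 - 0) * 10) / 10000 = 10, and (0 + 5) / 2 = 2 when requests exceed capacity), determines the per-resource score. A self-contained sketch, assuming integer division as the int64 signature suggests; the fractional intermediate values shown in the test comments (2.5, 7.5) land on the same final scores in these cases:

package main

import "fmt"

// unusedScoreSketch follows ((capacity - requested) * 10) / capacity,
// returning 0 when there is no capacity or requests already exceed it.
func unusedScoreSketch(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	cpu := unusedScoreSketch(3000, 4000) // ((4000-3000)*10)/4000 = 2
	mem := unusedScoreSketch(0, 10000)   // ((10000-0)*10)/10000 = 10
	fmt.Println((cpu + mem) / 2)         // node score: 6
}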
@@ -20,8 +20,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
@@ -35,29 +35,29 @@ func TestLeastRequested(t *testing.T) {
|
||||
"bar": "foo",
|
||||
"baz": "blah",
|
||||
}
|
||||
machine1Spec := api.PodSpec{
|
||||
machine1Spec := v1.PodSpec{
|
||||
NodeName: "machine1",
|
||||
}
|
||||
machine2Spec := api.PodSpec{
|
||||
machine2Spec := v1.PodSpec{
|
||||
NodeName: "machine2",
|
||||
}
|
||||
noResources := api.PodSpec{
|
||||
Containers: []api.Container{},
|
||||
noResources := v1.PodSpec{
|
||||
Containers: []v1.Container{},
|
||||
}
|
||||
cpuOnly := api.PodSpec{
|
||||
cpuOnly := v1.PodSpec{
|
||||
NodeName: "machine1",
|
||||
Containers: []api.Container{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("1000m"),
|
||||
"memory": resource.MustParse("0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("2000m"),
|
||||
"memory": resource.MustParse("0"),
|
||||
},
|
||||
@@ -67,20 +67,20 @@ func TestLeastRequested(t *testing.T) {
|
||||
}
|
||||
cpuOnly2 := cpuOnly
|
||||
cpuOnly2.NodeName = "machine2"
|
||||
cpuAndMemory := api.PodSpec{
|
||||
cpuAndMemory := v1.PodSpec{
|
||||
NodeName: "machine2",
|
||||
Containers: []api.Container{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("1000m"),
|
||||
"memory": resource.MustParse("2000"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("2000m"),
|
||||
"memory": resource.MustParse("3000"),
|
||||
},
|
||||
@@ -89,9 +89,9 @@ func TestLeastRequested(t *testing.T) {
|
||||
},
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
@@ -107,8 +107,8 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((10000 - 0) *10) / 10000 = 10
|
||||
Node2 Score: (10 + 10) / 2 = 10
|
||||
*/
|
||||
pod: &api.Pod{Spec: noResources},
|
||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||
test: "nothing scheduled, nothing requested",
|
||||
},
|
||||
@@ -124,8 +124,8 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((10000 - 5000) *10) / 10000 = 5
|
||||
Node2 Score: (5 + 5) / 2 = 5
|
||||
*/
|
||||
pod: &api.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
|
||||
test: "nothing scheduled, resources requested, differently sized machines",
|
||||
},
|
||||
@@ -141,15 +141,15 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((10000 - 0) *10) / 10000 = 10
|
||||
Node2 Score: (10 + 10) / 2 = 10
|
||||
*/
|
||||
pod: &api.Pod{Spec: noResources},
|
||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||
test: "no resources requested, pods scheduled",
|
||||
pods: []*api.Pod{
|
||||
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -164,15 +164,15 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
|
||||
Node2 Score: (4 + 7.5) / 2 = 5
|
||||
*/
|
||||
pod: &api.Pod{Spec: noResources},
|
||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
|
||||
test: "no resources requested, pods scheduled with resources",
|
||||
pods: []*api.Pod{
|
||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -187,11 +187,11 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((20000 - 10000) *10) / 20000 = 5
|
||||
Node2 Score: (4 + 5) / 2 = 4
|
||||
*/
|
||||
pod: &api.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
|
||||
test: "resources requested, pods scheduled with resources",
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly},
|
||||
{Spec: cpuAndMemory},
|
||||
},
|
||||
@@ -208,11 +208,11 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((50000 - 10000) *10) / 50000 = 8
|
||||
Node2 Score: (4 + 8) / 2 = 6
|
||||
*/
|
||||
pod: &api.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
|
||||
test: "resources requested, pods scheduled with resources, differently sized machines",
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly},
|
||||
{Spec: cpuAndMemory},
|
||||
},
|
||||
@@ -229,21 +229,21 @@ func TestLeastRequested(t *testing.T) {
|
||||
Memory Score: ((10000 - 5000) *10) / 10000 = 5
|
||||
Node2 Score: (0 + 5) / 2 = 2
|
||||
*/
|
||||
pod: &api.Pod{Spec: cpuOnly},
|
||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
pod: &v1.Pod{Spec: cpuOnly},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
|
||||
test: "requested resources exceed node capacity",
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly},
|
||||
{Spec: cpuAndMemory},
|
||||
},
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{Spec: noResources},
|
||||
nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||
test: "zero node resources, pods scheduled with resources",
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly},
|
||||
{Spec: cpuAndMemory},
|
||||
},
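
To make the expected scores in the TestLeastRequested cases above easier to follow, here is a minimal, runnable illustration of the least-requested formula the comments spell out. It is not part of this commit; leastRequestedScore is an invented name, and integer truncation is assumed, which is why the 7.5 in the comment becomes 7 here while both paths still land on a score of 5.

package main

import "fmt"

// leastRequestedScore mirrors the per-resource formula from the comments above:
// (capacity - requested) * 10 / capacity. An idle node scores 10, a fully
// requested node scores 0.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	// machine2 in "no resources requested, pods scheduled with resources":
	// 6000m CPU requested of 10000m, 5000 memory requested of 20000.
	cpu := leastRequestedScore(6000, 10000) // 4
	mem := leastRequestedScore(5000, 20000) // 7 (the comment's 7.5, truncated)
	fmt.Println((cpu + mem) / 2)            // 5, matching the expected HostPriority
}
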
@@ -17,19 +17,19 @@ limitations under the License.
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
// priorityMetadata is a type that is passed as metadata for priority functions
|
||||
type priorityMetadata struct {
|
||||
nonZeroRequest *schedulercache.Resource
|
||||
podTolerations []api.Toleration
|
||||
affinity *api.Affinity
|
||||
podTolerations []v1.Toleration
|
||||
affinity *v1.Affinity
|
||||
}
|
||||
|
||||
// PriorityMetadata is a MetadataProducer. Node info can be nil.
|
||||
func PriorityMetadata(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
|
||||
func PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
|
||||
// If we cannot compute metadata, just return nil
|
||||
if pod == nil {
|
||||
return nil
|
||||
@@ -38,7 +38,7 @@ func PriorityMetadata(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
@@ -19,7 +19,7 @@ package priorities
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
|
||||
@@ -30,7 +30,7 @@ import (
|
||||
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
|
||||
// based on the maximum of the average of the fraction of requested to capacity.
|
||||
// Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2
|
||||
func MostRequestedPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func MostRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
var nonZeroRequest *schedulercache.Resource
|
||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||
nonZeroRequest = priorityMeta.nonZeroRequest
|
||||
@@ -62,7 +62,7 @@ func calculateUsedScore(requested int64, capacity int64, node string) int64 {
|
||||
|
||||
// Calculate the resource used on a node. 'node' has information about the resources on the node.
|
||||
// 'pods' is a list of pods currently scheduled on the node.
|
||||
func calculateUsedPriority(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func calculateUsedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
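
The "Details" comment above reduces MostRequestedPriority to 10 * sum(requested) / capacity per resource, averaged over CPU and memory. The snippet below is an illustrative sketch of exactly that arithmetic, not the real calculateUsedScore (whose body this hunk truncates); mostRequestedScore is an invented name.

package main

import "fmt"

// mostRequestedScore follows the quoted formula: 10 * sum(requested) / capacity,
// so a busier node scores higher.
func mostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (requested * 10) / capacity
}

func main() {
	// machine1 in "nothing scheduled, resources requested, differently sized machines":
	// the incoming pod asks for 3000m CPU of 4000m and 5000 memory of 10000.
	cpu := mostRequestedScore(3000, 4000)  // 7 (7.5 truncated)
	mem := mostRequestedScore(5000, 10000) // 5
	fmt.Println((cpu + mem) / 2)           // 6, the expected score for machine1
}
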
@@ -20,8 +20,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
@@ -35,23 +35,23 @@ func TestMostRequested(t *testing.T) {
|
||||
"bar": "foo",
|
||||
"baz": "blah",
|
||||
}
|
||||
noResources := api.PodSpec{
|
||||
Containers: []api.Container{},
|
||||
noResources := v1.PodSpec{
|
||||
Containers: []v1.Container{},
|
||||
}
|
||||
cpuOnly := api.PodSpec{
|
||||
cpuOnly := v1.PodSpec{
|
||||
NodeName: "machine1",
|
||||
Containers: []api.Container{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("1000m"),
|
||||
"memory": resource.MustParse("0"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("2000m"),
|
||||
"memory": resource.MustParse("0"),
|
||||
},
|
||||
@@ -61,20 +61,20 @@ func TestMostRequested(t *testing.T) {
|
||||
}
|
||||
cpuOnly2 := cpuOnly
|
||||
cpuOnly2.NodeName = "machine2"
|
||||
cpuAndMemory := api.PodSpec{
|
||||
cpuAndMemory := v1.PodSpec{
|
||||
NodeName: "machine2",
|
||||
Containers: []api.Container{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("1000m"),
|
||||
"memory": resource.MustParse("2000"),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Resources: api.ResourceRequirements{
|
||||
Requests: api.ResourceList{
|
||||
Resources: v1.ResourceRequirements{
|
||||
Requests: v1.ResourceList{
|
||||
"cpu": resource.MustParse("2000m"),
|
||||
"memory": resource.MustParse("3000"),
|
||||
},
|
||||
@@ -83,9 +83,9 @@ func TestMostRequested(t *testing.T) {
|
||||
},
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
@@ -101,8 +101,8 @@ func TestMostRequested(t *testing.T) {
|
||||
Memory Score: (0 * 10) / 10000 = 0
|
||||
Node2 Score: (0 + 0) / 2 = 0
|
||||
*/
|
||||
pod: &api.Pod{Spec: noResources},
|
||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||
test: "nothing scheduled, nothing requested",
|
||||
},
|
||||
@@ -118,8 +118,8 @@ func TestMostRequested(t *testing.T) {
|
||||
Memory Score: (5000 * 10) / 10000 = 5
|
||||
Node2 Score: (5 + 5) / 2 = 5
|
||||
*/
|
||||
pod: &api.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
|
||||
test: "nothing scheduled, resources requested, differently sized machines",
|
||||
},
|
||||
@@ -135,15 +135,15 @@ func TestMostRequested(t *testing.T) {
|
||||
Memory Score: (5000 * 10) / 20000 = 2.5
|
||||
Node2 Score: (6 + 2.5) / 2 = 4
|
||||
*/
|
||||
pod: &api.Pod{Spec: noResources},
|
||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
pod: &v1.Pod{Spec: noResources},
|
||||
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
|
||||
test: "no resources requested, pods scheduled with resources",
|
||||
pods: []*api.Pod{
|
||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -158,11 +158,11 @@ func TestMostRequested(t *testing.T) {
|
||||
Memory Score: (10000 * 10) / 20000 = 5
|
||||
Node2 Score: (6 + 5) / 2 = 5
|
||||
*/
|
||||
pod: &api.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
|
||||
test: "resources requested, pods scheduled with resources",
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
{Spec: cpuOnly},
|
||||
{Spec: cpuAndMemory},
|
||||
},
|
||||
|
@@ -20,7 +20,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
@@ -31,19 +31,19 @@ import (
|
||||
// it will get an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
|
||||
// the node satisfies and the greater the weights of the satisfied preferredSchedulingTerms, the higher
|
||||
// score the node gets.
|
||||
func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||
}
|
||||
|
||||
var affinity *api.Affinity
|
||||
var affinity *v1.Affinity
|
||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||
affinity = priorityMeta.affinity
|
||||
} else {
|
||||
// We couldn't parse metadata - fallback to computing it.
|
||||
var err error
|
||||
affinity, err = api.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
affinity, err = v1.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||
if err != nil {
|
||||
return schedulerapi.HostPriority{}, err
|
||||
}
|
||||
@@ -62,7 +62,7 @@ func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *
|
||||
}
|
||||
|
||||
// TODO: Avoid computing it for all nodes if this becomes a performance problem.
|
||||
nodeSelector, err := api.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
|
||||
nodeSelector, err := v1.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
|
||||
if err != nil {
|
||||
return schedulerapi.HostPriority{}, err
|
||||
}
|
||||
@@ -78,7 +78,7 @@ func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *
|
||||
}, nil
|
||||
}
|
||||
|
||||
func CalculateNodeAffinityPriorityReduce(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
|
||||
func CalculateNodeAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
|
||||
var maxCount int
|
||||
for i := range result {
|
||||
if result[i].Score > maxCount {
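
CalculateNodeAffinityPriorityMap accumulates the weights of matched preferred terms and CalculateNodeAffinityPriorityReduce rescales them against the highest count. The sketch below illustrates that two-step shape only; preferredTerm, affinityScore and normalize are invented names, and term matching is reduced to a label-existence check rather than real NodeSelectorTerm evaluation.

package main

import "fmt"

type preferredTerm struct {
	weight        int
	requiredLabel string // stand-in for real NodeSelectorTerm matching
}

// affinityScore sums the weights of every preferred term the node satisfies,
// which is what the map step above accumulates per node.
func affinityScore(nodeLabels map[string]string, terms []preferredTerm) int {
	score := 0
	for _, t := range terms {
		if _, ok := nodeLabels[t.requiredLabel]; ok {
			score += t.weight
		}
	}
	return score
}

// normalize rescales raw counts to 0-10, as the reduce step does with maxCount.
func normalize(raw []int) []int {
	max := 0
	for _, r := range raw {
		if r > max {
			max = r
		}
	}
	out := make([]int, len(raw))
	if max == 0 {
		return out
	}
	for i, r := range raw {
		out[i] = r * 10 / max
	}
	return out
}

func main() {
	terms := []preferredTerm{{weight: 2, requiredLabel: "foo"}, {weight: 5, requiredLabel: "key"}}
	raw := []int{
		affinityScore(map[string]string{"foo": "bar"}, terms),                 // 2
		affinityScore(map[string]string{"foo": "bar", "key": "value"}, terms), // 7
	}
	fmt.Println(normalize(raw)) // [2 10]
}
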
@@ -20,7 +20,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
@@ -33,7 +33,7 @@ func TestNodeAffinityPriority(t *testing.T) {
|
||||
label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"}
|
||||
|
||||
affinity1 := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 2,
|
||||
@@ -50,7 +50,7 @@ func TestNodeAffinityPriority(t *testing.T) {
|
||||
}
|
||||
|
||||
affinity2 := map[string]string{
|
||||
api.AffinityAnnotationKey: `
|
||||
v1.AffinityAnnotationKey: `
|
||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||
{
|
||||
"weight": 2,
|
||||
@@ -91,63 +91,63 @@ func TestNodeAffinityPriority(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Annotations: map[string]string{},
|
||||
},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "all machines are same priority as NodeAffinity is nil",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Annotations: affinity1,
|
||||
},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label4}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label4}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Annotations: affinity1,
|
||||
},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
test: "only machine1 matches the preferred scheduling requirements of pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Annotations: affinity2,
|
||||
},
|
||||
},
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: label5}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
|
||||
test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
|
||||
|
@@ -19,7 +19,7 @@ package priorities
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
@@ -42,7 +42,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun
|
||||
// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
|
||||
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
|
||||
// If presence is false, prioritizes nodes that do not have the specified label.
|
||||
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
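
CalculateNodeLabelPriorityMap boils down to a presence check against a single label. Below is a minimal illustration of that rule with plain maps instead of the scheduler's types; nodeLabelScore is an invented name.

package main

import "fmt"

// nodeLabelScore returns 10 when the label's existence on the node agrees with
// the desired presence flag, and 0 otherwise - the value of the label is ignored.
func nodeLabelScore(nodeLabels map[string]string, label string, presence bool) int {
	_, exists := nodeLabels[label]
	if exists == presence {
		return 10
	}
	return 0
}

func main() {
	labels := map[string]string{"foo": "bar"}
	fmt.Println(nodeLabelScore(labels, "foo", true))  // 10: label present, presence wanted
	fmt.Println(nodeLabelScore(labels, "baz", true))  // 0: label missing, presence wanted
	fmt.Println(nodeLabelScore(labels, "baz", false)) // 10: label missing, absence wanted
}
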
@@ -21,7 +21,7 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
@@ -31,17 +31,17 @@ func TestNewNodeLabelPriority(t *testing.T) {
|
||||
label2 := map[string]string{"bar": "foo"}
|
||||
label3 := map[string]string{"bar": "baz"}
|
||||
tests := []struct {
|
||||
nodes []*api.Node
|
||||
nodes []*v1.Node
|
||||
label string
|
||||
presence bool
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
label: "baz",
|
||||
@@ -49,10 +49,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
|
||||
test: "no match found, presence true",
|
||||
},
|
||||
{
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
||||
label: "baz",
|
||||
@@ -60,10 +60,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
|
||||
test: "no match found, presence false",
|
||||
},
|
||||
{
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
label: "foo",
|
||||
@@ -71,10 +71,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
|
||||
test: "one match found, presence true",
|
||||
},
|
||||
{
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
||||
label: "foo",
|
||||
@@ -82,10 +82,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
|
||||
test: "one match found, presence false",
|
||||
},
|
||||
{
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
||||
label: "bar",
|
||||
@@ -93,10 +93,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
|
||||
test: "two matches found, presence true",
|
||||
},
|
||||
{
|
||||
nodes: []*api.Node{
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
nodes: []*v1.Node{
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||
label: "bar",
|
||||
|
@@ -19,13 +19,13 @@ package priorities
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
func CalculateNodePreferAvoidPodsPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||
@@ -43,7 +43,7 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *api.Pod, meta interface{}, nod
|
||||
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
|
||||
}
|
||||
|
||||
avoids, err := api.GetAvoidPodsFromNodeAnnotations(node.Annotations)
|
||||
avoids, err := v1.GetAvoidPodsFromNodeAnnotations(node.Annotations)
|
||||
if err != nil {
|
||||
// If we cannot get annotation, assume it's schedulable there.
|
||||
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
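
The hunk above awards 10 unless the node's preferAvoidPods annotation names the controller that owns the incoming pod. The sketch below captures that rule with simplified stand-in types; ownerRef, avoidEntry and preferAvoidScore are all invented names, not the v1 API.

package main

import "fmt"

// ownerRef is a cut-down stand-in for v1.OwnerReference.
type ownerRef struct {
	kind       string
	uid        string
	controller bool
}

// avoidEntry is a cut-down stand-in for one entry of the preferAvoidPods annotation.
type avoidEntry struct {
	kind string
	uid  string
}

// preferAvoidScore returns 0 when any avoid entry matches the pod's controlling
// owner, and 10 otherwise (including when the pod has no controlling owner).
func preferAvoidScore(owners []ownerRef, avoids []avoidEntry) int {
	for _, o := range owners {
		if !o.controller {
			continue
		}
		for _, a := range avoids {
			if a.kind == o.kind && a.uid == o.uid {
				return 0
			}
		}
	}
	return 10
}

func main() {
	avoids := []avoidEntry{{kind: "ReplicationController", uid: "abcdef123456"}}
	managed := []ownerRef{{kind: "ReplicationController", uid: "abcdef123456", controller: true}}
	unmanaged := []ownerRef{{kind: "ReplicationController", uid: "abcdef123456", controller: false}}
	fmt.Println(preferAvoidScore(managed, avoids))   // 0: node asked to avoid this controller
	fmt.Println(preferAvoidScore(unmanaged, avoids)) // 10: owner without Controller set is ignored
}
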
@@ -21,14 +21,14 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
func TestNodePreferAvoidPriority(t *testing.T) {
|
||||
annotations1 := map[string]string{
|
||||
api.PreferAvoidPodsAnnotationKey: `
|
||||
v1.PreferAvoidPodsAnnotationKey: `
|
||||
{
|
||||
"preferAvoidPods": [
|
||||
{
|
||||
@@ -48,7 +48,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
|
||||
}`,
|
||||
}
|
||||
annotations2 := map[string]string{
|
||||
api.PreferAvoidPodsAnnotationKey: `
|
||||
v1.PreferAvoidPodsAnnotationKey: `
|
||||
{
|
||||
"preferAvoidPods": [
|
||||
{
|
||||
@@ -67,29 +67,29 @@ func TestNodePreferAvoidPriority(t *testing.T) {
|
||||
]
|
||||
}`,
|
||||
}
|
||||
testNodes := []*api.Node{
|
||||
testNodes := []*v1.Node{
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "machine1", Annotations: annotations1},
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine1", Annotations: annotations1},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "machine2", Annotations: annotations2},
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine2", Annotations: annotations2},
|
||||
},
|
||||
{
|
||||
ObjectMeta: api.ObjectMeta{Name: "machine3"},
|
||||
ObjectMeta: v1.ObjectMeta{Name: "machine3"},
|
||||
},
|
||||
}
|
||||
trueVar := true
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
OwnerReferences: []api.OwnerReference{
|
||||
OwnerReferences: []v1.OwnerReference{
|
||||
{Kind: "ReplicationController", Name: "foo", UID: "abcdef123456", Controller: &trueVar},
|
||||
},
|
||||
},
|
||||
@@ -99,10 +99,10 @@ func TestNodePreferAvoidPriority(t *testing.T) {
|
||||
test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
OwnerReferences: []api.OwnerReference{
|
||||
OwnerReferences: []v1.OwnerReference{
|
||||
{Kind: "RandomController", Name: "foo", UID: "abcdef123456", Controller: &trueVar},
|
||||
},
|
||||
},
|
||||
@@ -112,10 +112,10 @@ func TestNodePreferAvoidPriority(t *testing.T) {
|
||||
test: "ownership by random controller should be ignored",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
OwnerReferences: []api.OwnerReference{
|
||||
OwnerReferences: []v1.OwnerReference{
|
||||
{Kind: "ReplicationController", Name: "foo", UID: "abcdef123456"},
|
||||
},
|
||||
},
|
||||
@@ -125,10 +125,10 @@ func TestNodePreferAvoidPriority(t *testing.T) {
|
||||
test: "owner without Controller field set should be ignored",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
pod: &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Namespace: "default",
|
||||
OwnerReferences: []api.OwnerReference{
|
||||
OwnerReferences: []v1.OwnerReference{
|
||||
{Kind: "ReplicaSet", Name: "foo", UID: "qwert12345", Controller: &trueVar},
|
||||
},
|
||||
},
|
||||
|
@@ -20,8 +20,8 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
utilnode "k8s.io/kubernetes/pkg/util/node"
|
||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||
@@ -57,7 +57,7 @@ func NewSelectorSpreadPriority(
|
||||
}
|
||||
|
||||
// Returns selectors of services, RCs and RSs matching the given pod.
|
||||
func getSelectors(pod *api.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector {
|
||||
func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector {
|
||||
selectors := make([]labels.Selector, 0, 3)
|
||||
if services, err := sl.GetPodServices(pod); err == nil {
|
||||
for _, service := range services {
|
||||
@@ -79,7 +79,7 @@ func getSelectors(pod *api.Pod, sl algorithm.ServiceLister, cl algorithm.Control
|
||||
return selectors
|
||||
}
|
||||
|
||||
func (s *SelectorSpread) getSelectors(pod *api.Pod) []labels.Selector {
|
||||
func (s *SelectorSpread) getSelectors(pod *v1.Pod) []labels.Selector {
|
||||
return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister)
|
||||
}
|
||||
|
||||
@@ -89,7 +89,7 @@ func (s *SelectorSpread) getSelectors(pod *api.Pod) []labels.Selector {
|
||||
// i.e. it pushes the scheduler towards a node where there's the smallest number of
|
||||
// pods which match the same service, RC or RS selectors as the pod being scheduled.
|
||||
// Where zone information is included on the nodes, it favors nodes in zones with fewer existing matching pods.
|
||||
func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
||||
func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||
selectors := s.getSelectors(pod)
|
||||
|
||||
// Count similar pods by node
|
||||
@@ -199,8 +199,8 @@ func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister
|
||||
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
|
||||
// on machines with the same value for a particular label.
|
||||
// The label to be considered is provided to the struct (ServiceAntiAffinity).
|
||||
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
||||
var nsServicePods []*api.Pod
|
||||
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||
var nsServicePods []*v1.Pod
|
||||
if services, err := s.serviceLister.GetPodServices(pod); err == nil && len(services) > 0 {
|
||||
// just use the first service and get the other pods within the service
|
||||
// TODO: a separate predicate can be created that tries to handle all services for the pod
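
CalculateSpreadPriority favours the node carrying the fewest pods that match the same service/RC/RS selectors. The toy sketch below shows one way to express that, scaling scores to 0-10 against the busiest node, which is consistent with the expectations in the test file that follows; spreadScores is an invented name.

package main

import "fmt"

// spreadScores gives each node 10 when it hosts none of the matching pods and
// 0 when it hosts the most, mirroring "fewest matching pods wins".
func spreadScores(matchingPodsPerNode map[string]int) map[string]int {
	max := 0
	for _, n := range matchingPodsPerNode {
		if n > max {
			max = n
		}
	}
	scores := make(map[string]int, len(matchingPodsPerNode))
	for node, n := range matchingPodsPerNode {
		if max == 0 {
			scores[node] = 10
			continue
		}
		scores[node] = 10 * (max - n) / max
	}
	return scores
}

func main() {
	// "four pods, three service pods": machine1 hosts 1 matching pod, machine2 hosts 2.
	fmt.Println(spreadScores(map[string]int{"machine1": 1, "machine2": 2})) // map[machine1:5 machine2:0]
}
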
@@ -21,19 +21,19 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
func controllerRef(kind, name, uid string) []api.OwnerReference {
|
||||
func controllerRef(kind, name, uid string) []v1.OwnerReference {
|
||||
// TODO: When ControllerRef will be implemented uncomment code below.
|
||||
return nil
|
||||
//trueVar := true
|
||||
//return []api.OwnerReference{
|
||||
//return []v1.OwnerReference{
|
||||
// {Kind: kind, Name: name, UID: types.UID(uid), Controller: &trueVar},
|
||||
//}
|
||||
}
|
||||
@@ -47,208 +47,208 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
"bar": "foo",
|
||||
"baz": "blah",
|
||||
}
|
||||
zone1Spec := api.PodSpec{
|
||||
zone1Spec := v1.PodSpec{
|
||||
NodeName: "machine1",
|
||||
}
|
||||
zone2Spec := api.PodSpec{
|
||||
zone2Spec := v1.PodSpec{
|
||||
NodeName: "machine2",
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []string
|
||||
rcs []*api.ReplicationController
|
||||
rcs []*v1.ReplicationController
|
||||
rss []*extensions.ReplicaSet
|
||||
services []*api.Service
|
||||
services []*v1.Service
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: new(api.Pod),
|
||||
pod: new(v1.Pod),
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||
test: "nothing scheduled",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{{Spec: zone1Spec}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{{Spec: zone1Spec}},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||
test: "no services",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||
test: "different services",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "two pods, one service pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "five pods, one service pod in no namespace",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "four pods, one service pod in default namespace",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||
test: "five pods, one service pod in specific namespace",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||
test: "three pods, two service pods on different machines",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
|
||||
test: "four pods, three service pods",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
test: "service with partial pod label matches",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
|
||||
// do spreading between all pods. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
test: "service with partial pod label matches with service and replication controller",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
test: "service with partial pod label matches with service and replica set",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||
// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
test: "disjoined service and replication controller should be treated equally",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
test: "disjoined service and replica set should be treated equally",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||
// Both Nodes have one pod from the given RC, hence both get 0 score.
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||
test: "Replication controller with partial pod label matches",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||
@@ -257,23 +257,23 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
test: "Replica set with partial pod label matches",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||
test: "Another replication controller with partial pod label matches",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||
},
|
||||
nodes: []string{"machine1", "machine2"},
|
||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
|
||||
@@ -300,10 +300,10 @@ func TestSelectorSpreadPriority(t *testing.T) {
|
||||
}
|
||||
}
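For readers tracing the expected scores in the tables above: selector spreading favors nodes that already run fewer pods matched by the same controller or service selector. A worked check of the "partial pod label matches" cases, assuming the usual max-normalized formula (the committed priority code is authoritative):

// Hypothetical walk-through, not part of the commit: machine1 runs two pods
// matched by the controller selector, machine2 runs one.
maxCount := 2
scoreMachine1 := 10 * (maxCount - 2) / maxCount // 0, as in the expected list
scoreMachine2 := 10 * (maxCount - 1) / maxCount // 5, as in the expected list
_, _ = scoreMachine1, scoreMachine2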
|
||||
|
||||
func buildPod(nodeName string, labels map[string]string, ownerRefs []api.OwnerReference) *api.Pod {
|
||||
return &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs},
|
||||
Spec: api.PodSpec{NodeName: nodeName},
|
||||
func buildPod(nodeName string, labels map[string]string, ownerRefs []v1.OwnerReference) *v1.Pod {
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs},
|
||||
Spec: v1.PodSpec{NodeName: nodeName},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -340,17 +340,17 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes []string
|
||||
rcs []*api.ReplicationController
|
||||
rcs []*v1.ReplicationController
|
||||
rss []*extensions.ReplicaSet
|
||||
services []*api.Service
|
||||
services []*v1.Service
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: new(api.Pod),
|
||||
pod: new(v1.Pod),
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 10},
|
||||
{Host: nodeMachine1Zone2, Score: 10},
|
||||
@@ -363,7 +363,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, nil),
|
||||
pods: []*api.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
|
||||
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 10},
|
||||
{Host: nodeMachine1Zone2, Score: 10},
|
||||
@@ -376,8 +376,8 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, nil),
|
||||
pods: []*api.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 10},
|
||||
{Host: nodeMachine1Zone2, Score: 10},
|
||||
@@ -390,11 +390,11 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, nil),
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
buildPod(nodeMachine1Zone1, labels2, nil),
|
||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||
},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 10},
|
||||
{Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
|
||||
@@ -407,14 +407,14 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, nil),
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
buildPod(nodeMachine1Zone1, labels2, nil),
|
||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||
buildPod(nodeMachine2Zone2, labels1, nil),
|
||||
buildPod(nodeMachine1Zone3, labels2, nil),
|
||||
buildPod(nodeMachine2Zone3, labels1, nil),
|
||||
},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 10},
|
||||
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
||||
@@ -427,13 +427,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, nil),
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
buildPod(nodeMachine1Zone1, labels1, nil),
|
||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||
buildPod(nodeMachine2Zone2, labels2, nil),
|
||||
buildPod(nodeMachine1Zone3, labels1, nil),
|
||||
},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
|
||||
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
||||
@@ -446,13 +446,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, nil),
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
buildPod(nodeMachine1Zone1, labels1, nil),
|
||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||
buildPod(nodeMachine1Zone3, labels1, nil),
|
||||
buildPod(nodeMachine2Zone2, labels2, nil),
|
||||
},
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
|
||||
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
||||
@@ -465,12 +465,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
||||
},
|
||||
{
|
||||
pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||
pods: []*api.Pod{
|
||||
pods: []*v1.Pod{
|
||||
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||
buildPod(nodeMachine1Zone2, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||
},
|
||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: labels1}}},
|
||||
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
// Note that because we put two pods on the same node (nodeMachine1Zone3),
|
||||
// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
|
||||
@@ -528,13 +528,13 @@ func TestZoneSpreadPriority(t *testing.T) {
|
||||
nozone := map[string]string{
|
||||
"name": "value",
|
||||
}
|
||||
zone0Spec := api.PodSpec{
|
||||
zone0Spec := v1.PodSpec{
|
||||
NodeName: "machine01",
|
||||
}
|
||||
zone1Spec := api.PodSpec{
|
||||
zone1Spec := v1.PodSpec{
|
||||
NodeName: "machine11",
|
||||
}
|
||||
zone2Spec := api.PodSpec{
|
||||
zone2Spec := v1.PodSpec{
|
||||
NodeName: "machine21",
|
||||
}
|
||||
labeledNodes := map[string]map[string]string{
|
||||
@@ -543,15 +543,15 @@ func TestZoneSpreadPriority(t *testing.T) {
|
||||
"machine21": zone2, "machine22": zone2,
|
||||
}
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
pods []*api.Pod
|
||||
pod *v1.Pod
|
||||
pods []*v1.Pod
|
||||
nodes map[string]map[string]string
|
||||
services []*api.Service
|
||||
services []*v1.Service
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
{
|
||||
pod: new(api.Pod),
|
||||
pod: new(v1.Pod),
|
||||
nodes: labeledNodes,
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||
@@ -559,8 +559,8 @@ func TestZoneSpreadPriority(t *testing.T) {
|
||||
test: "nothing scheduled",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{{Spec: zone1Spec}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{{Spec: zone1Spec}},
|
||||
nodes: labeledNodes,
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||
@@ -568,97 +568,97 @@ func TestZoneSpreadPriority(t *testing.T) {
|
||||
test: "no services",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
test: "different services",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone0Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
test: "three pods, one service pod",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
|
||||
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
test: "three pods, two service pods on different machines",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||
},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
|
||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
test: "three service label match pods in different namespaces",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
|
||||
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
test: "four pods, three service pods",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
|
||||
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
test: "service with partial pod label matches",
|
||||
},
|
||||
{
|
||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pods: []*api.Pod{
|
||||
{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
||||
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
pods: []*v1.Pod{
|
||||
{Spec: zone0Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||
},
|
||||
nodes: labeledNodes,
|
||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
||||
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
|
||||
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||
@@ -682,18 +682,18 @@ func TestZoneSpreadPriority(t *testing.T) {
|
||||
}
|
||||
}
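The TestZoneSpreadPriority expectations follow the same idea applied per zone: the fewer matching service pods already in a node's zone, the higher that node's score. A worked check of the "four pods, three service pods" case, again assuming the proportional formula rather than quoting the implementation:

// Sketch only: zone1 holds one matching pod, zone2 holds two, and the
// machine01/machine02 nodes carry no zone label, so they score 0.
totalMatching := 3
scoreZone1 := 10 * (totalMatching - 1) / totalMatching // 6 for machine11/machine12
scoreZone2 := 10 * (totalMatching - 2) / totalMatching // 3 for machine21/machine22
_, _ = scoreZone1, scoreZone2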
|
||||
|
||||
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*api.Node {
|
||||
nodes := make([]*api.Node, 0, len(nodeMap))
|
||||
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
|
||||
nodes := make([]*v1.Node, 0, len(nodeMap))
|
||||
for nodeName, labels := range nodeMap {
|
||||
nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
|
||||
nodes = append(nodes, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName, Labels: labels}})
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
||||
func makeNodeList(nodeNames []string) []*api.Node {
|
||||
nodes := make([]*api.Node, 0, len(nodeNames))
|
||||
func makeNodeList(nodeNames []string) []*v1.Node {
|
||||
nodes := make([]*v1.Node, 0, len(nodeNames))
|
||||
for _, nodeName := range nodeNames {
|
||||
nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
|
||||
nodes = append(nodes, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName}})
|
||||
}
|
||||
return nodes
|
||||
}
|
||||
|
@@ -20,21 +20,21 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
// CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
|
||||
func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []api.Toleration) (intolerableTaints int) {
|
||||
func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) {
|
||||
for i := range taints {
|
||||
taint := &taints[i]
|
||||
// check only on taints that have effect PreferNoSchedule
|
||||
if taint.Effect != api.TaintEffectPreferNoSchedule {
|
||||
if taint.Effect != v1.TaintEffectPreferNoSchedule {
|
||||
continue
|
||||
}
|
||||
|
||||
if !api.TaintToleratedByTolerations(taint, tolerations) {
|
||||
if !v1.TaintToleratedByTolerations(taint, tolerations) {
|
||||
intolerableTaints++
|
||||
}
|
||||
}
|
||||
@@ -42,18 +42,18 @@ func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []ap
|
||||
}
|
||||
|
||||
// getAllTolerationEffectPreferNoSchedule gets the list of all Toleration with Effect PreferNoSchedule
|
||||
func getAllTolerationPreferNoSchedule(tolerations []api.Toleration) (tolerationList []api.Toleration) {
|
||||
func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) {
|
||||
for i := range tolerations {
|
||||
toleration := &tolerations[i]
|
||||
if len(toleration.Effect) == 0 || toleration.Effect == api.TaintEffectPreferNoSchedule {
|
||||
if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule {
|
||||
tolerationList = append(tolerationList, *toleration)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getTolerationListFromPod(pod *api.Pod) ([]api.Toleration, error) {
|
||||
tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
|
||||
func getTolerationListFromPod(pod *v1.Pod) ([]v1.Toleration, error) {
|
||||
tolerations, err := v1.GetTolerationsFromPodAnnotations(pod.Annotations)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -61,13 +61,13 @@ func getTolerationListFromPod(pod *api.Pod) ([]api.Toleration, error) {
|
||||
}
|
||||
|
||||
// ComputeTaintTolerationPriority prepares the priority list for all the nodes based on the number of intolerable taints on the node
|
||||
func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||
node := nodeInfo.Node()
|
||||
if node == nil {
|
||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||
}
|
||||
|
||||
var tolerationList []api.Toleration
|
||||
var tolerationList []v1.Toleration
|
||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||
tolerationList = priorityMeta.podTolerations
|
||||
} else {
|
||||
@@ -78,7 +78,7 @@ func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo
|
||||
}
|
||||
}
|
||||
|
||||
taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
|
||||
taints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
|
||||
if err != nil {
|
||||
return schedulerapi.HostPriority{}, err
|
||||
}
|
||||
@@ -88,7 +88,7 @@ func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo
|
||||
}, nil
|
||||
}
|
||||
|
||||
func ComputeTaintTolerationPriorityReduce(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
|
||||
func ComputeTaintTolerationPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
|
||||
var maxCount int
|
||||
for i := range result {
|
||||
if result[i].Score > maxCount {
|
||||
|
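Taken together, countIntolerableTaintsPreferNoSchedule and ComputeTaintTolerationPriorityReduce count the PreferNoSchedule taints a pod does not tolerate and then normalize those counts across nodes. A rough, self-contained sketch of that normalization, with an invented helper name; the committed reducer's exact rounding may differ:

// normalizeIntolerableCounts is illustrative only: nodes with fewer
// intolerable PreferNoSchedule taints end up with a higher score on a 0-10 scale.
func normalizeIntolerableCounts(counts map[string]int) map[string]int {
	maxCount := 0
	for _, c := range counts {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := make(map[string]int, len(counts))
	for node, c := range counts {
		if maxCount == 0 {
			scores[node] = 10 // nothing intolerable anywhere
			continue
		}
		scores[node] = int((1.0 - float64(c)/float64(maxCount)) * 10)
	}
	return scores
}

// For counts {"nodeA": 0, "nodeB": 1, "nodeC": 2} this yields
// {"nodeA": 10, "nodeB": 5, "nodeC": 0}: more intolerable taints, lower score,
// which is the ordering the test file below asserts.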
@@ -21,29 +21,29 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
func nodeWithTaints(nodeName string, taints []api.Taint) *api.Node {
|
||||
func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
|
||||
taintsData, _ := json.Marshal(taints)
|
||||
return &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
return &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Name: nodeName,
|
||||
Annotations: map[string]string{
|
||||
api.TaintsAnnotationKey: string(taintsData),
|
||||
v1.TaintsAnnotationKey: string(taintsData),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func podWithTolerations(tolerations []api.Toleration) *api.Pod {
|
||||
func podWithTolerations(tolerations []v1.Toleration) *v1.Pod {
|
||||
tolerationData, _ := json.Marshal(tolerations)
|
||||
return &api.Pod{
|
||||
ObjectMeta: api.ObjectMeta{
|
||||
return &v1.Pod{
|
||||
ObjectMeta: v1.ObjectMeta{
|
||||
Annotations: map[string]string{
|
||||
api.TolerationsAnnotationKey: string(tolerationData),
|
||||
v1.TolerationsAnnotationKey: string(tolerationData),
|
||||
},
|
||||
},
|
||||
}
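Both helpers above JSON-encode the Go structs and stash them in annotations, which is how taints and tolerations are still carried on the v1 objects at this point in the API move. A hedged sketch of what nodeWithTaints produces (the node name and taint values are invented):

node := nodeWithTaints("nodeA", []v1.Taint{{
	Key:    "foo",
	Value:  "bar",
	Effect: v1.TaintEffectPreferNoSchedule,
}})
// node.Annotations[v1.TaintsAnnotationKey] now holds the JSON-encoded taint
// list, which v1.GetTaintsFromNodeAnnotations decodes again inside the priority.
_ = node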
|
||||
@@ -55,30 +55,30 @@ func podWithTolerations(tolerations []api.Toleration) *api.Pod {
|
||||
|
||||
func TestTaintAndToleration(t *testing.T) {
|
||||
tests := []struct {
|
||||
pod *api.Pod
|
||||
nodes []*api.Node
|
||||
pod *v1.Pod
|
||||
nodes []*v1.Node
|
||||
expectedList schedulerapi.HostPriorityList
|
||||
test string
|
||||
}{
|
||||
// basic test case
|
||||
{
|
||||
test: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints",
|
||||
pod: podWithTolerations([]api.Toleration{{
|
||||
pod: podWithTolerations([]v1.Toleration{{
|
||||
Key: "foo",
|
||||
Operator: api.TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "bar",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}}),
|
||||
nodes: []*api.Node{
|
||||
nodeWithTaints("nodeA", []api.Taint{{
|
||||
nodes: []*v1.Node{
|
||||
nodeWithTaints("nodeA", []v1.Taint{{
|
||||
Key: "foo",
|
||||
Value: "bar",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}}),
|
||||
nodeWithTaints("nodeB", []api.Taint{{
|
||||
nodeWithTaints("nodeB", []v1.Taint{{
|
||||
Key: "foo",
|
||||
Value: "blah",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}}),
|
||||
},
|
||||
expectedList: []schedulerapi.HostPriority{
|
||||
@@ -89,37 +89,37 @@ func TestTaintAndToleration(t *testing.T) {
|
||||
// the count of taints that are tolerated by pod, does not matter.
|
||||
{
|
||||
test: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has",
|
||||
pod: podWithTolerations([]api.Toleration{
|
||||
pod: podWithTolerations([]v1.Toleration{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Operator: api.TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}, {
|
||||
Key: "disk-type",
|
||||
Operator: api.TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "ssd",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}),
|
||||
nodes: []*api.Node{
|
||||
nodeWithTaints("nodeA", []api.Taint{}),
|
||||
nodeWithTaints("nodeB", []api.Taint{
|
||||
nodes: []*v1.Node{
|
||||
nodeWithTaints("nodeA", []v1.Taint{}),
|
||||
nodeWithTaints("nodeB", []v1.Taint{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}),
|
||||
nodeWithTaints("nodeC", []api.Taint{
|
||||
nodeWithTaints("nodeC", []v1.Taint{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}, {
|
||||
Key: "disk-type",
|
||||
Value: "ssd",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}),
|
||||
},
|
||||
@@ -132,30 +132,30 @@ func TestTaintAndToleration(t *testing.T) {
|
||||
// the count of taints on a node that are not tolerated by pod, matters.
|
||||
{
|
||||
test: "the more intolerable taints a node has, the lower score it gets.",
|
||||
pod: podWithTolerations([]api.Toleration{{
|
||||
pod: podWithTolerations([]v1.Toleration{{
|
||||
Key: "foo",
|
||||
Operator: api.TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "bar",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}}),
|
||||
nodes: []*api.Node{
|
||||
nodeWithTaints("nodeA", []api.Taint{}),
|
||||
nodeWithTaints("nodeB", []api.Taint{
|
||||
nodes: []*v1.Node{
|
||||
nodeWithTaints("nodeA", []v1.Taint{}),
|
||||
nodeWithTaints("nodeB", []v1.Taint{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}),
|
||||
nodeWithTaints("nodeC", []api.Taint{
|
||||
nodeWithTaints("nodeC", []v1.Taint{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}, {
|
||||
Key: "disk-type",
|
||||
Value: "ssd",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}),
|
||||
},
|
||||
@@ -168,37 +168,37 @@ func TestTaintAndToleration(t *testing.T) {
|
||||
// taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
|
||||
{
|
||||
test: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function",
|
||||
pod: podWithTolerations([]api.Toleration{
|
||||
pod: podWithTolerations([]v1.Toleration{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Operator: api.TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectNoSchedule,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
}, {
|
||||
Key: "disk-type",
|
||||
Operator: api.TolerationOpEqual,
|
||||
Operator: v1.TolerationOpEqual,
|
||||
Value: "ssd",
|
||||
Effect: api.TaintEffectNoSchedule,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}),
|
||||
nodes: []*api.Node{
|
||||
nodeWithTaints("nodeA", []api.Taint{}),
|
||||
nodeWithTaints("nodeB", []api.Taint{
|
||||
nodes: []*v1.Node{
|
||||
nodeWithTaints("nodeA", []v1.Taint{}),
|
||||
nodeWithTaints("nodeB", []v1.Taint{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectNoSchedule,
|
||||
Effect: v1.TaintEffectNoSchedule,
|
||||
},
|
||||
}),
|
||||
nodeWithTaints("nodeC", []api.Taint{
|
||||
nodeWithTaints("nodeC", []v1.Taint{
|
||||
{
|
||||
Key: "cpu-type",
|
||||
Value: "arm64",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
}, {
|
||||
Key: "disk-type",
|
||||
Value: "ssd",
|
||||
Effect: api.TaintEffectPreferNoSchedule,
|
||||
Effect: v1.TaintEffectPreferNoSchedule,
|
||||
},
|
||||
}),
|
||||
},
|
||||
|
@@ -17,22 +17,22 @@ limitations under the License.
|
||||
package priorities
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/resource"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||
)
|
||||
|
||||
func makeNode(node string, milliCPU, memory int64) *api.Node {
|
||||
return &api.Node{
|
||||
ObjectMeta: api.ObjectMeta{Name: node},
|
||||
Status: api.NodeStatus{
|
||||
Capacity: api.ResourceList{
|
||||
func makeNode(node string, milliCPU, memory int64) *v1.Node {
|
||||
return &v1.Node{
|
||||
ObjectMeta: v1.ObjectMeta{Name: node},
|
||||
Status: v1.NodeStatus{
|
||||
Capacity: v1.ResourceList{
|
||||
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
|
||||
"memory": *resource.NewQuantity(memory, resource.BinarySI),
|
||||
},
|
||||
Allocatable: api.ResourceList{
|
||||
Allocatable: v1.ResourceList{
|
||||
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
|
||||
"memory": *resource.NewQuantity(memory, resource.BinarySI),
|
||||
},
|
||||
@@ -41,7 +41,7 @@ func makeNode(node string, milliCPU, memory int64) *api.Node {
|
||||
}
|
||||
|
||||
func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction) algorithm.PriorityFunction {
|
||||
return func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
||||
return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
|
||||
for i := range nodes {
|
||||
hostResult, err := mapFn(pod, nil, nodeNameToInfo[nodes[i].Name])
|
||||
|
@@ -16,9 +16,7 @@ limitations under the License.
|
||||
|
||||
package util
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
)
|
||||
import "k8s.io/kubernetes/pkg/api/v1"
|
||||
|
||||
// For each of these resources, a pod that doesn't request the resource explicitly
|
||||
// will be treated as having requested the amount indicated below, for the purpose
|
||||
@@ -32,18 +30,18 @@ const DefaultMilliCpuRequest int64 = 100 // 0.1 core
|
||||
const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB
|
||||
|
||||
// GetNonzeroRequests returns the default resource request if none is found or what is provided on the request
|
||||
// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
|
||||
// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity v1.ResourceList"
|
||||
// as an additional argument here) rather than using constants
|
||||
func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) {
|
||||
func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
|
||||
var outMilliCPU, outMemory int64
|
||||
// Override if un-set, but not if explicitly set to zero
|
||||
if _, found := (*requests)[api.ResourceCPU]; !found {
|
||||
if _, found := (*requests)[v1.ResourceCPU]; !found {
|
||||
outMilliCPU = DefaultMilliCpuRequest
|
||||
} else {
|
||||
outMilliCPU = requests.Cpu().MilliValue()
|
||||
}
|
||||
// Override if un-set, but not if explicitly set to zero
|
||||
if _, found := (*requests)[api.ResourceMemory]; !found {
|
||||
if _, found := (*requests)[v1.ResourceMemory]; !found {
|
||||
outMemory = DefaultMemoryRequest
|
||||
} else {
|
||||
outMemory = requests.Memory().Value()
|
||||
|
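GetNonzeroRequests substitutes DefaultMilliCpuRequest and DefaultMemoryRequest only when a resource is absent from the list, not when it is explicitly set to zero. A small illustrative caller (the request values are invented for the example, and the api/resource import used elsewhere in this commit is assumed):

requests := v1.ResourceList{
	// CPU is set explicitly; memory is omitted on purpose.
	v1.ResourceCPU: *resource.NewMilliQuantity(250, resource.DecimalSI),
}
milliCPU, memory := GetNonzeroRequests(&requests)
// milliCPU == 250, memory == DefaultMemoryRequest (200 MB)
_, _ = milliCPU, memory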
@@ -17,8 +17,8 @@ limitations under the License.
|
||||
package util
|
||||
|
||||
import (
|
||||
"k8s.io/kubernetes/pkg/api"
|
||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||
"k8s.io/kubernetes/pkg/api/v1"
|
||||
"k8s.io/kubernetes/pkg/labels"
|
||||
"k8s.io/kubernetes/pkg/util/sets"
|
||||
)
|
||||
@@ -27,7 +27,7 @@ import (
|
||||
// according to the namespaces indicated in podAffinityTerm.
|
||||
// 1. If the namespaces is nil considers the given pod's namespace
|
||||
// 2. If the namespaces is empty list then considers all the namespaces
|
||||
func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String {
|
||||
func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm v1.PodAffinityTerm) sets.String {
|
||||
names := sets.String{}
|
||||
if podAffinityTerm.Namespaces == nil {
|
||||
names.Insert(pod.Namespace)
|
||||
@@ -39,7 +39,7 @@ func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffin
|
||||
|
||||
// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
|
||||
// matches the namespace and selector defined by <affinityPod>`s <term>.
|
||||
func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, term *api.PodAffinityTerm) (bool, error) {
|
||||
func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, affinityPod *v1.Pod, term *v1.PodAffinityTerm) (bool, error) {
|
||||
namespaces := getNamespacesFromPodAffinityTerm(affinityPod, *term)
|
||||
if len(namespaces) != 0 && !namespaces.Has(pod.Namespace) {
|
||||
return false, nil
|
||||
@@ -53,7 +53,7 @@ func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, ter
|
||||
}
|
||||
|
||||
// nodesHaveSameTopologyKeyInternal checks if nodeA and nodeB have same label value with given topologyKey as label key.
|
||||
func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *api.Node, topologyKey string) bool {
|
||||
func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *v1.Node, topologyKey string) bool {
|
||||
return nodeA.Labels != nil && nodeB.Labels != nil && len(nodeA.Labels[topologyKey]) > 0 && nodeA.Labels[topologyKey] == nodeB.Labels[topologyKey]
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@ type Topologies struct {
|
||||
|
||||
// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key.
|
||||
// If the topologyKey is nil/empty, check if the two nodes have any of the default topologyKeys, and have same corresponding label value.
|
||||
func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *api.Node, topologyKey string) bool {
|
||||
func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
|
||||
if len(topologyKey) == 0 {
|
||||
// assumes this is allowed only for PreferredDuringScheduling pod anti-affinity (ensured by api/validation)
|
||||
for _, defaultKey := range tps.DefaultKeys {
|
||||
|
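NodesHaveSameTopologyKey ultimately reduces to a label comparison: two nodes share a topology domain when both carry the topology key and the values are equal. A minimal illustration against the internal helper (the node names and zone value are invented):

nodeA := &v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-a",
	Labels: map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone-1"}}}
nodeB := &v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-b",
	Labels: map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone-1"}}}
same := nodesHaveSameTopologyKeyInternal(nodeA, nodeB, "failure-domain.beta.kubernetes.io/zone")
// same == true; a differing value or a missing label on either node gives false.
_ = same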
@@ -16,11 +16,9 @@ limitations under the License.
package util

import (
"k8s.io/kubernetes/pkg/api"
)
import "k8s.io/kubernetes/pkg/api/v1"

func GetControllerRef(pod *api.Pod) *api.OwnerReference {
func GetControllerRef(pod *v1.Pod) *v1.OwnerReference {
if len(pod.OwnerReferences) == 0 {
return nil
}

@@ -17,7 +17,7 @@ limitations under the License.
package algorithm

import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

@@ -28,16 +28,16 @@ type SchedulerExtender interface {
// Filter based on extender-implemented predicate functions. The filtered list is
// expected to be a subset of the supplied list. failedNodesMap optionally contains
// the list of failed nodes and failure reasons.
Filter(pod *api.Pod, nodes []*api.Node) (filteredNodes []*api.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)
Filter(pod *v1.Pod, nodes []*v1.Node) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)

// Prioritize based on extender-implemented priority functions. The returned scores & weight
// are used to compute the weighted score for an extender. The weighted scores are added to
// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
Prioritize(pod *api.Pod, nodes []*api.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
}

// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods
// onto machines.
type ScheduleAlgorithm interface {
Schedule(*api.Pod, NodeLister) (selectedMachine string, err error)
Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error)
}

@@ -19,7 +19,7 @@ package algorithm
import (
"testing"

"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
)

// Some functions used by multiple scheduler tests.
@@ -31,7 +31,7 @@ type schedulerTester struct {
}

// Call if you know exactly where pod should get scheduled.
func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
func (st *schedulerTester) expectSchedule(pod *v1.Pod, expected string) {
actual, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod)
@@ -43,7 +43,7 @@ func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
}

// Call if you can't predict where pod will be scheduled.
func (st *schedulerTester) expectSuccess(pod *api.Pod) {
func (st *schedulerTester) expectSuccess(pod *v1.Pod) {
_, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod)
@@ -52,7 +52,7 @@ func (st *schedulerTester) expectSuccess(pod *api.Pod) {
}

// Call if pod should *not* schedule.
func (st *schedulerTester) expectFailure(pod *api.Pod) {
func (st *schedulerTester) expectFailure(pod *v1.Pod) {
_, err := st.scheduler.Schedule(pod, st.nodeLister)
if err == nil {
st.t.Error("Unexpected non-error")

@@ -17,7 +17,7 @@ limitations under the License.
package algorithm

import (
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

@@ -25,25 +25,25 @@ import (
// FitPredicate is a function that indicates if a pod fits into an existing node.
// The failure information is given by the error.
// TODO: Change interface{} to a specific type.
type FitPredicate func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)
type FitPredicate func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)

// PriorityMapFunction is a function that computes per-node results for a given node.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
type PriorityMapFunction func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error)
type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error)

// PriorityReduceFunction is a function that aggregates per-node results and computes
// final scores for all nodes.
// TODO: Figure out the exact API of this method.
// TODO: Change interface{} to a specific type.
type PriorityReduceFunction func(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error
type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error

// MetadataProducer is a function that computes metadata for a given pod.
type MetadataProducer func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}
type MetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}

// DEPRECATED
// Use Map-Reduce pattern for priority functions.
type PriorityFunction func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error)
type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)

type PriorityConfig struct {
Map PriorityMapFunction
@@ -55,7 +55,7 @@ type PriorityConfig struct {
}

// EmptyMetadataProducer returns a no-op MetadataProducer type.
func EmptyMetadataProducer(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
func EmptyMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
return nil
}

@@ -63,4 +63,4 @@ type PredicateFailureReason interface {
GetReason() string
}

type GetEquivalencePodFunc func(pod *api.Pod) interface{}
type GetEquivalencePodFunc func(pod *v1.Pod) interface{}
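These function types are what every predicate and priority in this commit now implements against *v1.Pod. As a hedged sketch with invented names (and assuming the fmt, v1, schedulerapi and schedulercache imports shown above), a trivial predicate and a flat priority map would look like:

// alwaysFit is an illustrative FitPredicate: it accepts every pod on every node.
func alwaysFit(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error) {
	return true, nil, nil
}

// flatPriority is an illustrative PriorityMapFunction: every node gets score 1.
func flatPriority(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
	node := nodeInfo.Node()
	if node == nil {
		return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
	}
	return schedulerapi.HostPriority{Host: node.Name, Score: 1}, nil
}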