plugin/scheduler

parent 5e1adf91df
commit f782aba56e
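The diff below switches the scheduler plugin from the internal k8s.io/kubernetes/pkg/api types to the versioned k8s.io/kubernetes/pkg/api/v1 types, and from the internalclientset to the release_1_5 clientset. As a minimal, self-contained sketch of what the new predicate shape looks like to a caller: the local fitPredicate type and podFitsHost function below are simplified stand-ins for the real algorithm.FitPredicate and PodFitsHost (which also take predicate metadata and a schedulercache.NodeInfo), not the committed code.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/v1" // versioned types now used throughout the scheduler
)

// fitPredicate mirrors the shape of the scheduler's predicate functions after
// this change: they receive *v1.Pod and *v1.Node instead of the internal
// *api.Pod / *api.Node.
type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)

// podFitsHost is a trimmed-down illustration of the PodFitsHost predicate in
// the diff below: a pod bound to a specific node name only fits that node.
func podFitsHost(pod *v1.Pod, node *v1.Node) (bool, error) {
    if len(pod.Spec.NodeName) == 0 {
        return true, nil
    }
    return pod.Spec.NodeName == node.Name, nil
}

func main() {
    pod := &v1.Pod{Spec: v1.PodSpec{NodeName: "node-1"}}
    node := &v1.Node{ObjectMeta: v1.ObjectMeta{Name: "node-1"}}
    fits, _ := podFitsHost(pod, node)
    fmt.Println("fits:", fits) // fits: true
}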
@@ -26,9 +26,9 @@ import (
 	"os"
 	"strconv"
 
-	"k8s.io/kubernetes/pkg/api"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
-	unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion"
+	"k8s.io/kubernetes/pkg/api/v1"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
+	v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
 	"k8s.io/kubernetes/pkg/client/leaderelection"
 	"k8s.io/kubernetes/pkg/client/leaderelection/resourcelock"
 	"k8s.io/kubernetes/pkg/client/record"
@@ -122,9 +122,9 @@ func Run(s *options.SchedulerServer) error {
 	}
 
 	eventBroadcaster := record.NewBroadcaster()
-	config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: s.SchedulerName})
+	config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName})
 	eventBroadcaster.StartLogging(glog.Infof)
-	eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")})
+	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")})
 
 	sched := scheduler.New(config)
 
@@ -147,7 +147,7 @@ func Run(s *options.SchedulerServer) error {
 
 	// TODO: enable other lock types
 	rl := resourcelock.EndpointsLock{
-		EndpointsMeta: api.ObjectMeta{
+		EndpointsMeta: v1.ObjectMeta{
 			Namespace: "kube-system",
 			Name:      "kube-scheduler",
 		},
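For readability, here is the event-recorder wiring from the hunk above, consolidated as it reads after the change. This is a sketch rather than the committed code: newRecorder is a hypothetical helper, and its schedulerName and client parameters stand in for s.SchedulerName and leaderElectionClient in the original Run function.

package app

import (
    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/api/v1"
    clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
    v1core "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1"
    "k8s.io/kubernetes/pkg/client/record"
)

// newRecorder wires the scheduler's event broadcaster against the versioned
// client: v1.EventSource and the v1core event sink replace the internal-API
// equivalents used before this commit.
func newRecorder(schedulerName string, client clientset.Interface) record.EventRecorder {
    eventBroadcaster := record.NewBroadcaster()
    eventBroadcaster.StartLogging(glog.Infof)
    eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: client.Core().Events("")})
    return eventBroadcaster.NewRecorder(v1.EventSource{Component: schedulerName})
}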
@@ -19,39 +19,39 @@ package algorithm
 import (
 	"fmt"
 
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	"k8s.io/kubernetes/pkg/api/v1"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/pkg/labels"
 )
 
 // NodeLister interface represents anything that can list nodes for a scheduler.
 type NodeLister interface {
-	// We explicitly return []*api.Node, instead of api.NodeList, to avoid
+	// We explicitly return []*v1.Node, instead of v1.NodeList, to avoid
 	// performing expensive copies that are unneded.
-	List() ([]*api.Node, error)
+	List() ([]*v1.Node, error)
 }
 
 // FakeNodeLister implements NodeLister on a []string for test purposes.
-type FakeNodeLister []*api.Node
+type FakeNodeLister []*v1.Node
 
 // List returns nodes as a []string.
-func (f FakeNodeLister) List() ([]*api.Node, error) {
+func (f FakeNodeLister) List() ([]*v1.Node, error) {
 	return f, nil
 }
 
 // PodLister interface represents anything that can list pods for a scheduler.
 type PodLister interface {
-	// We explicitly return []*api.Pod, instead of api.PodList, to avoid
+	// We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
 	// performing expensive copies that are unneded.
-	List(labels.Selector) ([]*api.Pod, error)
+	List(labels.Selector) ([]*v1.Pod, error)
 }
 
-// FakePodLister implements PodLister on an []api.Pods for test purposes.
-type FakePodLister []*api.Pod
+// FakePodLister implements PodLister on an []v1.Pods for test purposes.
+type FakePodLister []*v1.Pod
 
-// List returns []*api.Pod matching a query.
-func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) {
+// List returns []*v1.Pod matching a query.
+func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
 	for _, pod := range f {
 		if s.Matches(labels.Set(pod.Labels)) {
 			selected = append(selected, pod)
@@ -63,21 +63,21 @@ func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error)
 // ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
 type ServiceLister interface {
 	// Lists all the services
-	List(labels.Selector) ([]*api.Service, error)
+	List(labels.Selector) ([]*v1.Service, error)
 	// Gets the services for the given pod
-	GetPodServices(*api.Pod) ([]*api.Service, error)
+	GetPodServices(*v1.Pod) ([]*v1.Service, error)
 }
 
-// FakeServiceLister implements ServiceLister on []api.Service for test purposes.
-type FakeServiceLister []*api.Service
+// FakeServiceLister implements ServiceLister on []v1.Service for test purposes.
+type FakeServiceLister []*v1.Service
 
-// List returns api.ServiceList, the list of all services.
-func (f FakeServiceLister) List(labels.Selector) ([]*api.Service, error) {
+// List returns v1.ServiceList, the list of all services.
+func (f FakeServiceLister) List(labels.Selector) ([]*v1.Service, error) {
 	return f, nil
 }
 
 // GetPodServices gets the services that have the selector that match the labels on the given pod.
-func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service, err error) {
+func (f FakeServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service, err error) {
 	var selector labels.Selector
 
 	for i := range f {
@@ -97,34 +97,34 @@ func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service
 // ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler.
 type ControllerLister interface {
 	// Lists all the replication controllers
-	List(labels.Selector) ([]*api.ReplicationController, error)
+	List(labels.Selector) ([]*v1.ReplicationController, error)
 	// Gets the services for the given pod
-	GetPodControllers(*api.Pod) ([]*api.ReplicationController, error)
+	GetPodControllers(*v1.Pod) ([]*v1.ReplicationController, error)
 }
 
-// EmptyControllerLister implements ControllerLister on []api.ReplicationController returning empty data
+// EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data
 type EmptyControllerLister struct{}
 
 // List returns nil
-func (f EmptyControllerLister) List(labels.Selector) ([]*api.ReplicationController, error) {
+func (f EmptyControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
 	return nil, nil
 }
 
 // GetPodControllers returns nil
-func (f EmptyControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) {
+func (f EmptyControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
 	return nil, nil
 }
 
-// FakeControllerLister implements ControllerLister on []api.ReplicationController for test purposes.
-type FakeControllerLister []*api.ReplicationController
+// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
+type FakeControllerLister []*v1.ReplicationController
 
-// List returns []api.ReplicationController, the list of all ReplicationControllers.
-func (f FakeControllerLister) List(labels.Selector) ([]*api.ReplicationController, error) {
+// List returns []v1.ReplicationController, the list of all ReplicationControllers.
+func (f FakeControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
 	return f, nil
 }
 
 // GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod
-func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) {
+func (f FakeControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
 	var selector labels.Selector
 
 	for i := range f {
@@ -147,14 +147,14 @@ func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []*ap
 // ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler.
 type ReplicaSetLister interface {
 	// Gets the replicasets for the given pod
-	GetPodReplicaSets(*api.Pod) ([]*extensions.ReplicaSet, error)
+	GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error)
 }
 
 // EmptyReplicaSetLister implements ReplicaSetLister on []extensions.ReplicaSet returning empty data
 type EmptyReplicaSetLister struct{}
 
 // GetPodReplicaSets returns nil
-func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) {
+func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
 	return nil, nil
 }
 
@@ -162,7 +162,7 @@ func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extension
 type FakeReplicaSetLister []*extensions.ReplicaSet
 
 // GetPodReplicaSets gets the ReplicaSets that have the selector that match the labels on the given pod
-func (f FakeReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) {
+func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) {
 	var selector labels.Selector
 
 	for _, rs := range f {
@@ -19,7 +19,7 @@ package predicates
 import (
 	"fmt"
 
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 )
 
 var (
@@ -46,13 +46,13 @@ var (
 // hit and caused the unfitting failure.
 type InsufficientResourceError struct {
 	// resourceName is the name of the resource that is insufficient
-	ResourceName api.ResourceName
+	ResourceName v1.ResourceName
 	requested int64
 	used int64
 	capacity int64
 }
 
-func NewInsufficientResourceError(resourceName api.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
+func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError {
 	return &InsufficientResourceError{
 		ResourceName: resourceName,
 		requested: requested,
@@ -18,7 +18,7 @@ package predicates
 
 import (
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
@@ -35,7 +35,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Metada
 }
 
 // GetMetadata returns the predicateMetadata used which will be used by various predicates.
-func (pfactory *PredicateMetadataFactory) GetMetadata(pod *api.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{} {
+func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{} {
 	// If we cannot compute metadata, just return nil
 	if pod == nil {
 		return nil
@@ -24,8 +24,8 @@ import (
 	"time"
 
 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
 	"k8s.io/kubernetes/pkg/kubelet/qos"
 	"k8s.io/kubernetes/pkg/labels"
@@ -50,15 +50,15 @@ func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMeta
 
 // Other types for predicate functions...
 type NodeInfo interface {
-	GetNodeInfo(nodeID string) (*api.Node, error)
+	GetNodeInfo(nodeID string) (*v1.Node, error)
 }
 
 type PersistentVolumeInfo interface {
-	GetPersistentVolumeInfo(pvID string) (*api.PersistentVolume, error)
+	GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error)
 }
 
 type PersistentVolumeClaimInfo interface {
-	GetPersistentVolumeClaimInfo(namespace string, name string) (*api.PersistentVolumeClaim, error)
+	GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error)
 }
 
 // CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo
@@ -67,7 +67,7 @@ type CachedPersistentVolumeClaimInfo struct {
 }
 
 // GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name
-func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*api.PersistentVolumeClaim, error) {
+func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) {
 	return c.PersistentVolumeClaims(namespace).Get(name)
 }
 
@@ -76,8 +76,8 @@ type CachedNodeInfo struct {
 }
 
 // GetNodeInfo returns cached data for the node 'id'.
-func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
-	node, exists, err := c.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}})
+func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) {
+	node, exists, err := c.Get(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: id}})
 
 	if err != nil {
 		return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err)
@@ -87,27 +87,27 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) {
 		return nil, fmt.Errorf("node '%v' not found", id)
 	}
 
-	return node.(*api.Node), nil
+	return node.(*v1.Node), nil
 }
 
 // Note that predicateMetdata and matchingPodAntiAffinityTerm need to be declared in the same file
 // due to the way declarations are processed in predicate declaration unit tests.
 type matchingPodAntiAffinityTerm struct {
-	term *api.PodAffinityTerm
-	node *api.Node
+	term *v1.PodAffinityTerm
+	node *v1.Node
 }
 
 type predicateMetadata struct {
-	pod *api.Pod
+	pod *v1.Pod
 	podBestEffort bool
 	podRequest *schedulercache.Resource
 	podPorts map[int]bool
 	matchingAntiAffinityTerms []matchingPodAntiAffinityTerm
-	serviceAffinityMatchingPodList []*api.Pod
-	serviceAffinityMatchingPodServices []*api.Service
+	serviceAffinityMatchingPodList []*v1.Pod
+	serviceAffinityMatchingPodServices []*v1.Service
 }
 
-func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
+func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool {
 	// fast path if there is no conflict checking targets.
 	if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil {
 		return false
@@ -151,7 +151,7 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool {
 // - AWS EBS forbids any two pods mounting the same volume ID
 // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image.
 // TODO: migrate this into some per-volume specific code?
-func NoDiskConflict(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	for _, v := range pod.Spec.Volumes {
 		for _, ev := range nodeInfo.Pods() {
 			if isVolumeConflict(v, ev) {
@@ -172,8 +172,8 @@ type MaxPDVolumeCountChecker struct {
 // VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps
 type VolumeFilter struct {
 	// Filter normal volumes
-	FilterVolume func(vol *api.Volume) (id string, relevant bool)
-	FilterPersistentVolume func(pv *api.PersistentVolume) (id string, relevant bool)
+	FilterVolume func(vol *v1.Volume) (id string, relevant bool)
+	FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool)
 }
 
 // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the
@@ -194,7 +194,7 @@ func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo Pe
 	return c.predicate
 }
 
-func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace string, filteredVolumes map[string]bool) error {
+func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error {
 	for _, vol := range volumes {
 		if id, ok := c.filter.FilterVolume(&vol); ok {
 			filteredVolumes[id] = true
@@ -248,7 +248,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace
 	return nil
 }
 
-func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	// If a pod doesn't have any volume attached to it, the predicate will always be true.
 	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
 	if len(pod.Spec.Volumes) == 0 {
@@ -293,14 +293,14 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, node
 
 // EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes
 var EBSVolumeFilter VolumeFilter = VolumeFilter{
-	FilterVolume: func(vol *api.Volume) (string, bool) {
+	FilterVolume: func(vol *v1.Volume) (string, bool) {
 		if vol.AWSElasticBlockStore != nil {
 			return vol.AWSElasticBlockStore.VolumeID, true
 		}
 		return "", false
 	},
 
-	FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) {
+	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
 		if pv.Spec.AWSElasticBlockStore != nil {
 			return pv.Spec.AWSElasticBlockStore.VolumeID, true
 		}
@@ -310,14 +310,14 @@ var EBSVolumeFilter VolumeFilter = VolumeFilter{
 
 // GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes
 var GCEPDVolumeFilter VolumeFilter = VolumeFilter{
-	FilterVolume: func(vol *api.Volume) (string, bool) {
+	FilterVolume: func(vol *v1.Volume) (string, bool) {
 		if vol.GCEPersistentDisk != nil {
 			return vol.GCEPersistentDisk.PDName, true
 		}
 		return "", false
 	},
 
-	FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) {
+	FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) {
 		if pv.Spec.GCEPersistentDisk != nil {
 			return pv.Spec.GCEPersistentDisk.PDName, true
 		}
@@ -352,7 +352,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum
 	return c.predicate
 }
 
-func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	// If a pod doesn't have any volume attached to it, the predicate will always be true.
 	// Thus we make a fast path for it, to avoid unnecessary computations in this case.
 	if len(pod.Spec.Volumes) == 0 {
@@ -427,22 +427,22 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *
 	return true, nil, nil
 }
 
-func GetResourceRequest(pod *api.Pod) *schedulercache.Resource {
+func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource {
 	result := schedulercache.Resource{}
 	for _, container := range pod.Spec.Containers {
 		for rName, rQuantity := range container.Resources.Requests {
 			switch rName {
-			case api.ResourceMemory:
+			case v1.ResourceMemory:
 				result.Memory += rQuantity.Value()
-			case api.ResourceCPU:
+			case v1.ResourceCPU:
 				result.MilliCPU += rQuantity.MilliValue()
-			case api.ResourceNvidiaGPU:
+			case v1.ResourceNvidiaGPU:
 				result.NvidiaGPU += rQuantity.Value()
 			default:
-				if api.IsOpaqueIntResourceName(rName) {
+				if v1.IsOpaqueIntResourceName(rName) {
 					// Lazily allocate this map only if required.
 					if result.OpaqueIntResources == nil {
-						result.OpaqueIntResources = map[api.ResourceName]int64{}
+						result.OpaqueIntResources = map[v1.ResourceName]int64{}
 					}
 					result.OpaqueIntResources[rName] += rQuantity.Value()
 				}
@@ -453,23 +453,23 @@ func GetResourceRequest(pod *api.Pod) *schedulercache.Resource {
 	for _, container := range pod.Spec.InitContainers {
 		for rName, rQuantity := range container.Resources.Requests {
 			switch rName {
-			case api.ResourceMemory:
+			case v1.ResourceMemory:
 				if mem := rQuantity.Value(); mem > result.Memory {
 					result.Memory = mem
 				}
-			case api.ResourceCPU:
+			case v1.ResourceCPU:
 				if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU {
 					result.MilliCPU = cpu
 				}
-			case api.ResourceNvidiaGPU:
+			case v1.ResourceNvidiaGPU:
 				if gpu := rQuantity.Value(); gpu > result.NvidiaGPU {
 					result.NvidiaGPU = gpu
 				}
 			default:
-				if api.IsOpaqueIntResourceName(rName) {
+				if v1.IsOpaqueIntResourceName(rName) {
 					// Lazily allocate this map only if required.
 					if result.OpaqueIntResources == nil {
-						result.OpaqueIntResources = map[api.ResourceName]int64{}
+						result.OpaqueIntResources = map[v1.ResourceName]int64{}
 					}
 					value := rQuantity.Value()
 					if value > result.OpaqueIntResources[rName] {
@@ -482,11 +482,11 @@ func GetResourceRequest(pod *api.Pod) *schedulercache.Resource {
 	return &result
 }
 
-func podName(pod *api.Pod) string {
+func podName(pod *v1.Pod) string {
 	return pod.Namespace + "/" + pod.Name
 }
 
-func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")
@@ -495,7 +495,7 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
 	var predicateFails []algorithm.PredicateFailureReason
 	allowedPodNumber := nodeInfo.AllowedPodNumber()
 	if len(nodeInfo.Pods())+1 > allowedPodNumber {
-		predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
+		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))
 	}
 
 	var podRequest *schedulercache.Resource
@@ -511,13 +511,13 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
 
 	allocatable := nodeInfo.AllocatableResource()
 	if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {
-		predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
+		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))
 	}
 	if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {
-		predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
+		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))
 	}
 	if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU {
-		predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU))
+		predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU))
 	}
 	for rName, rQuant := range podRequest.OpaqueIntResources {
 		if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] {
@@ -536,9 +536,9 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
 
 // nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms,
 // terms are ORed, and an empty list of terms will match nothing.
-func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSelectorTerm) bool {
+func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool {
 	for _, req := range nodeSelectorTerms {
-		nodeSelector, err := api.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
+		nodeSelector, err := v1.NodeSelectorRequirementsAsSelector(req.MatchExpressions)
 		if err != nil {
 			glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions)
 			return false
@@ -551,7 +551,7 @@ func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSe
 }
 
 // The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector.
-func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
+func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool {
 	// Check if node.Labels match pod.Spec.NodeSelector.
 	if len(pod.Spec.NodeSelector) > 0 {
 		selector := labels.SelectorFromSet(pod.Spec.NodeSelector)
@@ -562,7 +562,7 @@ func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
 
 	// Parse required node affinity scheduling requirements
 	// and check if the current node match the requirements.
-	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
+	affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
 	if err != nil {
 		glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err)
 		return false
@@ -603,7 +603,7 @@ func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool {
 	return nodeAffinityMatches
 }
 
-func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodSelectorMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")
@@ -614,7 +614,7 @@ func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache
 	return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil
 }
 
-func PodFitsHost(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHost(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	if len(pod.Spec.NodeName) == 0 {
 		return true, nil, nil
 	}
@@ -653,7 +653,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat
 // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful
 // A node may have a label with "retiring" as key and the date as the value
 // and it may be desirable to avoid scheduling new pods on this node
-func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")
@@ -732,9 +732,9 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al
 //
 // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed...
 // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction.
-func (s *ServiceAffinity) checkServiceAffinity(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
-	var services []*api.Service
-	var pods []*api.Pod
+func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+	var services []*v1.Service
+	var pods []*v1.Pod
 	if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) {
 		services = pm.serviceAffinityMatchingPodServices
 		pods = pm.serviceAffinityMatchingPodList
@@ -769,7 +769,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *api.Pod, meta interface{}, n
 	return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil
 }
 
-func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	var wantPorts map[int]bool
 	if predicateMeta, ok := meta.(*predicateMetadata); ok {
 		wantPorts = predicateMeta.podPorts
@@ -791,7 +791,7 @@ func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
 	return true, nil, nil
 }
 
-func GetUsedPorts(pods ...*api.Pod) map[int]bool {
+func GetUsedPorts(pods ...*v1.Pod) map[int]bool {
 	ports := make(map[int]bool)
 	for _, pod := range pods {
 		for j := range pod.Spec.Containers {
@@ -821,7 +821,7 @@ func haveSame(a1, a2 []string) bool {
 	return false
 }
 
-func GeneralPredicates(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func GeneralPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	var predicateFails []algorithm.PredicateFailureReason
 	fit, reasons, err := PodFitsResources(pod, meta, nodeInfo)
 	if err != nil {
@@ -873,7 +873,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failu
 	return checker.InterPodAffinityMatches
 }
 
-func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	node := nodeInfo.Node()
 	if node == nil {
 		return false, nil, fmt.Errorf("node not found")
@@ -883,7 +883,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interfac
 	}
 
 	// Now check if <pod> requirements will be satisfied on this node.
-	affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
+	affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
 	if err != nil {
 		return false, nil, err
 	}
@@ -907,7 +907,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interfac
 // First return value indicates whether a matching pod exists on a node that matches the topology key,
 // while the second return value indicates whether a matching pod exists anywhere.
 // TODO: Do we really need any pod matching, or all pods matching? I think the latter.
-func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, term *api.PodAffinityTerm) (bool, bool, error) {
+func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) {
 	matchingPodExists := false
 	for _, existingPod := range allPods {
 		match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, pod, term)
@@ -928,7 +928,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *api.Pod, allPods
 	return false, matchingPodExists, nil
 }
 
-func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityTerm) {
+func getPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) {
 	if podAffinity != nil {
 		if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
 			terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution
@@ -941,7 +941,7 @@ func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityT
 	return terms
 }
 
-func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api.PodAffinityTerm) {
+func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) {
 	if podAntiAffinity != nil {
 		if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 {
 			terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution
@@ -954,7 +954,7 @@ func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api.
 	return terms
 }
 
-func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) {
+func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) {
 	allNodeNames := make([]string, 0, len(nodeInfoMap))
 	for name := range nodeInfoMap {
 		allNodeNames = append(allNodeNames, name)
@@ -985,7 +985,7 @@ func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedule
 		}
 		var nodeResult []matchingPodAntiAffinityTerm
 		for _, existingPod := range nodeInfo.PodsWithAffinity() {
-			affinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations)
+			affinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations)
 			if err != nil {
 				catchError(err)
 				return
@@ -1012,10 +1012,10 @@ func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedule
 	return result, firstError
 }
 
-func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods []*api.Pod) ([]matchingPodAntiAffinityTerm, error) {
+func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) {
 	var result []matchingPodAntiAffinityTerm
 	for _, existingPod := range allPods {
-		affinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations)
+		affinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations)
 		if err != nil {
 			return nil, err
 		}
@@ -1040,7 +1040,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods
 
 // Checks if scheduling the pod onto this node would break any anti-affinity
 // rules indicated by the existing pods.
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *api.Pod, meta interface{}, node *api.Node) bool {
+func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta interface{}, node *v1.Node) bool {
 	var matchingTerms []matchingPodAntiAffinityTerm
 	if predicateMeta, ok := meta.(*predicateMetadata); ok {
 		matchingTerms = predicateMeta.matchingAntiAffinityTerms
@@ -1072,7 +1072,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *api.Pod, met
 }
 
 // Checks if scheduling the pod onto this node would break any rules of this pod.
-func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *api.Pod, node *api.Node, affinity *api.Affinity) bool {
+func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node *v1.Node, affinity *v1.Affinity) bool {
 	allPods, err := c.podLister.List(labels.Everything())
 	if err != nil {
 		return false
@ -1118,18 +1118,18 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *api.Pod, nod
|
|||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return false, nil, fmt.Errorf("node not found")
|
return false, nil, fmt.Errorf("node not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
|
taints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, err
|
return false, nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
|
tolerations, err := v1.GetTolerationsFromPodAnnotations(pod.Annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, nil, err
|
return false, nil, err
|
||||||
}
|
}
|
||||||
@@ -1140,7 +1140,7 @@ func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulerc
 return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil
 }

-func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint) bool {
+func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint) bool {
 // If the taint list is nil/empty, it is tolerated by all tolerations by default.
 if len(taints) == 0 {
 return true
@@ -1154,11 +1154,11 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint
 for i := range taints {
 taint := &taints[i]
 // skip taints that have effect PreferNoSchedule, since it is for priorities
-if taint.Effect == api.TaintEffectPreferNoSchedule {
+if taint.Effect == v1.TaintEffectPreferNoSchedule {
 continue
 }

-if !api.TaintToleratedByTolerations(taint, tolerations) {
+if !v1.TaintToleratedByTolerations(taint, tolerations) {
 return false
 }
 }
@@ -1167,13 +1167,13 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint
 }

 // Determine if a pod is scheduled with best-effort QoS
-func isPodBestEffort(pod *api.Pod) bool {
+func isPodBestEffort(pod *v1.Pod) bool {
 return qos.GetPodQOS(pod) == qos.BestEffort
 }

 // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node
 // reporting memory pressure condition.
-func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")
@@ -1194,7 +1194,7 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *

 // is node under pressure?
 for _, cond := range node.Status.Conditions {
-if cond.Type == api.NodeMemoryPressure && cond.Status == api.ConditionTrue {
+if cond.Type == v1.NodeMemoryPressure && cond.Status == v1.ConditionTrue {
 return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil
 }
 }
@@ -1204,7 +1204,7 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *

 // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node
 // reporting disk pressure condition.
-func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 node := nodeInfo.Node()
 if node == nil {
 return false, nil, fmt.Errorf("node not found")
@@ -1212,7 +1212,7 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *sc

 // is node under pressure?
 for _, cond := range node.Status.Conditions {
-if cond.Type == api.NodeDiskPressure && cond.Status == api.ConditionTrue {
+if cond.Type == v1.NodeDiskPressure && cond.Status == v1.ConditionTrue {
 return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil
 }
 }
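
The taint-handling predicate above only changes which package the types come from; the matching rule itself stays the same. As a rough standalone sketch of that rule (local stand-in types and an exact key/value/effect match are assumptions here, not the full v1 toleration semantics): a pod fits a node only if every taint with a scheduling effect is tolerated, while PreferNoSchedule taints are skipped and left to the priority functions.

    // Minimal sketch, not the real k8s.io types or helpers.
    package main

    import "fmt"

    type taint struct{ Key, Value, Effect string }
    type toleration struct{ Key, Value, Effect string }

    // tolerated reports whether any toleration matches the taint.
    // An empty toleration effect is assumed to match any effect.
    func tolerated(t taint, tols []toleration) bool {
    	for _, tol := range tols {
    		if tol.Key == t.Key && tol.Value == t.Value &&
    			(tol.Effect == "" || tol.Effect == t.Effect) {
    			return true
    		}
    	}
    	return false
    }

    func tolerationsTolerateTaints(tols []toleration, taints []taint) bool {
    	for _, t := range taints {
    		if t.Effect == "PreferNoSchedule" { // soft taint: scored by priorities, not rejected here
    			continue
    		}
    		if !tolerated(t, tols) {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	taints := []taint{{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}}
    	tols := []toleration{{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}}
    	fmt.Println(tolerationsTolerateTaints(tols, taints)) // true
    }
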

File diff suppressed because it is too large
@@ -16,8 +16,10 @@ limitations under the License.

 package predicates

-import "k8s.io/kubernetes/pkg/labels"
-import "k8s.io/kubernetes/pkg/api"
+import (
+"k8s.io/kubernetes/pkg/api/v1"
+"k8s.io/kubernetes/pkg/labels"
+)

 // FindLabelsInSet gets as many key/value pairs as possible out of a label set.
 func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string {
@@ -45,8 +47,8 @@ func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet la
 }

 // FilterPodsByNamespace filters pods outside a namespace from the given list.
-func FilterPodsByNamespace(pods []*api.Pod, ns string) []*api.Pod {
-filtered := []*api.Pod{}
+func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod {
+filtered := []*v1.Pod{}
 for _, nsPod := range pods {
 if nsPod.Namespace == ns {
 filtered = append(filtered, nsPod)
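
FilterPodsByNamespace is a plain filter, as the hunk above shows. A tiny standalone sketch of the same idea with a hypothetical stand-in pod type (not the real v1.Pod), mainly to show the intended usage:

    package main

    import "fmt"

    type pod struct{ Name, Namespace string }

    // filterPodsByNamespace keeps only the pods whose Namespace equals ns.
    func filterPodsByNamespace(pods []*pod, ns string) []*pod {
    	filtered := []*pod{}
    	for _, p := range pods {
    		if p.Namespace == ns {
    			filtered = append(filtered, p)
    		}
    	}
    	return filtered
    }

    func main() {
    	pods := []*pod{{"pod1", "ns1"}, {"pod2", "ns1"}, {"pod3", "kube-system"}}
    	for _, p := range filterPodsByNamespace(pods, "ns1") {
    		fmt.Println(p.Name) // pod1, pod2
    	}
    }
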
@@ -19,7 +19,7 @@ package predicates
 import (
 "fmt"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/labels"
 )

@@ -30,9 +30,9 @@ func ExampleFindLabelsInSet() {
 labelSubset["label2"] = "value2"
 // Lets make believe that these pods are on the cluster.
 // Utility functions will inspect their labels, filter them, and so on.
-nsPods := []*api.Pod{
+nsPods := []*v1.Pod{
 {
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod1",
 Namespace: "ns1",
 Labels: map[string]string{
@@ -43,14 +43,14 @@ func ExampleFindLabelsInSet() {
 },
 }, // first pod which will be used via the utilities
 {
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod2",
 Namespace: "ns1",
 },
 },

 {
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
 Name: "pod3ThatWeWontSee",
 },
 },
@@ -20,7 +20,7 @@ import (
 "fmt"
 "math"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
 schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
@@ -37,7 +37,7 @@ const (

 // Also used in most/least_requested nad metadata.
 // TODO: despaghettify it
-func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource {
+func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource {
 result := &schedulercache.Resource{}
 for i := range pod.Spec.Containers {
 container := &pod.Spec.Containers[i]
@@ -48,7 +48,7 @@ func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource {
 return result
 }

-func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
 node := nodeInfo.Node()
 if node == nil {
 return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -104,7 +104,7 @@ func fractionOfCapacity(requested, capacity int64) float64 {
 // close the two metrics are to each other.
 // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by:
 // "Wei Huang et al. An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization"
-func BalancedResourceAllocationMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func BalancedResourceAllocationMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
 var nonZeroRequest *schedulercache.Resource
 if priorityMeta, ok := meta.(*priorityMetadata); ok {
 nonZeroRequest = priorityMeta.nonZeroRequest
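
The comment above states the whole scoring rule for this priority: score = 10 - abs(cpuFraction - memoryFraction)*10. A small standalone sketch of that arithmetic follows; the zero-capacity and over-capacity handling and the integer truncation are assumptions read off the expected scores in the test table further down, not a copy of the production function.

    package main

    import (
    	"fmt"
    	"math"
    )

    // balancedScore: the closer the CPU and memory utilization fractions are
    // to each other, the higher the 0-10 score.
    func balancedScore(requestedCPU, capacityCPU, requestedMem, capacityMem int64) int {
    	if capacityCPU == 0 || capacityMem == 0 {
    		return 0 // zero-capacity nodes score 0, as in the test table
    	}
    	cpuFraction := float64(requestedCPU) / float64(capacityCPU)
    	memFraction := float64(requestedMem) / float64(capacityMem)
    	if cpuFraction >= 1 || memFraction >= 1 {
    		return 0 // requests that exceed capacity score 0
    	}
    	diff := math.Abs(cpuFraction - memFraction)
    	return int(10 - diff*10)
    }

    func main() {
    	// Mirrors the "differently sized machines" case in the tests below:
    	// CPU 3000/4000 = 75%, memory 5000/10000 = 50%, 10 - 0.25*10 = 7.5, truncated to 7.
    	fmt.Println(balancedScore(3000, 4000, 5000, 10000)) // 7
    }
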
@@ -20,8 +20,8 @@ import (
 "reflect"
 "testing"

-"k8s.io/kubernetes/pkg/api"
 "k8s.io/kubernetes/pkg/api/resource"
+"k8s.io/kubernetes/pkg/api/v1"
 schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
@@ -35,29 +35,29 @@ func TestBalancedResourceAllocation(t *testing.T) {
 "bar": "foo",
 "baz": "blah",
 }
-machine1Spec := api.PodSpec{
+machine1Spec := v1.PodSpec{
 NodeName: "machine1",
 }
-machine2Spec := api.PodSpec{
+machine2Spec := v1.PodSpec{
 NodeName: "machine2",
 }
-noResources := api.PodSpec{
-Containers: []api.Container{},
+noResources := v1.PodSpec{
+Containers: []v1.Container{},
 }
-cpuOnly := api.PodSpec{
+cpuOnly := v1.PodSpec{
 NodeName: "machine1",
-Containers: []api.Container{
+Containers: []v1.Container{
 {
-Resources: api.ResourceRequirements{
-Requests: api.ResourceList{
+Resources: v1.ResourceRequirements{
+Requests: v1.ResourceList{
 "cpu": resource.MustParse("1000m"),
 "memory": resource.MustParse("0"),
 },
 },
 },
 {
-Resources: api.ResourceRequirements{
-Requests: api.ResourceList{
+Resources: v1.ResourceRequirements{
+Requests: v1.ResourceList{
 "cpu": resource.MustParse("2000m"),
 "memory": resource.MustParse("0"),
 },
@@ -67,20 +67,20 @@ func TestBalancedResourceAllocation(t *testing.T) {
 }
 cpuOnly2 := cpuOnly
 cpuOnly2.NodeName = "machine2"
-cpuAndMemory := api.PodSpec{
+cpuAndMemory := v1.PodSpec{
 NodeName: "machine2",
-Containers: []api.Container{
+Containers: []v1.Container{
 {
-Resources: api.ResourceRequirements{
-Requests: api.ResourceList{
+Resources: v1.ResourceRequirements{
+Requests: v1.ResourceList{
 "cpu": resource.MustParse("1000m"),
 "memory": resource.MustParse("2000"),
 },
 },
 },
 {
-Resources: api.ResourceRequirements{
-Requests: api.ResourceList{
+Resources: v1.ResourceRequirements{
+Requests: v1.ResourceList{
 "cpu": resource.MustParse("2000m"),
 "memory": resource.MustParse("3000"),
 },
@@ -89,9 +89,9 @@ func TestBalancedResourceAllocation(t *testing.T) {
 },
 }
 tests := []struct {
-pod *api.Pod
-pods []*api.Pod
-nodes []*api.Node
+pod *v1.Pod
+pods []*v1.Pod
+nodes []*v1.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -107,8 +107,8 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction: 0 / 10000 = 0%
 Node2 Score: 10 - (0-0)*10 = 10
 */
-pod: &api.Pod{Spec: noResources},
-nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+pod: &v1.Pod{Spec: noResources},
+nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
 test: "nothing scheduled, nothing requested",
 },
@@ -124,8 +124,8 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction: 5000/10000 = 50%
 Node2 Score: 10 - (0.5-0.5)*10 = 10
 */
-pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
+pod: &v1.Pod{Spec: cpuAndMemory},
+nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 10}},
 test: "nothing scheduled, resources requested, differently sized machines",
 },
@@ -141,15 +141,15 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction: 0 / 10000 = 0%
 Node2 Score: 10 - (0-0)*10 = 10
 */
-pod: &api.Pod{Spec: noResources},
-nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+pod: &v1.Pod{Spec: noResources},
+nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
 test: "no resources requested, pods scheduled",
-pods: []*api.Pod{
-{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
-{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
-{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
-{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pods: []*v1.Pod{
+{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
+{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
+{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
+{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
 },
 },
 {
@@ -164,15 +164,15 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction: 5000 / 20000 = 25%
 Node2 Score: 10 - (0.6-0.25)*10 = 6
 */
-pod: &api.Pod{Spec: noResources},
-nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+pod: &v1.Pod{Spec: noResources},
+nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}},
 test: "no resources requested, pods scheduled with resources",
-pods: []*api.Pod{
-{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
-{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
-{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
-{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+pods: []*v1.Pod{
+{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
+{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
+{Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
+{Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
 },
 },
 {
@@ -187,11 +187,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction: 10000 / 20000 = 50%
 Node2 Score: 10 - (0.6-0.5)*10 = 9
 */
-pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
+pod: &v1.Pod{Spec: cpuAndMemory},
+nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}},
 test: "resources requested, pods scheduled with resources",
-pods: []*api.Pod{
+pods: []*v1.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
 },
@@ -208,11 +208,11 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction: 10000 / 50000 = 20%
 Node2 Score: 10 - (0.6-0.2)*10 = 6
 */
-pod: &api.Pod{Spec: cpuAndMemory},
-nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
+pod: &v1.Pod{Spec: cpuAndMemory},
+nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}},
 test: "resources requested, pods scheduled with resources, differently sized machines",
-pods: []*api.Pod{
+pods: []*v1.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
 },
@@ -229,21 +229,21 @@ func TestBalancedResourceAllocation(t *testing.T) {
 Memory Fraction 5000 / 10000 = 50%
 Node2 Score: 0
 */
-pod: &api.Pod{Spec: cpuOnly},
-nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
+pod: &v1.Pod{Spec: cpuOnly},
+nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
 test: "requested resources exceed node capacity",
-pods: []*api.Pod{
+pods: []*v1.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
 },
 },
 {
-pod: &api.Pod{Spec: noResources},
-nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
+pod: &v1.Pod{Spec: noResources},
+nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
 test: "zero node resources, pods scheduled with resources",
-pods: []*api.Pod{
+pods: []*v1.Pod{
 {Spec: cpuOnly},
 {Spec: cpuAndMemory},
 },
@@ -19,7 +19,7 @@ package priorities
 import (
 "fmt"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
@@ -29,7 +29,7 @@ import (
 // based on the total size of those images.
 // - If none of the images are present, this node will be given the lowest priority.
 // - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority.
-func ImageLocalityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
 node := nodeInfo.Node()
 if node == nil {
 return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
@@ -66,7 +66,7 @@ func calculateScoreFromSize(sumSize int64) int {
 }

 // checkContainerImageOnNode checks if a container image is present on a node and returns its size.
-func checkContainerImageOnNode(node *api.Node, container *api.Container) int64 {
+func checkContainerImageOnNode(node *v1.Node, container *v1.Container) int64 {
 for _, image := range node.Status.Images {
 for _, name := range image.Names {
 if container.Image == name {
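
This priority rewards nodes that already have the pod's images. A rough standalone sketch of the size-to-score scaling implied by the test comments further down ("(250M-23M)/97.7M + 1 = 3"); the 23 MB lower bound and 1000 MB upper bound are assumptions read off those comments and expected scores, not verified constants from the source.

    package main

    import "fmt"

    const (
    	mb         = int64(1024 * 1024)
    	minImgSize = 23 * mb   // below this, locality is ignored (score 0)
    	maxImgSize = 1000 * mb // above this, the score is capped at 10
    )

    // scoreFromSize maps the total size of already-present images to a 0-10 score.
    func scoreFromSize(sum int64) int {
    	switch {
    	case sum < minImgSize:
    		return 0
    	case sum > maxImgSize:
    		return 10
    	default:
    		return int((sum-minImgSize)*10/(maxImgSize-minImgSize)) + 1
    	}
    }

    func main() {
    	fmt.Println(scoreFromSize(250 * mb)) // 3, matching the test comment below
    	fmt.Println(scoreFromSize(180 * mb)) // 2, the "two images on one node" case
    	fmt.Println(scoreFromSize(10 * mb))  // 0, below the minimum size
    }
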
@@ -21,14 +21,14 @@ import (
 "sort"
 "testing"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

 func TestImageLocalityPriority(t *testing.T) {
-test_40_250 := api.PodSpec{
-Containers: []api.Container{
+test_40_250 := v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "gcr.io/40",
 },
@@ -38,8 +38,8 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }

-test_40_140 := api.PodSpec{
-Containers: []api.Container{
+test_40_140 := v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "gcr.io/40",
 },
@@ -49,8 +49,8 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }

-test_min_max := api.PodSpec{
-Containers: []api.Container{
+test_min_max := v1.PodSpec{
+Containers: []v1.Container{
 {
 Image: "gcr.io/10",
 },
@@ -60,8 +60,8 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }

-node_40_140_2000 := api.NodeStatus{
-Images: []api.ContainerImage{
+node_40_140_2000 := v1.NodeStatus{
+Images: []v1.ContainerImage{
 {
 Names: []string{
 "gcr.io/40",
@@ -86,8 +86,8 @@ func TestImageLocalityPriority(t *testing.T) {
 },
 }

-node_250_10 := api.NodeStatus{
-Images: []api.ContainerImage{
+node_250_10 := v1.NodeStatus{
+Images: []v1.ContainerImage{
 {
 Names: []string{
 "gcr.io/250",
@@ -105,9 +105,9 @@ func TestImageLocalityPriority(t *testing.T) {
 }

 tests := []struct {
-pod *api.Pod
-pods []*api.Pod
-nodes []*api.Node
+pod *v1.Pod
+pods []*v1.Pod
+nodes []*v1.Node
 expectedList schedulerapi.HostPriorityList
 test string
 }{
@@ -121,8 +121,8 @@ func TestImageLocalityPriority(t *testing.T) {
 // Node2
 // Image: gcr.io/250 250MB
 // Score: (250M-23M)/97.7M + 1 = 3
-pod: &api.Pod{Spec: test_40_250},
-nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test_40_250},
+nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}},
 test: "two images spread on two nodes, prefer the larger image one",
 },
@@ -136,8 +136,8 @@ func TestImageLocalityPriority(t *testing.T) {
 // Node2
 // Image: not present
 // Score: 0
-pod: &api.Pod{Spec: test_40_140},
-nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test_40_140},
+nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}},
 test: "two images on one node, prefer this node",
 },
@@ -151,8 +151,8 @@ func TestImageLocalityPriority(t *testing.T) {
 // Node2
 // Image: gcr.io/10 10MB
 // Score: 10 < min score = 0
-pod: &api.Pod{Spec: test_min_max},
-nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
+pod: &v1.Pod{Spec: test_min_max},
+nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)},
 expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
 test: "if exceed limit, use limit",
 },
@@ -174,9 +174,9 @@ func TestImageLocalityPriority(t *testing.T) {
 }
 }

-func makeImageNode(node string, status api.NodeStatus) *api.Node {
-return &api.Node{
-ObjectMeta: api.ObjectMeta{Name: node},
+func makeImageNode(node string, status v1.NodeStatus) *v1.Node {
+return &v1.Node{
+ObjectMeta: v1.ObjectMeta{Name: node},
 Status: status,
 }
 }
@@ -20,7 +20,7 @@ import (
 "sync"

 "github.com/golang/glog"
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
 "k8s.io/kubernetes/pkg/util/workqueue"
 "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates"
@@ -57,7 +57,7 @@ type podAffinityPriorityMap struct {
 sync.Mutex

 // nodes contain all nodes that should be considered
-nodes []*api.Node
+nodes []*v1.Node
 // counts store the mapping from node name to so-far computed score of
 // the node.
 counts map[string]float64
@@ -67,7 +67,7 @@ type podAffinityPriorityMap struct {
 firstError error
 }

-func newPodAffinityPriorityMap(nodes []*api.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap {
+func newPodAffinityPriorityMap(nodes []*v1.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap {
 return &podAffinityPriorityMap{
 nodes: nodes,
 counts: make(map[string]float64, len(nodes)),
@@ -83,7 +83,7 @@ func (p *podAffinityPriorityMap) setError(err error) {
 }
 }

-func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, weight float64) {
+func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, weight float64) {
 match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, podDefiningAffinityTerm, term)
 if err != nil {
 p.setError(err)
@@ -102,7 +102,7 @@ func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefin
 }
 }

-func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, multiplier int) {
+func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) {
 for i := range terms {
 term := &terms[i]
 p.processTerm(&term.PodAffinityTerm, podDefiningAffinityTerm, podToCheck, fixedNode, float64(term.Weight*int32(multiplier)))
@@ -114,8 +114,8 @@ func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTer
 // that node; the node(s) with the highest sum are the most preferred.
 // Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity,
 // symmetry need to be considered for hard requirements from podAffinity
-func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
-affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
+func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
+affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
 if err != nil {
 return nil, err
 }
@@ -134,12 +134,12 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod
 // the node.
 pm := newPodAffinityPriorityMap(nodes, ipa.failureDomains)

-processPod := func(existingPod *api.Pod) error {
+processPod := func(existingPod *v1.Pod) error {
 existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
 if err != nil {
 return err
 }
-existingPodAffinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations)
+existingPodAffinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations)
 if err != nil {
 return err
 }
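
As the comments above describe, this priority walks every existing pod, adds each matching term's weight to a per-node count (anti-affinity contributes with a negative multiplier), and finally maps the counts onto the 0-10 host priority range. A compressed standalone sketch of that accumulate-then-normalize step; the min/max normalization and the example counts are assumptions chosen to be consistent with the expected scores in the tests below, not the exact production code.

    package main

    import "fmt"

    // normalize scales per-node affinity counts into 0-10 priorities,
    // giving 10 to the best node(s) and 0 to the worst.
    func normalize(counts map[string]float64) map[string]int {
    	var min, max float64
    	first := true
    	for _, c := range counts {
    		if first {
    			min, max, first = c, c, false
    			continue
    		}
    		if c < min {
    			min = c
    		}
    		if c > max {
    			max = c
    		}
    	}
    	scores := make(map[string]int, len(counts))
    	for node, c := range counts {
    		if max != min {
    			scores[node] = int(10 * (c - min) / (max - min))
    		} else {
    			scores[node] = 0
    		}
    	}
    	return scores
    }

    func main() {
    	// Hypothetical weighted matches accumulated per node.
    	counts := map[string]float64{"machine1": 16, "machine2": 8, "machine3": 0}
    	fmt.Println(normalize(counts)) // map[machine1:10 machine2:5 machine3:0]
    }
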
@ -22,17 +22,17 @@ import (
|
|||||||
"strings"
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
type FakeNodeListInfo []*api.Node
|
type FakeNodeListInfo []*v1.Node
|
||||||
|
|
||||||
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
|
func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
if node.Name == nodeName {
|
if node.Name == nodeName {
|
||||||
return node, nil
|
return node, nil
|
||||||
@ -66,7 +66,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity
|
// considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity
|
||||||
stayWithS1InRegion := map[string]string{
|
stayWithS1InRegion := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAffinity": {
|
{"podAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||||
"weight": 5,
|
"weight": 5,
|
||||||
@ -85,7 +85,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}}`,
|
}}`,
|
||||||
}
|
}
|
||||||
stayWithS2InRegion := map[string]string{
|
stayWithS2InRegion := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAffinity": {
|
{"podAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||||
"weight": 6,
|
"weight": 6,
|
||||||
@ -104,7 +104,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}}`,
|
}}`,
|
||||||
}
|
}
|
||||||
affinity3 := map[string]string{
|
affinity3 := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAffinity": {
|
{"podAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [
|
"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||||
{
|
{
|
||||||
@ -144,7 +144,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}}`,
|
}}`,
|
||||||
}
|
}
|
||||||
hardAffinity := map[string]string{
|
hardAffinity := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAffinity": {
|
{"podAffinity": {
|
||||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||||
{
|
{
|
||||||
@ -174,7 +174,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}}`,
|
}}`,
|
||||||
}
|
}
|
||||||
awayFromS1InAz := map[string]string{
|
awayFromS1InAz := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAntiAffinity": {
|
{"podAntiAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||||
"weight": 5,
|
"weight": 5,
|
||||||
@ -194,7 +194,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// to stay away from security S2 in any az.
|
// to stay away from security S2 in any az.
|
||||||
awayFromS2InAz := map[string]string{
|
awayFromS2InAz := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAntiAffinity": {
|
{"podAntiAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||||
"weight": 5,
|
"weight": 5,
|
||||||
@ -214,7 +214,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
// to stay with security S1 in same region, stay away from security S2 in any az.
|
// to stay with security S1 in same region, stay away from security S2 in any az.
|
||||||
stayWithS1InRegionAwayFromS2InAz := map[string]string{
|
stayWithS1InRegionAwayFromS2InAz := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAffinity": {
|
{"podAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||||
"weight": 8,
|
"weight": 8,
|
||||||
@ -250,18 +250,18 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
test: "all machines are same priority as Affinity is nil",
|
test: "all machines are same priority as Affinity is nil",
|
||||||
@ -270,16 +270,16 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score
|
// the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score
|
||||||
// the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score
|
// the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
|
test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" +
|
||||||
@ -290,14 +290,14 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector,
|
// the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector,
|
||||||
// get a low score.
|
// get a low score.
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegion}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegion}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||||
test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
|
test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score",
|
||||||
@ -307,37 +307,37 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score),
|
// Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score),
|
||||||
// while all the nodes in regionIndia should get another same score(low score).
|
// while all the nodes in regionIndia should get another same score(low score).
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}},
|
||||||
test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
|
test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score",
|
||||||
},
|
},
|
||||||
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
|
// Test with the different operators and values for pod affinity scheduling preference, including some match failures.
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||||
test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
|
test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ",
|
||||||
@ -345,29 +345,29 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
|
// Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods,
|
||||||
// but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference.
|
// but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference.
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||||
test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||||
test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry",
|
||||||
@ -380,69 +380,69 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels.
|
// there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels.
|
||||||
// But there are more pods on node1 that match the preference than node2. Then, node1 get a lower score than node2.
|
// But there are more pods on node1 that match the preference than node2. Then, node1 get a lower score than node2.
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||||
test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
|
test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||||
test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
|
test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||||
test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
|
test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score",
|
||||||
},
|
},
|
||||||
// Test the symmetry cases for anti affinity
|
// Test the symmetry cases for anti affinity
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||||
test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
|
test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score",
|
||||||
},
|
},
|
||||||
// Test both affinity and anti-affinity
|
// Test both affinity and anti-affinity
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||||
test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
|
test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity",
|
||||||
@ -452,22 +452,22 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// so that all the pods of an RC/service can stay in the same region while trying to spread apart from each other
|
// so that all the pods of an RC/service can stay in the same region while trying to spread apart from each other
|
||||||
// machine-1, machine-3 and machine-4 are in ChinaRegion; the others, machine-2 and machine-5, are in IndiaRegion
|
// machine-1, machine-3 and machine-4 are in ChinaRegion; the others, machine-2 and machine-5, are in IndiaRegion
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}},
|
||||||
test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
|
test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels",
|
||||||
@ -478,18 +478,18 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
// for Affinity symmetry, the weights are: 0, 0, 8, 0
|
// for Affinity symmetry, the weights are: 0, 0, 8, 0
|
||||||
// for Anti Affinity symmetry, the weights are: 0, 0, 0, -5
|
// for Anti Affinity symmetry, the weights are: 0, 0, 0, -5
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}},
|
{Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Annotations: awayFromS1InAz}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}},
|
||||||
test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
|
test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry",
|
||||||
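The jump from the raw weights listed in the comment above to the 10/0/10/0 expectedList comes from normalizing each node's weighted count against the minimum and maximum counts across all nodes. A rough sketch of that step, assuming linear min-max scaling; the helper and the sample counts are illustrative only, not the scheduler's exact code:

package main

import "fmt"

// normalizeScores maps per-node weighted counts (which can be negative because
// of anti-affinity) onto the 0-10 range: the best node gets 10, the worst 0.
func normalizeScores(counts map[string]float64) map[string]int {
	var min, max float64
	first := true
	for _, c := range counts {
		if first || c < min {
			min = c
		}
		if first || c > max {
			max = c
		}
		first = false
	}
	scores := make(map[string]int, len(counts))
	for node, c := range counts {
		if max > min {
			scores[node] = int(10 * (c - min) / (max - min))
		} else {
			scores[node] = 0
		}
	}
	return scores
}

func main() {
	// Illustrative totals only, e.g. +8 from affinity terms, -5 from anti-affinity.
	counts := map[string]float64{"machine1": 8, "machine2": -5, "machine3": 8, "machine4": -5}
	fmt.Println(normalizeScores(counts)) // map[machine1:10 machine2:0 machine3:10 machine4:0]
}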
@ -501,8 +501,8 @@ func TestInterPodAffinityPriority(t *testing.T) {
|
|||||||
info: FakeNodeListInfo(test.nodes),
|
info: FakeNodeListInfo(test.nodes),
|
||||||
nodeLister: algorithm.FakeNodeLister(test.nodes),
|
nodeLister: algorithm.FakeNodeLister(test.nodes),
|
||||||
podLister: algorithm.FakePodLister(test.pods),
|
podLister: algorithm.FakePodLister(test.pods),
|
||||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")},
|
||||||
}
|
}
|
||||||
list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@ -528,7 +528,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
|
|||||||
"az": "az1",
|
"az": "az1",
|
||||||
}
|
}
|
||||||
hardPodAffinity := map[string]string{
|
hardPodAffinity := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAffinity": {
|
{"podAffinity": {
|
||||||
"requiredDuringSchedulingIgnoredDuringExecution": [
|
"requiredDuringSchedulingIgnoredDuringExecution": [
|
||||||
{
|
{
|
||||||
@ -546,38 +546,38 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
|
|||||||
}}`,
|
}}`,
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
hardPodAffinityWeight int
|
hardPodAffinityWeight int
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}},
|
||||||
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
|
test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}},
|
||||||
},
|
},
|
||||||
hardPodAffinityWeight: 0,
|
hardPodAffinityWeight: 0,
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
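The two cases above pivot on hardPodAffinityWeight. A rough sketch of the idea, under the assumption that each existing pod whose required pod-affinity term matches the incoming pod contributes that weight to its node before normalization; the helper name and inputs are hypothetical, not the scheduler's internals:

package main

import "fmt"

// symmetryCounts: every existing pod with a matching required pod-affinity term
// adds hardPodAffinityWeight to its node. With the default weight of 1 the
// matching nodes lead and normalize to 10; with a weight of 0 the symmetry
// bonus vanishes and every node ends up at 0.
func symmetryCounts(hardPodAffinityWeight int, matchingPodsPerNode map[string]int) map[string]int {
	counts := make(map[string]int, len(matchingPodsPerNode))
	for node, n := range matchingPodsPerNode {
		counts[node] = hardPodAffinityWeight * n
	}
	return counts
}

func main() {
	matching := map[string]int{"machine1": 1, "machine2": 1, "machine3": 0}
	fmt.Println(symmetryCounts(1, matching)) // machine1/machine2 lead, scoring 10, 10, 0 above
	fmt.Println(symmetryCounts(0, matching)) // all counts zero, scoring 0, 0, 0 above
}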
@ -613,7 +613,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
|||||||
"security": "S1",
|
"security": "S1",
|
||||||
}
|
}
|
||||||
antiAffinity1 := map[string]string{
|
antiAffinity1 := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"podAntiAffinity": {
|
{"podAntiAffinity": {
|
||||||
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
"preferredDuringSchedulingIgnoredDuringExecution": [{
|
||||||
"weight": 5,
|
"weight": 5,
|
||||||
@ -632,36 +632,36 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
|||||||
}}`,
|
}}`,
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
failureDomains priorityutil.Topologies
|
failureDomains priorityutil.Topologies
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||||
},
|
},
|
||||||
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")},
|
failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}},
|
||||||
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.",
|
test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||||
{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}},
|
{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}},
|
||||||
},
|
},
|
||||||
failureDomains: priorityutil.Topologies{},
|
failureDomains: priorityutil.Topologies{},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||||
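The failureDomains field above is produced by splitting v1.DefaultFailureDomains on commas. A small sketch of that fallback; the concrete label keys are stated as an assumption about this release's defaults, not read from the diff:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed default: node hostname, zone and region failure-domain labels.
	const defaultFailureDomains = "kubernetes.io/hostname," +
		"failure-domain.beta.kubernetes.io/zone," +
		"failure-domain.beta.kubernetes.io/region"

	// An anti-affinity term with an empty topologyKey falls back to these keys.
	// An empty Topologies{} (the second case above) has nothing to fall back to,
	// which is why both nodes score 0 there.
	fmt.Println(strings.Split(defaultFailureDomains, ","))
}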
@ -674,7 +674,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) {
|
|||||||
info: FakeNodeListInfo(test.nodes),
|
info: FakeNodeListInfo(test.nodes),
|
||||||
nodeLister: algorithm.FakeNodeLister(test.nodes),
|
nodeLister: algorithm.FakeNodeLister(test.nodes),
|
||||||
podLister: algorithm.FakePodLister(test.pods),
|
podLister: algorithm.FakePodLister(test.pods),
|
||||||
hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight,
|
hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
|
||||||
failureDomains: test.failureDomains,
|
failureDomains: test.failureDomains,
|
||||||
}
|
}
|
||||||
list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
|
||||||
|
@ -19,7 +19,7 @@ package priorities
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
|
|
||||||
@ -30,7 +30,7 @@ import (
|
|||||||
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
|
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
|
||||||
// based on the minimum of the average of the fraction of requested to capacity.
|
// based on the minimum of the average of the fraction of requested to capacity.
|
||||||
// Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2
|
// Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2
|
||||||
func LeastRequestedPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func LeastRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
var nonZeroRequest *schedulercache.Resource
|
var nonZeroRequest *schedulercache.Resource
|
||||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||||
nonZeroRequest = priorityMeta.nonZeroRequest
|
nonZeroRequest = priorityMeta.nonZeroRequest
|
||||||
@ -59,7 +59,7 @@ func calculateUnusedScore(requested int64, capacity int64, node string) int64 {
|
|||||||
// Calculates host priority based on the amount of unused resources.
|
// Calculates host priority based on the amount of unused resources.
|
||||||
// 'node' has information about the resources on the node.
|
// 'node' has information about the resources on the node.
|
||||||
// 'pods' is a list of pods currently scheduled on the node.
|
// 'pods' is a list of pods currently scheduled on the node.
|
||||||
func calculateUnusedPriority(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func calculateUnusedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||||
|
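The Details formula above can be checked with a few lines of plain Go. This is a minimal sketch, not the scheduler's own calculateUnusedScore, and the sample numbers are borrowed from the "differently sized machines" test case in the test file that follows:

package main

import "fmt"

// leastRequestedScore mirrors the per-resource formula in the Details comment:
// (capacity - requested) * 10 / capacity, floored at 0.
func leastRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return ((capacity - requested) * 10) / capacity
}

func main() {
	// Pod requesting 3000m CPU / 5000 memory on a 4000m / 10000 node.
	cpuScore := leastRequestedScore(3000, 4000)  // 2
	memScore := leastRequestedScore(5000, 10000) // 5
	fmt.Println((cpuScore + memScore) / 2)       // 3, matching machine1's expected score below
}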
@ -20,8 +20,8 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
@ -35,29 +35,29 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
"bar": "foo",
|
"bar": "foo",
|
||||||
"baz": "blah",
|
"baz": "blah",
|
||||||
}
|
}
|
||||||
machine1Spec := api.PodSpec{
|
machine1Spec := v1.PodSpec{
|
||||||
NodeName: "machine1",
|
NodeName: "machine1",
|
||||||
}
|
}
|
||||||
machine2Spec := api.PodSpec{
|
machine2Spec := v1.PodSpec{
|
||||||
NodeName: "machine2",
|
NodeName: "machine2",
|
||||||
}
|
}
|
||||||
noResources := api.PodSpec{
|
noResources := v1.PodSpec{
|
||||||
Containers: []api.Container{},
|
Containers: []v1.Container{},
|
||||||
}
|
}
|
||||||
cpuOnly := api.PodSpec{
|
cpuOnly := v1.PodSpec{
|
||||||
NodeName: "machine1",
|
NodeName: "machine1",
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("1000m"),
|
"cpu": resource.MustParse("1000m"),
|
||||||
"memory": resource.MustParse("0"),
|
"memory": resource.MustParse("0"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("2000m"),
|
"cpu": resource.MustParse("2000m"),
|
||||||
"memory": resource.MustParse("0"),
|
"memory": resource.MustParse("0"),
|
||||||
},
|
},
|
||||||
@ -67,20 +67,20 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
}
|
}
|
||||||
cpuOnly2 := cpuOnly
|
cpuOnly2 := cpuOnly
|
||||||
cpuOnly2.NodeName = "machine2"
|
cpuOnly2.NodeName = "machine2"
|
||||||
cpuAndMemory := api.PodSpec{
|
cpuAndMemory := v1.PodSpec{
|
||||||
NodeName: "machine2",
|
NodeName: "machine2",
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("1000m"),
|
"cpu": resource.MustParse("1000m"),
|
||||||
"memory": resource.MustParse("2000"),
|
"memory": resource.MustParse("2000"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("2000m"),
|
"cpu": resource.MustParse("2000m"),
|
||||||
"memory": resource.MustParse("3000"),
|
"memory": resource.MustParse("3000"),
|
||||||
},
|
},
|
||||||
@ -89,9 +89,9 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
@ -107,8 +107,8 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((10000 - 0) *10) / 10000 = 10
|
Memory Score: ((10000 - 0) *10) / 10000 = 10
|
||||||
Node2 Score: (10 + 10) / 2 = 10
|
Node2 Score: (10 + 10) / 2 = 10
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||||
test: "nothing scheduled, nothing requested",
|
test: "nothing scheduled, nothing requested",
|
||||||
},
|
},
|
||||||
@ -124,8 +124,8 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((10000 - 5000) *10) / 10000 = 5
|
Memory Score: ((10000 - 5000) *10) / 10000 = 5
|
||||||
Node2 Score: (5 + 5) / 2 = 5
|
Node2 Score: (5 + 5) / 2 = 5
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: cpuAndMemory},
|
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}},
|
||||||
test: "nothing scheduled, resources requested, differently sized machines",
|
test: "nothing scheduled, resources requested, differently sized machines",
|
||||||
},
|
},
|
||||||
@ -141,15 +141,15 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((10000 - 0) *10) / 10000 = 10
|
Memory Score: ((10000 - 0) *10) / 10000 = 10
|
||||||
Node2 Score: (10 + 10) / 2 = 10
|
Node2 Score: (10 + 10) / 2 = 10
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||||
test: "no resources requested, pods scheduled",
|
test: "no resources requested, pods scheduled",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -164,15 +164,15 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
|
Memory Score: ((20000 - 5000) *10) / 20000 = 7.5
|
||||||
Node2 Score: (4 + 7.5) / 2 = 5
|
Node2 Score: (4 + 7.5) / 2 = 5
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}},
|
||||||
test: "no resources requested, pods scheduled with resources",
|
test: "no resources requested, pods scheduled with resources",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -187,11 +187,11 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((20000 - 10000) *10) / 20000 = 5
|
Memory Score: ((20000 - 10000) *10) / 20000 = 5
|
||||||
Node2 Score: (4 + 5) / 2 = 4
|
Node2 Score: (4 + 5) / 2 = 4
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: cpuAndMemory},
|
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}},
|
||||||
test: "resources requested, pods scheduled with resources",
|
test: "resources requested, pods scheduled with resources",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly},
|
{Spec: cpuOnly},
|
||||||
{Spec: cpuAndMemory},
|
{Spec: cpuAndMemory},
|
||||||
},
|
},
|
||||||
@ -208,11 +208,11 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((50000 - 10000) *10) / 50000 = 8
|
Memory Score: ((50000 - 10000) *10) / 50000 = 8
|
||||||
Node2 Score: (4 + 8) / 2 = 6
|
Node2 Score: (4 + 8) / 2 = 6
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: cpuAndMemory},
|
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
|
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}},
|
||||||
test: "resources requested, pods scheduled with resources, differently sized machines",
|
test: "resources requested, pods scheduled with resources, differently sized machines",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly},
|
{Spec: cpuOnly},
|
||||||
{Spec: cpuAndMemory},
|
{Spec: cpuAndMemory},
|
||||||
},
|
},
|
||||||
@ -229,21 +229,21 @@ func TestLeastRequested(t *testing.T) {
|
|||||||
Memory Score: ((10000 - 5000) *10) / 10000 = 5
|
Memory Score: ((10000 - 5000) *10) / 10000 = 5
|
||||||
Node2 Score: (0 + 5) / 2 = 2
|
Node2 Score: (0 + 5) / 2 = 2
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: cpuOnly},
|
pod: &v1.Pod{Spec: cpuOnly},
|
||||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}},
|
||||||
test: "requested resources exceed node capacity",
|
test: "requested resources exceed node capacity",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly},
|
{Spec: cpuOnly},
|
||||||
{Spec: cpuAndMemory},
|
{Spec: cpuAndMemory},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
|
nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||||
test: "zero node resources, pods scheduled with resources",
|
test: "zero node resources, pods scheduled with resources",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly},
|
{Spec: cpuOnly},
|
||||||
{Spec: cpuAndMemory},
|
{Spec: cpuAndMemory},
|
||||||
},
|
},
|
||||||
|
@ -17,19 +17,19 @@ limitations under the License.
|
|||||||
package priorities
|
package priorities
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
// priorityMetadata is a type that is passed as metadata for priority functions
|
// priorityMetadata is a type that is passed as metadata for priority functions
|
||||||
type priorityMetadata struct {
|
type priorityMetadata struct {
|
||||||
nonZeroRequest *schedulercache.Resource
|
nonZeroRequest *schedulercache.Resource
|
||||||
podTolerations []api.Toleration
|
podTolerations []v1.Toleration
|
||||||
affinity *api.Affinity
|
affinity *v1.Affinity
|
||||||
}
|
}
|
||||||
|
|
||||||
// PriorityMetadata is a MetadataProducer. Node info can be nil.
|
// PriorityMetadata is a MetadataProducer. Node info can be nil.
|
||||||
func PriorityMetadata(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
|
func PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
|
||||||
// If we cannot compute metadata, just return nil
|
// If we cannot compute metadata, just return nil
|
||||||
if pod == nil {
|
if pod == nil {
|
||||||
return nil
|
return nil
|
||||||
@ -38,7 +38,7 @@ func PriorityMetadata(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
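The metadata producer above exists so per-pod work (non-zero requests, tolerations, parsed affinity) is computed once and shared by every priority function. A stand-alone sketch of the consumption pattern, with illustrative names rather than the scheduler's own types:

package main

import "fmt"

// priorityMeta is a stand-in for the precomputed per-pod metadata.
type priorityMeta struct {
	nonZeroMilliCPU int64
}

// produceMeta mirrors the producer above: return nil when metadata cannot be
// computed, so callers fall back to recomputing from the pod.
func produceMeta(podName string) interface{} {
	if podName == "" {
		return nil
	}
	return &priorityMeta{nonZeroMilliCPU: 100}
}

// requestedMilliCPU shows the consumer side: type-assert the interface{},
// use the fast path when it succeeds, recompute otherwise.
func requestedMilliCPU(meta interface{}) int64 {
	if m, ok := meta.(*priorityMeta); ok {
		return m.nonZeroMilliCPU
	}
	return 100 // fallback: recompute from the pod spec
}

func main() {
	fmt.Println(requestedMilliCPU(produceMeta("nginx"))) // 100 via the metadata
	fmt.Println(requestedMilliCPU(nil))                  // 100 via the fallback
}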
@ -19,7 +19,7 @@ package priorities
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
|
|
||||||
@ -30,7 +30,7 @@ import (
|
|||||||
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
|
// It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes
|
||||||
// based on the maximum of the average of the fraction of requested to capacity.
|
// based on the maximum of the average of the fraction of requested to capacity.
|
||||||
// Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2
|
// Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2
|
||||||
func MostRequestedPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func MostRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
var nonZeroRequest *schedulercache.Resource
|
var nonZeroRequest *schedulercache.Resource
|
||||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||||
nonZeroRequest = priorityMeta.nonZeroRequest
|
nonZeroRequest = priorityMeta.nonZeroRequest
|
||||||
@ -62,7 +62,7 @@ func calculateUsedScore(requested int64, capacity int64, node string) int64 {
|
|||||||
|
|
||||||
// Calculate the resource used on a node. 'node' has information about the resources on the node.
|
// Calculate the resource used on a node. 'node' has information about the resources on the node.
|
||||||
// 'pods' is a list of pods currently scheduled on the node.
|
// 'pods' is a list of pods currently scheduled on the node.
|
||||||
func calculateUsedPriority(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func calculateUsedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||||
|
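By contrast with the least-requested variant, the most-requested Details formula rewards utilization. A minimal sketch, again not the real calculateUsedScore, with numbers taken from the "differently sized machines" case in the test file that follows:

package main

import "fmt"

// mostRequestedScore mirrors the per-resource formula in the Details comment:
// requested * 10 / capacity, with out-of-range requests scored as 0.
func mostRequestedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (requested * 10) / capacity
}

func main() {
	// Pod requesting 3000m CPU / 5000 memory on a 4000m / 10000 node.
	cpuScore := mostRequestedScore(3000, 4000)  // 7
	memScore := mostRequestedScore(5000, 10000) // 5
	fmt.Println((cpuScore + memScore) / 2)      // 6, matching machine1's expected score below
}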
@ -20,8 +20,8 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
@ -35,23 +35,23 @@ func TestMostRequested(t *testing.T) {
|
|||||||
"bar": "foo",
|
"bar": "foo",
|
||||||
"baz": "blah",
|
"baz": "blah",
|
||||||
}
|
}
|
||||||
noResources := api.PodSpec{
|
noResources := v1.PodSpec{
|
||||||
Containers: []api.Container{},
|
Containers: []v1.Container{},
|
||||||
}
|
}
|
||||||
cpuOnly := api.PodSpec{
|
cpuOnly := v1.PodSpec{
|
||||||
NodeName: "machine1",
|
NodeName: "machine1",
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("1000m"),
|
"cpu": resource.MustParse("1000m"),
|
||||||
"memory": resource.MustParse("0"),
|
"memory": resource.MustParse("0"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("2000m"),
|
"cpu": resource.MustParse("2000m"),
|
||||||
"memory": resource.MustParse("0"),
|
"memory": resource.MustParse("0"),
|
||||||
},
|
},
|
||||||
@ -61,20 +61,20 @@ func TestMostRequested(t *testing.T) {
|
|||||||
}
|
}
|
||||||
cpuOnly2 := cpuOnly
|
cpuOnly2 := cpuOnly
|
||||||
cpuOnly2.NodeName = "machine2"
|
cpuOnly2.NodeName = "machine2"
|
||||||
cpuAndMemory := api.PodSpec{
|
cpuAndMemory := v1.PodSpec{
|
||||||
NodeName: "machine2",
|
NodeName: "machine2",
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("1000m"),
|
"cpu": resource.MustParse("1000m"),
|
||||||
"memory": resource.MustParse("2000"),
|
"memory": resource.MustParse("2000"),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse("2000m"),
|
"cpu": resource.MustParse("2000m"),
|
||||||
"memory": resource.MustParse("3000"),
|
"memory": resource.MustParse("3000"),
|
||||||
},
|
},
|
||||||
@ -83,9 +83,9 @@ func TestMostRequested(t *testing.T) {
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
@ -101,8 +101,8 @@ func TestMostRequested(t *testing.T) {
|
|||||||
Memory Score: (0 * 10) / 10000 = 0
|
Memory Score: (0 * 10) / 10000 = 0
|
||||||
Node2 Score: (0 + 0) / 2 = 0
|
Node2 Score: (0 + 0) / 2 = 0
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||||
test: "nothing scheduled, nothing requested",
|
test: "nothing scheduled, nothing requested",
|
||||||
},
|
},
|
||||||
@ -118,8 +118,8 @@ func TestMostRequested(t *testing.T) {
|
|||||||
Memory Score: (5000 * 10) / 10000 = 5
|
Memory Score: (5000 * 10) / 10000 = 5
|
||||||
Node2 Score: (5 + 5) / 2 = 5
|
Node2 Score: (5 + 5) / 2 = 5
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: cpuAndMemory},
|
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||||
nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}},
|
||||||
test: "nothing scheduled, resources requested, differently sized machines",
|
test: "nothing scheduled, resources requested, differently sized machines",
|
||||||
},
|
},
|
||||||
@ -135,15 +135,15 @@ func TestMostRequested(t *testing.T) {
|
|||||||
Memory Score: (5000 * 10) / 20000 = 2.5
|
Memory Score: (5000 * 10) / 20000 = 2.5
|
||||||
Node2 Score: (6 + 2.5) / 2 = 4
|
Node2 Score: (6 + 2.5) / 2 = 4
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}},
|
||||||
test: "no resources requested, pods scheduled with resources",
|
test: "no resources requested, pods scheduled with resources",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -158,11 +158,11 @@ func TestMostRequested(t *testing.T) {
|
|||||||
Memory Score: (10000 * 10) / 20000 = 5
|
Memory Score: (10000 * 10) / 20000 = 5
|
||||||
Node2 Score: (6 + 5) / 2 = 5
|
Node2 Score: (6 + 5) / 2 = 5
|
||||||
*/
|
*/
|
||||||
pod: &api.Pod{Spec: cpuAndMemory},
|
pod: &v1.Pod{Spec: cpuAndMemory},
|
||||||
nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}},
|
||||||
test: "resources requested, pods scheduled with resources",
|
test: "resources requested, pods scheduled with resources",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: cpuOnly},
|
{Spec: cpuOnly},
|
||||||
{Spec: cpuAndMemory},
|
{Spec: cpuAndMemory},
|
||||||
},
|
},
|
||||||
|
@ -20,7 +20,7 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
@ -31,19 +31,19 @@ import (
|
|||||||
// it will get an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
|
// it will get an addition of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms
|
||||||
// the node satisfies and the higher the weights of those satisfied preferredSchedulingTerms are, the higher
|
// the node satisfies and the higher the weights of those satisfied preferredSchedulingTerms are, the higher
|
||||||
// score the node gets.
|
// score the node gets.
|
||||||
func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||||
}
|
}
|
||||||
|
|
||||||
var affinity *api.Affinity
|
var affinity *v1.Affinity
|
||||||
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
if priorityMeta, ok := meta.(*priorityMetadata); ok {
|
||||||
affinity = priorityMeta.affinity
|
affinity = priorityMeta.affinity
|
||||||
} else {
|
} else {
|
||||||
// We couldn't parse metadata - fallback to computing it.
|
// We couldn't parse metadata - fallback to computing it.
|
||||||
var err error
|
var err error
|
||||||
affinity, err = api.GetAffinityFromPodAnnotations(pod.Annotations)
|
affinity, err = v1.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return schedulerapi.HostPriority{}, err
|
return schedulerapi.HostPriority{}, err
|
||||||
}
|
}
|
||||||
@ -62,7 +62,7 @@ func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *
|
|||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Avoid computing it for all nodes if this becomes a performance problem.
|
// TODO: Avoid computing it for all nodes if this becomes a performance problem.
|
||||||
nodeSelector, err := api.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
|
nodeSelector, err := v1.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return schedulerapi.HostPriority{}, err
|
return schedulerapi.HostPriority{}, err
|
||||||
}
|
}
|
||||||
@ -78,7 +78,7 @@ func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *
|
|||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func CalculateNodeAffinityPriorityReduce(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
|
func CalculateNodeAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
|
||||||
var maxCount int
|
var maxCount int
|
||||||
for i := range result {
|
for i := range result {
|
||||||
if result[i].Score > maxCount {
|
if result[i].Score > maxCount {
|
||||||
|
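Putting the map and reduce halves together: the map step sums the weights of the preferred terms a node satisfies, and the reduce step rescales those sums so the best node gets 10. A simplified, self-contained sketch with stand-in types; Term is not the real API type and the weights are illustrative:

package main

import "fmt"

// Term stands in for a preferredSchedulingTerm: a weight plus a predicate over
// node labels.
type Term struct {
	Weight  int
	Matches func(nodeLabels map[string]string) bool
}

// mapScore sums the weights of the terms the node satisfies.
func mapScore(terms []Term, nodeLabels map[string]string) int {
	score := 0
	for _, t := range terms {
		if t.Matches(nodeLabels) {
			score += t.Weight
		}
	}
	return score
}

// reduceScores rescales raw sums so the highest becomes 10, echoing the
// maxCount loop visible in the hunk above.
func reduceScores(scores []int) {
	maxCount := 0
	for _, s := range scores {
		if s > maxCount {
			maxCount = s
		}
	}
	for i := range scores {
		if maxCount > 0 {
			scores[i] = scores[i] * 10 / maxCount
		}
	}
}

func main() {
	terms := []Term{
		{Weight: 2, Matches: func(l map[string]string) bool { return l["foo"] == "bar" }},
		{Weight: 5, Matches: func(l map[string]string) bool { return l["key"] == "value" }},
	}
	raw := []int{
		mapScore(terms, map[string]string{"foo": "bar"}),                 // 2
		mapScore(terms, map[string]string{"foo": "bar", "key": "value"}), // 7
		mapScore(terms, map[string]string{}),                             // 0
	}
	reduceScores(raw)
	fmt.Println(raw) // [2 10 0]
}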
@ -20,7 +20,7 @@ import (
|
|||||||
"reflect"
|
"reflect"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
@ -33,7 +33,7 @@ func TestNodeAffinityPriority(t *testing.T) {
|
|||||||
label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"}
|
label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"}
|
||||||
|
|
||||||
affinity1 := map[string]string{
|
affinity1 := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||||
{
|
{
|
||||||
"weight": 2,
|
"weight": 2,
|
||||||
@@ -50,7 +50,7 @@ func TestNodeAffinityPriority(t *testing.T) {
}
|
}
|
||||||
|
|
||||||
affinity2 := map[string]string{
|
affinity2 := map[string]string{
|
||||||
api.AffinityAnnotationKey: `
|
v1.AffinityAnnotationKey: `
|
||||||
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
{"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [
|
||||||
{
|
{
|
||||||
"weight": 2,
|
"weight": 2,
|
||||||
@@ -91,63 +91,63 @@ func TestNodeAffinityPriority(t *testing.T) {
}
|
}
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Annotations: map[string]string{},
|
Annotations: map[string]string{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
test: "all machines are same priority as NodeAffinity is nil",
|
test: "all machines are same priority as NodeAffinity is nil",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Annotations: affinity1,
|
Annotations: affinity1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label4}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label4}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
|
test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Annotations: affinity1,
|
Annotations: affinity1,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
test: "only machine1 matches the preferred scheduling requirements of pod",
|
test: "only machine1 matches the preferred scheduling requirements of pod",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Annotations: affinity2,
|
Annotations: affinity2,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: label5}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}},
|
||||||
test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
|
test: "all machines matches the preferred scheduling requirements of pod but with different priorities ",
|
||||||
|
@@ -19,7 +19,7 @@ package priorities
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
@@ -42,7 +42,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun
// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
|
// CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value.
|
||||||
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
|
// If presence is true, prioritizes nodes that have the specified label, regardless of value.
|
||||||
// If presence is false, prioritizes nodes that do not have the specified label.
|
// If presence is false, prioritizes nodes that do not have the specified label.
|
||||||
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||||
|
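For the label prioritizer above, the rule spelled out in the comment is binary: with presence=true a node that carries the label scores 10 and every other node scores 0, and with presence=false the rule inverts. A small stand-alone sketch of that rule (simplified hostPriority type, hypothetical node labels), consistent with the expected scores in the test cases that follow:

```go
package main

import "fmt"

// hostPriority stands in for schedulerapi.HostPriority from the diff above.
type hostPriority struct {
	Host  string
	Score int
}

// nodeLabelScore mirrors the idea behind CalculateNodeLabelPriorityMap: a node
// scores 10 when the presence (or absence) of the label matches what the
// prioritizer asks for, and 0 otherwise.
func nodeLabelScore(nodeName string, nodeLabels map[string]string, label string, presence bool) hostPriority {
	_, exists := nodeLabels[label]
	score := 0
	if exists == presence {
		score = 10
	}
	return hostPriority{Host: nodeName, Score: score}
}

func main() {
	// Mirrors the "one match found, presence true/false" style of test case above.
	fmt.Println(nodeLabelScore("machine1", map[string]string{"foo": "bar"}, "foo", true))  // {machine1 10}
	fmt.Println(nodeLabelScore("machine2", map[string]string{"bar": "foo"}, "foo", true))  // {machine2 0}
	fmt.Println(nodeLabelScore("machine2", map[string]string{"bar": "foo"}, "foo", false)) // {machine2 10}
}
```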
@@ -21,7 +21,7 @@ import (
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
@@ -31,17 +31,17 @@ func TestNewNodeLabelPriority(t *testing.T) {
label2 := map[string]string{"bar": "foo"}
|
label2 := map[string]string{"bar": "foo"}
|
||||||
label3 := map[string]string{"bar": "baz"}
|
label3 := map[string]string{"bar": "baz"}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
label string
|
label string
|
||||||
presence bool
|
presence bool
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
label: "baz",
|
label: "baz",
|
||||||
@@ -49,10 +49,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
test: "no match found, presence true",
|
test: "no match found, presence true",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
||||||
label: "baz",
|
label: "baz",
|
||||||
@@ -60,10 +60,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
test: "no match found, presence false",
|
test: "no match found, presence false",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
label: "foo",
|
label: "foo",
|
||||||
@@ -71,10 +71,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
test: "one match found, presence true",
|
test: "one match found, presence true",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
||||||
label: "foo",
|
label: "foo",
|
||||||
@@ -82,10 +82,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
test: "one match found, presence false",
|
test: "one match found, presence false",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}},
|
||||||
label: "bar",
|
label: "bar",
|
||||||
@@ -93,10 +93,10 @@ func TestNewNodeLabelPriority(t *testing.T) {
test: "two matches found, presence true",
|
test: "two matches found, presence true",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
nodes: []*api.Node{
|
nodes: []*v1.Node{
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}},
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
|
{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}},
|
||||||
},
|
},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}},
|
||||||
label: "bar",
|
label: "bar",
|
||||||
|
@@ -19,13 +19,13 @@ package priorities
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
func CalculateNodePreferAvoidPodsPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||||
@@ -43,7 +43,7 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *api.Pod, meta interface{}, nod
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
|
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
avoids, err := api.GetAvoidPodsFromNodeAnnotations(node.Annotations)
|
avoids, err := v1.GetAvoidPodsFromNodeAnnotations(node.Annotations)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// If we cannot get annotation, assume it's schedulable there.
|
// If we cannot get annotation, assume it's schedulable there.
|
||||||
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
|
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil
|
||||||
|
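The prefer-avoid-pods priority above only reacts when the incoming pod is controlled by a ReplicationController or ReplicaSet and the node's preferAvoidPods annotation names that controller; in every other case the node keeps the full score, including when the annotation cannot be parsed. A rough stand-alone sketch of that decision, with matching simplified to controller kind and name rather than the real annotation types:

```go
package main

import "fmt"

// Minimal stand-ins for the pieces of the real objects this priority reads.
type ownerRef struct {
	Kind       string
	Name       string
	Controller bool // the real field is a *bool on the owner reference
}

type avoidEntry struct { // one entry of the node's "preferAvoidPods" annotation
	Kind string
	Name string
}

// preferAvoidScore mirrors the logic of CalculateNodePreferAvoidPodsPriorityMap:
// only pods controlled by a ReplicationController or ReplicaSet are considered,
// and a node that asks to avoid that controller gets the lowest score.
func preferAvoidScore(controller *ownerRef, avoids []avoidEntry) int {
	if controller == nil || !controller.Controller {
		return 10 // no controlling owner, nothing for the annotation to refer to
	}
	if controller.Kind != "ReplicationController" && controller.Kind != "ReplicaSet" {
		return 10 // ownership by any other controller kind is ignored
	}
	for _, a := range avoids {
		if a.Kind == controller.Kind && a.Name == controller.Name {
			return 0 // the node explicitly prefers to avoid pods of this controller
		}
	}
	return 10
}

func main() {
	rc := &ownerRef{Kind: "ReplicationController", Name: "foo", Controller: true}
	avoids := []avoidEntry{{Kind: "ReplicationController", Name: "foo"}}
	fmt.Println(preferAvoidScore(rc, avoids)) // 0: this node asks to avoid the RC's pods
	fmt.Println(preferAvoidScore(rc, nil))    // 10: no avoid annotation on this node
	fmt.Println(preferAvoidScore(&ownerRef{Kind: "RandomController", Name: "foo", Controller: true}, avoids)) // 10
}
```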
@@ -21,14 +21,14 @@ import (
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNodePreferAvoidPriority(t *testing.T) {
|
func TestNodePreferAvoidPriority(t *testing.T) {
|
||||||
annotations1 := map[string]string{
|
annotations1 := map[string]string{
|
||||||
api.PreferAvoidPodsAnnotationKey: `
|
v1.PreferAvoidPodsAnnotationKey: `
|
||||||
{
|
{
|
||||||
"preferAvoidPods": [
|
"preferAvoidPods": [
|
||||||
{
|
{
|
||||||
@@ -48,7 +48,7 @@ func TestNodePreferAvoidPriority(t *testing.T) {
}`,
|
}`,
|
||||||
}
|
}
|
||||||
annotations2 := map[string]string{
|
annotations2 := map[string]string{
|
||||||
api.PreferAvoidPodsAnnotationKey: `
|
v1.PreferAvoidPodsAnnotationKey: `
|
||||||
{
|
{
|
||||||
"preferAvoidPods": [
|
"preferAvoidPods": [
|
||||||
{
|
{
|
||||||
@@ -67,29 +67,29 @@ func TestNodePreferAvoidPriority(t *testing.T) {
]
|
]
|
||||||
}`,
|
}`,
|
||||||
}
|
}
|
||||||
testNodes := []*api.Node{
|
testNodes := []*v1.Node{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "machine1", Annotations: annotations1},
|
ObjectMeta: v1.ObjectMeta{Name: "machine1", Annotations: annotations1},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "machine2", Annotations: annotations2},
|
ObjectMeta: v1.ObjectMeta{Name: "machine2", Annotations: annotations2},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "machine3"},
|
ObjectMeta: v1.ObjectMeta{Name: "machine3"},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
trueVar := true
|
trueVar := true
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Namespace: "default",
|
Namespace: "default",
|
||||||
OwnerReferences: []api.OwnerReference{
|
OwnerReferences: []v1.OwnerReference{
|
||||||
{Kind: "ReplicationController", Name: "foo", UID: "abcdef123456", Controller: &trueVar},
|
{Kind: "ReplicationController", Name: "foo", UID: "abcdef123456", Controller: &trueVar},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -99,10 +99,10 @@ func TestNodePreferAvoidPriority(t *testing.T) {
test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
|
test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Namespace: "default",
|
Namespace: "default",
|
||||||
OwnerReferences: []api.OwnerReference{
|
OwnerReferences: []v1.OwnerReference{
|
||||||
{Kind: "RandomController", Name: "foo", UID: "abcdef123456", Controller: &trueVar},
|
{Kind: "RandomController", Name: "foo", UID: "abcdef123456", Controller: &trueVar},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -112,10 +112,10 @@ func TestNodePreferAvoidPriority(t *testing.T) {
test: "ownership by random controller should be ignored",
|
test: "ownership by random controller should be ignored",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Namespace: "default",
|
Namespace: "default",
|
||||||
OwnerReferences: []api.OwnerReference{
|
OwnerReferences: []v1.OwnerReference{
|
||||||
{Kind: "ReplicationController", Name: "foo", UID: "abcdef123456"},
|
{Kind: "ReplicationController", Name: "foo", UID: "abcdef123456"},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -125,10 +125,10 @@ func TestNodePreferAvoidPriority(t *testing.T) {
test: "owner without Controller field set should be ignored",
|
test: "owner without Controller field set should be ignored",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{
|
pod: &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{
|
ObjectMeta: v1.ObjectMeta{
|
||||||
Namespace: "default",
|
Namespace: "default",
|
||||||
OwnerReferences: []api.OwnerReference{
|
OwnerReferences: []v1.OwnerReference{
|
||||||
{Kind: "ReplicaSet", Name: "foo", UID: "qwert12345", Controller: &trueVar},
|
{Kind: "ReplicaSet", Name: "foo", UID: "qwert12345", Controller: &trueVar},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
@@ -20,8 +20,8 @@ import (
"sync"
|
"sync"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
utilnode "k8s.io/kubernetes/pkg/util/node"
|
utilnode "k8s.io/kubernetes/pkg/util/node"
|
||||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||||
@@ -57,7 +57,7 @@ func NewSelectorSpreadPriority(
}
|
}
|
||||||
|
|
||||||
// Returns selectors of services, RCs and RSs matching the given pod.
|
// Returns selectors of services, RCs and RSs matching the given pod.
|
||||||
func getSelectors(pod *api.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector {
|
func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector {
|
||||||
selectors := make([]labels.Selector, 0, 3)
|
selectors := make([]labels.Selector, 0, 3)
|
||||||
if services, err := sl.GetPodServices(pod); err == nil {
|
if services, err := sl.GetPodServices(pod); err == nil {
|
||||||
for _, service := range services {
|
for _, service := range services {
|
||||||
@@ -79,7 +79,7 @@ func getSelectors(pod *api.Pod, sl algorithm.ServiceLister, cl algorithm.Control
return selectors
|
return selectors
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *SelectorSpread) getSelectors(pod *api.Pod) []labels.Selector {
|
func (s *SelectorSpread) getSelectors(pod *v1.Pod) []labels.Selector {
|
||||||
return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister)
|
return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -89,7 +89,7 @@ func (s *SelectorSpread) getSelectors(pod *api.Pod) []labels.Selector {
// i.e. it pushes the scheduler towards a node where there's the smallest number of
|
// i.e. it pushes the scheduler towards a node where there's the smallest number of
|
||||||
// pods which match the same service, RC or RS selectors as the pod being scheduled.
|
// pods which match the same service, RC or RS selectors as the pod being scheduled.
|
||||||
// Where zone information is included on the nodes, it favors nodes in zones with fewer existing matching pods.
|
// Where zone information is included on the nodes, it favors nodes in zones with fewer existing matching pods.
|
||||||
func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||||
selectors := s.getSelectors(pod)
|
selectors := s.getSelectors(pod)
|
||||||
|
|
||||||
// Count similar pods by node
|
// Count similar pods by node
|
||||||
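CalculateSpreadPriority, per the comment above, favors nodes running the fewest pods that match the same service/RC/RS selectors as the pod being scheduled. The expected scores in the tests below (for example 5 for a node holding one matching pod when another node holds two) imply a simple rescaling of the per-node counts; here is a self-contained sketch of that step, assuming the per-node counting has already been done:

```go
package main

import "fmt"

// selectorSpreadScores mirrors the scoring step of CalculateSpreadPriority in a
// self-contained form: matchesByNode holds, per node, how many existing pods match
// the same service/RC/RS selectors as the pod being scheduled. Nodes with fewer
// matching pods score higher on the 0-10 scale.
func selectorSpreadScores(matchesByNode map[string]int) map[string]int {
	maxCount := 0
	for _, c := range matchesByNode {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := make(map[string]int, len(matchesByNode))
	for node, c := range matchesByNode {
		if maxCount == 0 {
			scores[node] = 10 // nothing to spread against: every node is equally good
			continue
		}
		scores[node] = int(10 * float64(maxCount-c) / float64(maxCount))
	}
	return scores
}

func main() {
	// Mirrors the "four pods, three service pods" expectation below:
	// machine1 holds one matching pod, machine2 holds two.
	fmt.Println(selectorSpreadScores(map[string]int{"machine1": 1, "machine2": 2})) // map[machine1:5 machine2:0]
}
```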
@@ -199,8 +199,8 @@ func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
|
// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
|
||||||
// on machines with the same value for a particular label.
|
// on machines with the same value for a particular label.
|
||||||
// The label to be considered is provided to the struct (ServiceAntiAffinity).
|
// The label to be considered is provided to the struct (ServiceAntiAffinity).
|
||||||
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||||
var nsServicePods []*api.Pod
|
var nsServicePods []*v1.Pod
|
||||||
if services, err := s.serviceLister.GetPodServices(pod); err == nil && len(services) > 0 {
|
if services, err := s.serviceLister.GetPodServices(pod); err == nil && len(services) > 0 {
|
||||||
// just use the first service and get the other pods within the service
|
// just use the first service and get the other pods within the service
|
||||||
// TODO: a separate predicate can be created that tries to handle all services for the pod
|
// TODO: a separate predicate can be created that tries to handle all services for the pod
|
||||||
|
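CalculateAntiAffinityPriority, per its comment, spreads a service's pods across distinct values of a chosen node label (typically a zone or failure-domain label). The following rough, self-contained sketch illustrates that idea with hypothetical label values and precomputed counts and the same 0-10 rescaling as above; the real implementation works from the service's pod list rather than from precomputed counts:

```go
package main

import "fmt"

// antiAffinityScores sketches the idea behind CalculateAntiAffinityPriority:
// the service's pods are counted per value of a chosen node label, and nodes
// whose label value already hosts many of those pods are scored lower.
func antiAffinityScores(nodeLabelValue map[string]string, servicePodsPerValue map[string]int) map[string]int {
	maxCount := 0
	for _, c := range servicePodsPerValue {
		if c > maxCount {
			maxCount = c
		}
	}
	scores := make(map[string]int, len(nodeLabelValue))
	for node, value := range nodeLabelValue {
		if maxCount == 0 {
			scores[node] = 10 // no service pods yet: every node is equally good
			continue
		}
		scores[node] = int(10 * float64(maxCount-servicePodsPerValue[value]) / float64(maxCount))
	}
	return scores
}

func main() {
	nodes := map[string]string{"machine1": "zone1", "machine2": "zone2"}
	// Two service pods already run in zone1 and none in zone2, so zone2 is preferred.
	fmt.Println(antiAffinityScores(nodes, map[string]int{"zone1": 2})) // map[machine1:0 machine2:10]
}
```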
@@ -21,19 +21,19 @@ import (
"sort"
|
"sort"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/unversioned"
|
"k8s.io/kubernetes/pkg/api/unversioned"
|
||||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
|
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||||
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
func controllerRef(kind, name, uid string) []api.OwnerReference {
|
func controllerRef(kind, name, uid string) []v1.OwnerReference {
|
||||||
// TODO: When ControllerRef will be implemented uncomment code below.
|
// TODO: When ControllerRef will be implemented uncomment code below.
|
||||||
return nil
|
return nil
|
||||||
//trueVar := true
|
//trueVar := true
|
||||||
//return []api.OwnerReference{
|
//return []v1.OwnerReference{
|
||||||
// {Kind: kind, Name: name, UID: types.UID(uid), Controller: &trueVar},
|
// {Kind: kind, Name: name, UID: types.UID(uid), Controller: &trueVar},
|
||||||
//}
|
//}
|
||||||
}
|
}
|
||||||
@@ -47,208 +47,208 @@ func TestSelectorSpreadPriority(t *testing.T) {
"bar": "foo",
|
"bar": "foo",
|
||||||
"baz": "blah",
|
"baz": "blah",
|
||||||
}
|
}
|
||||||
zone1Spec := api.PodSpec{
|
zone1Spec := v1.PodSpec{
|
||||||
NodeName: "machine1",
|
NodeName: "machine1",
|
||||||
}
|
}
|
||||||
zone2Spec := api.PodSpec{
|
zone2Spec := v1.PodSpec{
|
||||||
NodeName: "machine2",
|
NodeName: "machine2",
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []string
|
nodes []string
|
||||||
rcs []*api.ReplicationController
|
rcs []*v1.ReplicationController
|
||||||
rss []*extensions.ReplicaSet
|
rss []*extensions.ReplicaSet
|
||||||
services []*api.Service
|
services []*v1.Service
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: new(api.Pod),
|
pod: new(v1.Pod),
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||||
test: "nothing scheduled",
|
test: "nothing scheduled",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{{Spec: zone1Spec}},
|
pods: []*v1.Pod{{Spec: zone1Spec}},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||||
test: "no services",
|
test: "no services",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
|
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}},
|
||||||
test: "different services",
|
test: "different services",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||||
test: "two pods, one service pod",
|
test: "two pods, one service pod",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||||
test: "five pods, one service pod in no namespace",
|
test: "five pods, one service pod in no namespace",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||||
test: "four pods, one service pod in default namespace",
|
test: "four pods, one service pod in default namespace",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns2"}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}},
|
||||||
test: "five pods, one service pod in specific namespace",
|
test: "five pods, one service pod in specific namespace",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||||
test: "three pods, two service pods on different machines",
|
test: "three pods, two service pods on different machines",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}},
|
||||||
test: "four pods, three service pods",
|
test: "four pods, three service pods",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||||
test: "service with partial pod label matches",
|
test: "service with partial pod label matches",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||||
// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
|
// "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to
|
||||||
// do spreading between all pods. The result should be exactly as above.
|
// do spreading between all pods. The result should be exactly as above.
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||||
test: "service with partial pod label matches with service and replication controller",
|
test: "service with partial pod label matches with service and replication controller",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||||
test: "service with partial pod label matches with service and replica set",
|
test: "service with partial pod label matches with service and replica set",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||||
// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
|
// Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above.
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||||
test: "disjoined service and replication controller should be treated equally",
|
test: "disjoined service and replication controller should be treated equally",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}},
|
||||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||||
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
// We use ReplicaSet, instead of ReplicationController. The result should be exactly as above.
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||||
test: "disjoined service and replica set should be treated equally",
|
test: "disjoined service and replica set should be treated equally",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}},
|
||||||
// Both Nodes have one pod from the given RC, hence both get 0 score.
|
// Both Nodes have one pod from the given RC, hence both get 0 score.
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}},
|
||||||
test: "Replication controller with partial pod label matches",
|
test: "Replication controller with partial pod label matches",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}},
|
||||||
@@ -257,23 +257,23 @@ func TestSelectorSpreadPriority(t *testing.T) {
test: "Replica set with partial pod label matches",
|
test: "Replica set with partial pod label matches",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
|
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}},
|
||||||
test: "Another replication controller with partial pod label matches",
|
test: "Another replication controller with partial pod label matches",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}},
|
||||||
},
|
},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
|
rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}},
|
||||||
@@ -300,10 +300,10 @@ func TestSelectorSpreadPriority(t *testing.T) {
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func buildPod(nodeName string, labels map[string]string, ownerRefs []api.OwnerReference) *api.Pod {
|
func buildPod(nodeName string, labels map[string]string, ownerRefs []v1.OwnerReference) *v1.Pod {
|
||||||
return &api.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs},
|
ObjectMeta: v1.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs},
|
||||||
Spec: api.PodSpec{NodeName: nodeName},
|
Spec: v1.PodSpec{NodeName: nodeName},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -340,17 +340,17 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []string
|
nodes []string
|
||||||
rcs []*api.ReplicationController
|
rcs []*v1.ReplicationController
|
||||||
rss []*extensions.ReplicaSet
|
rss []*extensions.ReplicaSet
|
||||||
services []*api.Service
|
services []*v1.Service
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: new(api.Pod),
|
pod: new(v1.Pod),
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 10},
|
{Host: nodeMachine1Zone1, Score: 10},
|
||||||
{Host: nodeMachine1Zone2, Score: 10},
|
{Host: nodeMachine1Zone2, Score: 10},
|
||||||
@ -363,7 +363,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, nil),
|
pod: buildPod("", labels1, nil),
|
||||||
pods: []*api.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
|
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 10},
|
{Host: nodeMachine1Zone1, Score: 10},
|
||||||
{Host: nodeMachine1Zone2, Score: 10},
|
{Host: nodeMachine1Zone2, Score: 10},
|
||||||
@ -376,8 +376,8 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, nil),
|
pod: buildPod("", labels1, nil),
|
||||||
pods: []*api.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
|
pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 10},
|
{Host: nodeMachine1Zone1, Score: 10},
|
||||||
{Host: nodeMachine1Zone2, Score: 10},
|
{Host: nodeMachine1Zone2, Score: 10},
|
||||||
@ -390,11 +390,11 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, nil),
|
pod: buildPod("", labels1, nil),
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
buildPod(nodeMachine1Zone1, labels2, nil),
|
buildPod(nodeMachine1Zone1, labels2, nil),
|
||||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||||
},
|
},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 10},
|
{Host: nodeMachine1Zone1, Score: 10},
|
||||||
{Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
|
{Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine
|
||||||
@ -407,14 +407,14 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, nil),
|
pod: buildPod("", labels1, nil),
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
buildPod(nodeMachine1Zone1, labels2, nil),
|
buildPod(nodeMachine1Zone1, labels2, nil),
|
||||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||||
buildPod(nodeMachine2Zone2, labels1, nil),
|
buildPod(nodeMachine2Zone2, labels1, nil),
|
||||||
buildPod(nodeMachine1Zone3, labels2, nil),
|
buildPod(nodeMachine1Zone3, labels2, nil),
|
||||||
buildPod(nodeMachine2Zone3, labels1, nil),
|
buildPod(nodeMachine2Zone3, labels1, nil),
|
||||||
},
|
},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 10},
|
{Host: nodeMachine1Zone1, Score: 10},
|
||||||
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
||||||
@ -427,13 +427,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, nil),
|
pod: buildPod("", labels1, nil),
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
buildPod(nodeMachine1Zone1, labels1, nil),
|
buildPod(nodeMachine1Zone1, labels1, nil),
|
||||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||||
buildPod(nodeMachine2Zone2, labels2, nil),
|
buildPod(nodeMachine2Zone2, labels2, nil),
|
||||||
buildPod(nodeMachine1Zone3, labels1, nil),
|
buildPod(nodeMachine1Zone3, labels1, nil),
|
||||||
},
|
},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
|
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
|
||||||
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
||||||
@ -446,13 +446,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, nil),
|
pod: buildPod("", labels1, nil),
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
buildPod(nodeMachine1Zone1, labels1, nil),
|
buildPod(nodeMachine1Zone1, labels1, nil),
|
||||||
buildPod(nodeMachine1Zone2, labels1, nil),
|
buildPod(nodeMachine1Zone2, labels1, nil),
|
||||||
buildPod(nodeMachine1Zone3, labels1, nil),
|
buildPod(nodeMachine1Zone3, labels1, nil),
|
||||||
buildPod(nodeMachine2Zone2, labels2, nil),
|
buildPod(nodeMachine2Zone2, labels2, nil),
|
||||||
},
|
},
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
|
{Host: nodeMachine1Zone1, Score: 0}, // Pod on node
|
||||||
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
{Host: nodeMachine1Zone2, Score: 0}, // Pod on node
|
||||||
@ -465,12 +465,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")),
|
pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||||
buildPod(nodeMachine1Zone2, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
buildPod(nodeMachine1Zone2, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||||
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")),
|
||||||
},
|
},
|
||||||
rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: labels1}}},
|
rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{
|
expectedList: []schedulerapi.HostPriority{
|
||||||
// Note that because we put two pods on the same node (nodeMachine1Zone3),
|
// Note that because we put two pods on the same node (nodeMachine1Zone3),
|
||||||
// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
|
// the values here are questionable for zone2, in particular for nodeMachine1Zone2.
|
||||||
@ -528,13 +528,13 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
nozone := map[string]string{
|
nozone := map[string]string{
|
||||||
"name": "value",
|
"name": "value",
|
||||||
}
|
}
|
||||||
zone0Spec := api.PodSpec{
|
zone0Spec := v1.PodSpec{
|
||||||
NodeName: "machine01",
|
NodeName: "machine01",
|
||||||
}
|
}
|
||||||
zone1Spec := api.PodSpec{
|
zone1Spec := v1.PodSpec{
|
||||||
NodeName: "machine11",
|
NodeName: "machine11",
|
||||||
}
|
}
|
||||||
zone2Spec := api.PodSpec{
|
zone2Spec := v1.PodSpec{
|
||||||
NodeName: "machine21",
|
NodeName: "machine21",
|
||||||
}
|
}
|
||||||
labeledNodes := map[string]map[string]string{
|
labeledNodes := map[string]map[string]string{
|
||||||
@ -543,15 +543,15 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
"machine21": zone2, "machine22": zone2,
|
"machine21": zone2, "machine22": zone2,
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes map[string]map[string]string
|
nodes map[string]map[string]string
|
||||||
services []*api.Service
|
services []*v1.Service
|
||||||
expectedList schedulerapi.HostPriorityList
|
expectedList schedulerapi.HostPriorityList
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
pod: new(api.Pod),
|
pod: new(v1.Pod),
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||||
@ -559,8 +559,8 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
test: "nothing scheduled",
|
test: "nothing scheduled",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{{Spec: zone1Spec}},
|
pods: []*v1.Pod{{Spec: zone1Spec}},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||||
@ -568,97 +568,97 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
test: "no services",
|
test: "no services",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
|
pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "different services",
|
test: "different services",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone0Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10},
|
||||||
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
|
{Host: "machine21", Score: 0}, {Host: "machine22", Score: 0},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "three pods, one service pod",
|
test: "three pods, one service pod",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5},
|
||||||
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "three pods, two service pods on different machines",
|
test: "three pods, two service pods on different machines",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}},
|
||||||
},
|
},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0},
|
||||||
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
{Host: "machine21", Score: 10}, {Host: "machine22", Score: 10},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "three service label match pods in different namespaces",
|
test: "three service label match pods in different namespaces",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6},
|
||||||
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
|
{Host: "machine21", Score: 3}, {Host: "machine22", Score: 3},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "four pods, three service pods",
|
test: "four pods, three service pods",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3},
|
||||||
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
|
{Host: "machine21", Score: 6}, {Host: "machine22", Score: 6},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
test: "service with partial pod label matches",
|
test: "service with partial pod label matches",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone0Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
{Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}},
|
{Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}},
|
||||||
},
|
},
|
||||||
nodes: labeledNodes,
|
nodes: labeledNodes,
|
||||||
services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
|
services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}},
|
||||||
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
|
expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7},
|
||||||
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
{Host: "machine21", Score: 5}, {Host: "machine22", Score: 5},
|
||||||
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
{Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}},
|
||||||
@ -682,18 +682,18 @@ func TestZoneSpreadPriority(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*api.Node {
|
func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node {
|
||||||
nodes := make([]*api.Node, 0, len(nodeMap))
|
nodes := make([]*v1.Node, 0, len(nodeMap))
|
||||||
for nodeName, labels := range nodeMap {
|
for nodeName, labels := range nodeMap {
|
||||||
nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
|
nodes = append(nodes, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName, Labels: labels}})
|
||||||
}
|
}
|
||||||
return nodes
|
return nodes
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeNodeList(nodeNames []string) []*api.Node {
|
func makeNodeList(nodeNames []string) []*v1.Node {
|
||||||
nodes := make([]*api.Node, 0, len(nodeNames))
|
nodes := make([]*v1.Node, 0, len(nodeNames))
|
||||||
for _, nodeName := range nodeNames {
|
for _, nodeName := range nodeNames {
|
||||||
nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
|
nodes = append(nodes, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName}})
|
||||||
}
|
}
|
||||||
return nodes
|
return nodes
|
||||||
}
|
}
|
||||||
|
@@ -20,21 +20,21 @@ import (
"fmt"

"github.com/golang/glog"
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

// CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect PreferNoSchedule
-func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []api.Toleration) (intolerableTaints int) {
+func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) {
for i := range taints {
taint := &taints[i]
// check only on taints that have effect PreferNoSchedule
-if taint.Effect != api.TaintEffectPreferNoSchedule {
+if taint.Effect != v1.TaintEffectPreferNoSchedule {
continue
}

-if !api.TaintToleratedByTolerations(taint, tolerations) {
+if !v1.TaintToleratedByTolerations(taint, tolerations) {
intolerableTaints++
}
}
@@ -42,18 +42,18 @@ func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []ap
}

// getAllTolerationEffectPreferNoSchedule gets the list of all Toleration with Effect PreferNoSchedule
-func getAllTolerationPreferNoSchedule(tolerations []api.Toleration) (tolerationList []api.Toleration) {
+func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) {
for i := range tolerations {
toleration := &tolerations[i]
-if len(toleration.Effect) == 0 || toleration.Effect == api.TaintEffectPreferNoSchedule {
+if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule {
tolerationList = append(tolerationList, *toleration)
}
}
return
}

-func getTolerationListFromPod(pod *api.Pod) ([]api.Toleration, error) {
+func getTolerationListFromPod(pod *v1.Pod) ([]v1.Toleration, error) {
-tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations)
+tolerations, err := v1.GetTolerationsFromPodAnnotations(pod.Annotations)
if err != nil {
return nil, err
}
@@ -61,13 +61,13 @@ func getTolerationListFromPod(pod *api.Pod) ([]api.Toleration, error) {
}

// ComputeTaintTolerationPriority prepares the priority list for all the nodes based on the number of intolerable taints on the node
-func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
+func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
node := nodeInfo.Node()
if node == nil {
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
}

-var tolerationList []api.Toleration
+var tolerationList []v1.Toleration
if priorityMeta, ok := meta.(*priorityMetadata); ok {
tolerationList = priorityMeta.podTolerations
} else {
@@ -78,7 +78,7 @@ func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo
}
}

-taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations)
+taints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations)
if err != nil {
return schedulerapi.HostPriority{}, err
}
@@ -88,7 +88,7 @@ func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo
}, nil
}

-func ComputeTaintTolerationPriorityReduce(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
+func ComputeTaintTolerationPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error {
var maxCount int
for i := range result {
if result[i].Score > maxCount {
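Editor's note: the hunks above only swap the internal api package for v1; the PreferNoSchedule counting logic itself is unchanged. As a reference, here is a minimal, self-contained Go sketch of that counting idea. The Taint and Toleration structs and the tolerated helper below are simplified stand-ins invented for the example (they are not the real v1 types, and tolerated only approximates v1.TaintToleratedByTolerations), so the snippet compiles on its own.

// Minimal, self-contained sketch of the PreferNoSchedule counting logic from the
// hunk above. Taint, Toleration, and tolerated are simplified stand-ins for the
// real v1 types and helpers; only the control flow mirrors the scheduler code.
package main

import "fmt"

type Taint struct{ Key, Value, Effect string }
type Toleration struct{ Key, Operator, Value, Effect string }

// tolerated reports whether any toleration matches the taint (Equal semantics only).
func tolerated(taint Taint, tolerations []Toleration) bool {
	for _, t := range tolerations {
		if t.Key == taint.Key && t.Value == taint.Value {
			return true
		}
	}
	return false
}

// countIntolerable counts taints with effect PreferNoSchedule that no toleration covers.
func countIntolerable(taints []Taint, tolerations []Toleration) int {
	count := 0
	for _, taint := range taints {
		if taint.Effect != "PreferNoSchedule" {
			continue // only PreferNoSchedule taints influence this priority
		}
		if !tolerated(taint, tolerations) {
			count++
		}
	}
	return count
}

func main() {
	taints := []Taint{{Key: "disk-type", Value: "ssd", Effect: "PreferNoSchedule"}}
	fmt.Println(countIntolerable(taints, nil)) // 1: the taint is not tolerated
}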
@@ -21,29 +21,29 @@ import (
"reflect"
"testing"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

-func nodeWithTaints(nodeName string, taints []api.Taint) *api.Node {
+func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node {
taintsData, _ := json.Marshal(taints)
-return &api.Node{
+return &v1.Node{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
Name: nodeName,
Annotations: map[string]string{
-api.TaintsAnnotationKey: string(taintsData),
+v1.TaintsAnnotationKey: string(taintsData),
},
},
}
}

-func podWithTolerations(tolerations []api.Toleration) *api.Pod {
+func podWithTolerations(tolerations []v1.Toleration) *v1.Pod {
tolerationData, _ := json.Marshal(tolerations)
-return &api.Pod{
+return &v1.Pod{
-ObjectMeta: api.ObjectMeta{
+ObjectMeta: v1.ObjectMeta{
Annotations: map[string]string{
-api.TolerationsAnnotationKey: string(tolerationData),
+v1.TolerationsAnnotationKey: string(tolerationData),
},
},
}
@@ -55,30 +55,30 @@ func podWithTolerations(tolerations []api.Toleration) *api.Pod {

func TestTaintAndToleration(t *testing.T) {
tests := []struct {
-pod *api.Pod
+pod *v1.Pod
-nodes []*api.Node
+nodes []*v1.Node
expectedList schedulerapi.HostPriorityList
test string
}{
// basic test case
{
test: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints",
-pod: podWithTolerations([]api.Toleration{{
+pod: podWithTolerations([]v1.Toleration{{
Key: "foo",
-Operator: api.TolerationOpEqual,
+Operator: v1.TolerationOpEqual,
Value: "bar",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}}),
-nodes: []*api.Node{
+nodes: []*v1.Node{
-nodeWithTaints("nodeA", []api.Taint{{
+nodeWithTaints("nodeA", []v1.Taint{{
Key: "foo",
Value: "bar",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}}),
-nodeWithTaints("nodeB", []api.Taint{{
+nodeWithTaints("nodeB", []v1.Taint{{
Key: "foo",
Value: "blah",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}}),
},
expectedList: []schedulerapi.HostPriority{
@@ -89,37 +89,37 @@ func TestTaintAndToleration(t *testing.T) {
// the count of taints that are tolerated by pod, does not matter.
{
test: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has",
-pod: podWithTolerations([]api.Toleration{
+pod: podWithTolerations([]v1.Toleration{
{
Key: "cpu-type",
-Operator: api.TolerationOpEqual,
+Operator: v1.TolerationOpEqual,
Value: "arm64",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}, {
Key: "disk-type",
-Operator: api.TolerationOpEqual,
+Operator: v1.TolerationOpEqual,
Value: "ssd",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
},
}),
-nodes: []*api.Node{
+nodes: []*v1.Node{
-nodeWithTaints("nodeA", []api.Taint{}),
+nodeWithTaints("nodeA", []v1.Taint{}),
-nodeWithTaints("nodeB", []api.Taint{
+nodeWithTaints("nodeB", []v1.Taint{
{
Key: "cpu-type",
Value: "arm64",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
},
}),
-nodeWithTaints("nodeC", []api.Taint{
+nodeWithTaints("nodeC", []v1.Taint{
{
Key: "cpu-type",
Value: "arm64",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}, {
Key: "disk-type",
Value: "ssd",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
},
}),
},
@@ -132,30 +132,30 @@ func TestTaintAndToleration(t *testing.T) {
// the count of taints on a node that are not tolerated by pod, matters.
{
test: "the more intolerable taints a node has, the lower score it gets.",
-pod: podWithTolerations([]api.Toleration{{
+pod: podWithTolerations([]v1.Toleration{{
Key: "foo",
-Operator: api.TolerationOpEqual,
+Operator: v1.TolerationOpEqual,
Value: "bar",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}}),
-nodes: []*api.Node{
+nodes: []*v1.Node{
-nodeWithTaints("nodeA", []api.Taint{}),
+nodeWithTaints("nodeA", []v1.Taint{}),
-nodeWithTaints("nodeB", []api.Taint{
+nodeWithTaints("nodeB", []v1.Taint{
{
Key: "cpu-type",
Value: "arm64",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
},
}),
-nodeWithTaints("nodeC", []api.Taint{
+nodeWithTaints("nodeC", []v1.Taint{
{
Key: "cpu-type",
Value: "arm64",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}, {
Key: "disk-type",
Value: "ssd",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
},
}),
},
@@ -168,37 +168,37 @@ func TestTaintAndToleration(t *testing.T) {
// taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule
{
test: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function",
-pod: podWithTolerations([]api.Toleration{
+pod: podWithTolerations([]v1.Toleration{
{
Key: "cpu-type",
-Operator: api.TolerationOpEqual,
+Operator: v1.TolerationOpEqual,
Value: "arm64",
-Effect: api.TaintEffectNoSchedule,
+Effect: v1.TaintEffectNoSchedule,
}, {
Key: "disk-type",
-Operator: api.TolerationOpEqual,
+Operator: v1.TolerationOpEqual,
Value: "ssd",
-Effect: api.TaintEffectNoSchedule,
+Effect: v1.TaintEffectNoSchedule,
},
}),
-nodes: []*api.Node{
+nodes: []*v1.Node{
-nodeWithTaints("nodeA", []api.Taint{}),
+nodeWithTaints("nodeA", []v1.Taint{}),
-nodeWithTaints("nodeB", []api.Taint{
+nodeWithTaints("nodeB", []v1.Taint{
{
Key: "cpu-type",
Value: "arm64",
-Effect: api.TaintEffectNoSchedule,
+Effect: v1.TaintEffectNoSchedule,
},
}),
-nodeWithTaints("nodeC", []api.Taint{
+nodeWithTaints("nodeC", []v1.Taint{
{
Key: "cpu-type",
Value: "arm64",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
}, {
Key: "disk-type",
Value: "ssd",
-Effect: api.TaintEffectPreferNoSchedule,
+Effect: v1.TaintEffectPreferNoSchedule,
},
}),
},
@@ -17,22 +17,22 @@ limitations under the License.
package priorities

import (
-"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/resource"
+"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

-func makeNode(node string, milliCPU, memory int64) *api.Node {
+func makeNode(node string, milliCPU, memory int64) *v1.Node {
-return &api.Node{
+return &v1.Node{
-ObjectMeta: api.ObjectMeta{Name: node},
+ObjectMeta: v1.ObjectMeta{Name: node},
-Status: api.NodeStatus{
+Status: v1.NodeStatus{
-Capacity: api.ResourceList{
+Capacity: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
"memory": *resource.NewQuantity(memory, resource.BinarySI),
},
-Allocatable: api.ResourceList{
+Allocatable: v1.ResourceList{
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
"memory": *resource.NewQuantity(memory, resource.BinarySI),
},
@@ -41,7 +41,7 @@ func makeNode(node string, milliCPU, memory int64) *api.Node {
}

func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction) algorithm.PriorityFunction {
-return func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
+return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
result := make(schedulerapi.HostPriorityList, 0, len(nodes))
for i := range nodes {
hostResult, err := mapFn(pod, nil, nodeNameToInfo[nodes[i].Name])
@@ -16,9 +16,7 @@ limitations under the License.

package util

-import (
-"k8s.io/kubernetes/pkg/api"
-)
+import "k8s.io/kubernetes/pkg/api/v1"

// For each of these resources, a pod that doesn't request the resource explicitly
// will be treated as having requested the amount indicated below, for the purpose
@@ -32,18 +30,18 @@ const DefaultMilliCpuRequest int64 = 100 // 0.1 core
const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB

// GetNonzeroRequests returns the default resource request if none is found or what is provided on the request
-// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList"
+// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity v1.ResourceList"
// as an additional argument here) rather than using constants
-func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) {
+func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) {
var outMilliCPU, outMemory int64
// Override if un-set, but not if explicitly set to zero
-if _, found := (*requests)[api.ResourceCPU]; !found {
+if _, found := (*requests)[v1.ResourceCPU]; !found {
outMilliCPU = DefaultMilliCpuRequest
} else {
outMilliCPU = requests.Cpu().MilliValue()
}
// Override if un-set, but not if explicitly set to zero
-if _, found := (*requests)[api.ResourceMemory]; !found {
+if _, found := (*requests)[v1.ResourceMemory]; !found {
outMemory = DefaultMemoryRequest
} else {
outMemory = requests.Memory().Value()
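Editor's note: GetNonzeroRequests above keeps the same "default if unset, respect an explicit zero" behaviour after the move to *v1.ResourceList. The following self-contained sketch illustrates that pattern with a plain map standing in for the resource list; the nonzeroRequests helper and the map are illustrative only, while the constants mirror the defaults declared in the hunk.

// Self-contained sketch of the defaulting pattern used by GetNonzeroRequests above.
package main

import "fmt"

const (
	defaultMilliCPU = 100               // 0.1 core
	defaultMemory   = 200 * 1024 * 1024 // 200 MB
)

// nonzeroRequests returns the stated request if present, otherwise the default;
// an explicit zero is kept, because only a missing key triggers the fallback.
func nonzeroRequests(requests map[string]int64) (milliCPU, memory int64) {
	if v, found := requests["cpu"]; found {
		milliCPU = v
	} else {
		milliCPU = defaultMilliCPU
	}
	if v, found := requests["memory"]; found {
		memory = v
	} else {
		memory = defaultMemory
	}
	return milliCPU, memory
}

func main() {
	cpu, mem := nonzeroRequests(map[string]int64{"cpu": 250})
	fmt.Println(cpu, mem) // 250 and the 200 MB default
}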
@@ -17,8 +17,8 @@ limitations under the License.
package util

import (
-"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/unversioned"
+"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/pkg/labels"
"k8s.io/kubernetes/pkg/util/sets"
)
@@ -27,7 +27,7 @@ import (
// according to the namespaces indicated in podAffinityTerm.
// 1. If the namespaces is nil considers the given pod's namespace
// 2. If the namespaces is empty list then considers all the namespaces
-func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String {
+func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm v1.PodAffinityTerm) sets.String {
names := sets.String{}
if podAffinityTerm.Namespaces == nil {
names.Insert(pod.Namespace)
@@ -39,7 +39,7 @@ func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffin

// PodMatchesTermsNamespaceAndSelector returns true if the given <pod>
// matches the namespace and selector defined by <affinityPod>`s <term>.
-func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, term *api.PodAffinityTerm) (bool, error) {
+func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, affinityPod *v1.Pod, term *v1.PodAffinityTerm) (bool, error) {
namespaces := getNamespacesFromPodAffinityTerm(affinityPod, *term)
if len(namespaces) != 0 && !namespaces.Has(pod.Namespace) {
return false, nil
@@ -53,7 +53,7 @@ func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, ter
}

// nodesHaveSameTopologyKeyInternal checks if nodeA and nodeB have same label value with given topologyKey as label key.
-func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *api.Node, topologyKey string) bool {
+func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *v1.Node, topologyKey string) bool {
return nodeA.Labels != nil && nodeB.Labels != nil && len(nodeA.Labels[topologyKey]) > 0 && nodeA.Labels[topologyKey] == nodeB.Labels[topologyKey]
}

@@ -63,7 +63,7 @@ type Topologies struct {

// NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key.
// If the topologyKey is nil/empty, check if the two nodes have any of the default topologyKeys, and have same corresponding label value.
-func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *api.Node, topologyKey string) bool {
+func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool {
if len(topologyKey) == 0 {
// assumes this is allowed only for PreferredDuringScheduling pod anti-affinity (ensured by api/validation)
for _, defaultKey := range tps.DefaultKeys {
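Editor's note: NodesHaveSameTopologyKey above reduces to a label comparison once a topology key is chosen. A self-contained sketch of that check follows, with plain maps standing in for v1.Node label sets; the zone label key is just sample data from that era, not something this hunk defines.

// Self-contained sketch of the topology-key comparison used above: two nodes are
// "co-located" for a key when both carry a non-empty, equal label value for it.
package main

import "fmt"

func sameTopology(labelsA, labelsB map[string]string, topologyKey string) bool {
	return labelsA != nil && labelsB != nil &&
		len(labelsA[topologyKey]) > 0 &&
		labelsA[topologyKey] == labelsB[topologyKey]
}

func main() {
	a := map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone2"}
	b := map[string]string{"failure-domain.beta.kubernetes.io/zone": "zone2"}
	fmt.Println(sameTopology(a, b, "failure-domain.beta.kubernetes.io/zone")) // true
}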
@@ -16,11 +16,9 @@ limitations under the License.

package util

-import (
-"k8s.io/kubernetes/pkg/api"
-)
+import "k8s.io/kubernetes/pkg/api/v1"

-func GetControllerRef(pod *api.Pod) *api.OwnerReference {
+func GetControllerRef(pod *v1.Pod) *v1.OwnerReference {
if len(pod.OwnerReferences) == 0 {
return nil
}
@@ -17,7 +17,7 @@ limitations under the License.
package algorithm

import (
-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

@@ -28,16 +28,16 @@ type SchedulerExtender interface {
// Filter based on extender-implemented predicate functions. The filtered list is
// expected to be a subset of the supplied list. failedNodesMap optionally contains
// the list of failed nodes and failure reasons.
-Filter(pod *api.Pod, nodes []*api.Node) (filteredNodes []*api.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)
+Filter(pod *v1.Pod, nodes []*v1.Node) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error)

// Prioritize based on extender-implemented priority functions. The returned scores & weight
// are used to compute the weighted score for an extender. The weighted scores are added to
// the scores computed by Kubernetes scheduler. The total scores are used to do the host selection.
-Prioritize(pod *api.Pod, nodes []*api.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
+Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error)
}

// ScheduleAlgorithm is an interface implemented by things that know how to schedule pods
// onto machines.
type ScheduleAlgorithm interface {
-Schedule(*api.Pod, NodeLister) (selectedMachine string, err error)
+Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error)
}
@@ -19,7 +19,7 @@ package algorithm
import (
"testing"

-"k8s.io/kubernetes/pkg/api"
+"k8s.io/kubernetes/pkg/api/v1"
)

// Some functions used by multiple scheduler tests.
@@ -31,7 +31,7 @@ type schedulerTester struct {
}

// Call if you know exactly where pod should get scheduled.
-func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
+func (st *schedulerTester) expectSchedule(pod *v1.Pod, expected string) {
actual, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod)
@@ -43,7 +43,7 @@ func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) {
}

// Call if you can't predict where pod will be scheduled.
-func (st *schedulerTester) expectSuccess(pod *api.Pod) {
+func (st *schedulerTester) expectSuccess(pod *v1.Pod) {
_, err := st.scheduler.Schedule(pod, st.nodeLister)
if err != nil {
st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod)
@@ -52,7 +52,7 @@ func (st *schedulerTester) expectSuccess(pod *api.Pod) {
}

// Call if pod should *not* schedule.
-func (st *schedulerTester) expectFailure(pod *api.Pod) {
+func (st *schedulerTester) expectFailure(pod *v1.Pod) {
_, err := st.scheduler.Schedule(pod, st.nodeLister)
if err == nil {
st.t.Error("Unexpected non-error")
@ -17,7 +17,7 @@ limitations under the License.
|
|||||||
 package algorithm

 import (
-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )
@@ -25,25 +25,25 @@ import (
 // FitPredicate is a function that indicates if a pod fits into an existing node.
 // The failure information is given by the error.
 // TODO: Change interface{} to a specific type.
-type FitPredicate func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)
+type FitPredicate func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error)

 // PriorityMapFunction is a function that computes per-node results for a given node.
 // TODO: Figure out the exact API of this method.
 // TODO: Change interface{} to a specific type.
-type PriorityMapFunction func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error)
+type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error)

 // PriorityReduceFunction is a function that aggregated per-node results and computes
 // final scores for all nodes.
 // TODO: Figure out the exact API of this method.
 // TODO: Change interface{} to a specific type.
-type PriorityReduceFunction func(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error
+type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error

 // MetdataProducer is a function that computes metadata for a given pod.
-type MetadataProducer func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}
+type MetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{}

 // DEPRECATED
 // Use Map-Reduce pattern for priority functions.
-type PriorityFunction func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error)
+type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error)

 type PriorityConfig struct {
 	Map PriorityMapFunction
@@ -55,7 +55,7 @@ type PriorityConfig struct {
 }

 // EmptyMetadataProducer returns a no-op MetadataProducer type.
-func EmptyMetadataProducer(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
+func EmptyMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} {
 	return nil
 }

@@ -63,4 +63,4 @@ type PredicateFailureReason interface {
 	GetReason() string
 }

-type GetEquivalencePodFunc func(pod *api.Pod) interface{}
+type GetEquivalencePodFunc func(pod *v1.Pod) interface{}
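Editor's note, not part of the commit: every function type above now takes *v1.Pod. For orientation, a minimal standalone FitPredicate against the new signature could look like the sketch below; the annotation key is invented for illustration, and a real predicate would also return typed PredicateFailureReasons.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

// annotationFitPredicate rejects pods carrying a hypothetical
// "example.com/unschedulable" annotation and accepts everything else.
func annotationFitPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
	if pod.Annotations["example.com/unschedulable"] == "true" {
		return false, nil, nil
	}
	return true, nil, nil
}

func main() {
	// Compile-time check that the sketch matches the new FitPredicate shape.
	var _ algorithm.FitPredicate = annotationFitPredicate
	fmt.Println("annotationFitPredicate satisfies algorithm.FitPredicate")
}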
@@ -23,9 +23,9 @@ import (
 	"net/http/httptest"

-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/util/sets"
@@ -338,9 +338,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) {
 		}
 		server := httptest.NewServer(&handler)
 		defer server.Close()
-		client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+		client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})

-		if _, err := factory.NewConfigFactory(client, "some-scheduler-name", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains).CreateFromConfig(policy); err != nil {
+		if _, err := factory.NewConfigFactory(client, "some-scheduler-name", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains).CreateFromConfig(policy); err != nil {
 			t.Errorf("%s: Error constructing: %v", v, err)
 			continue
 		}
@@ -21,7 +21,7 @@ import (
 	"os"
 	"strconv"

-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
 	"k8s.io/kubernetes/pkg/util/sets"
 	"k8s.io/kubernetes/plugin/pkg/scheduler"
@@ -228,7 +228,7 @@ func copyAndReplace(set sets.String, replaceWhat, replaceWith string) sets.Strin
 }

 // GetEquivalencePod returns a EquivalencePod which contains a group of pod attributes which can be reused.
-func GetEquivalencePod(pod *api.Pod) interface{} {
+func GetEquivalencePod(pod *v1.Pod) interface{} {
 	equivalencePod := EquivalencePod{}
 	// For now we only consider pods:
 	// 1. OwnerReferences is Controller
@@ -260,5 +260,5 @@ func isValidControllerKind(kind string) bool {

 // EquivalencePod is a group of pod attributes which can be reused as equivalence to schedule other pods.
 type EquivalencePod struct {
-	ControllerRef api.OwnerReference
+	ControllerRef v1.OwnerReference
 }
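Editor's note, not part of the commit: EquivalencePod keys pods by their controlling owner. A standalone sketch of pulling that controller reference out of a v1.Pod is shown below; the helper and the sample data are illustrative only, and they assume the OwnerReference.Controller flag carried by the v1 types.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// controllerRef returns the OwnerReference that controls the pod, if any.
func controllerRef(pod *v1.Pod) (v1.OwnerReference, bool) {
	for _, ref := range pod.OwnerReferences {
		if ref.Controller != nil && *ref.Controller {
			return ref, true
		}
	}
	return v1.OwnerReference{}, false
}

func main() {
	isController := true
	pod := &v1.Pod{
		ObjectMeta: v1.ObjectMeta{
			Name: "demo",
			OwnerReferences: []v1.OwnerReference{
				{Kind: "ReplicationController", Name: "demo-rc", Controller: &isController},
			},
		},
	}
	if ref, ok := controllerRef(pod); ok {
		fmt.Printf("controlled by %s/%s\n", ref.Kind, ref.Name)
	}
}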
@@ -19,8 +19,8 @@ package api
 import (
 	"time"

-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/unversioned"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/restclient"
 )

@@ -134,9 +134,9 @@ type ExtenderConfig struct {
 // nodes for a pod.
 type ExtenderArgs struct {
 	// Pod being scheduled
-	Pod api.Pod `json:"pod"`
+	Pod v1.Pod `json:"pod"`
 	// List of candidate nodes where the pod can be scheduled
-	Nodes api.NodeList `json:"nodes"`
+	Nodes v1.NodeList `json:"nodes"`
 }

 // FailedNodesMap represents the filtered out nodes, with node names and failure messages
@@ -145,7 +145,7 @@ type FailedNodesMap map[string]string
 // ExtenderFilterResult represents the results of a filter call to an extender
 type ExtenderFilterResult struct {
 	// Filtered set of nodes where the pod can be scheduled
-	Nodes api.NodeList `json:"nodes,omitempty"`
+	Nodes v1.NodeList `json:"nodes,omitempty"`
 	// Filtered out nodes where the pod can't be scheduled and the failure messages
 	FailedNodes FailedNodesMap `json:"failedNodes,omitempty"`
 	// Error message indicating failure
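Editor's note, not part of the commit: with ExtenderArgs and ExtenderFilterResult now built on the v1 types, an out-of-process extender's filter endpoint could be sketched as below. The URL path, port, and the gpu label rule are made up; the verb the scheduler calls is whatever the extender entry in the scheduler policy names.

package main

import (
	"encoding/json"
	"log"
	"net/http"

	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

// filterHandler decodes ExtenderArgs, keeps nodes with a hypothetical
// "example.com/gpu" label, and reports the rest in FailedNodes.
func filterHandler(w http.ResponseWriter, r *http.Request) {
	var args schedulerapi.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	result := schedulerapi.ExtenderFilterResult{FailedNodes: schedulerapi.FailedNodesMap{}}
	for _, node := range args.Nodes.Items {
		if node.Labels["example.com/gpu"] != "" {
			result.Nodes.Items = append(result.Nodes.Items, node)
		} else {
			result.FailedNodes[node.Name] = "no example.com/gpu label"
		}
	}
	if err := json.NewEncoder(w).Encode(&result); err != nil {
		log.Printf("encoding filter result: %v", err)
	}
}

func main() {
	http.HandleFunc("/filter", filterHandler)
	log.Fatal(http.ListenAndServe(":8888", nil))
}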
@@ -17,13 +17,15 @@ limitations under the License.
 package scheduler

 import (
-	"github.com/golang/groupcache/lru"
 	"hash/adler32"

-	"k8s.io/kubernetes/pkg/api"
+	"github.com/golang/groupcache/lru"
+
+	"sync"
+
+	"k8s.io/kubernetes/pkg/api/v1"
 	hashutil "k8s.io/kubernetes/pkg/util/hash"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
-	"sync"
 )

 // TODO(harryz) figure out the right number for this, 4096 may be too big
@@ -68,7 +70,7 @@ func (ec *EquivalenceCache) addPodPredicate(podKey uint64, nodeName string, fit
 }

 // AddPodPredicatesCache cache pod predicate for equivalence class
-func (ec *EquivalenceCache) AddPodPredicatesCache(pod *api.Pod, fitNodeList []*api.Node, failedPredicates *FailedPredicateMap) {
+func (ec *EquivalenceCache) AddPodPredicatesCache(pod *v1.Pod, fitNodeList []*v1.Node, failedPredicates *FailedPredicateMap) {
 	equivalenceHash := ec.hashEquivalencePod(pod)

 	for _, fitNode := range fitNodeList {
@@ -80,10 +82,10 @@ func (ec *EquivalenceCache) AddPodPredicatesCache(pod *api.Pod, fitNodeList []*a
 }

 // GetCachedPredicates gets cached predicates for equivalence class
-func (ec *EquivalenceCache) GetCachedPredicates(pod *api.Pod, nodes []*api.Node) ([]*api.Node, FailedPredicateMap, []*api.Node) {
-	fitNodeList := []*api.Node{}
+func (ec *EquivalenceCache) GetCachedPredicates(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, FailedPredicateMap, []*v1.Node) {
+	fitNodeList := []*v1.Node{}
 	failedPredicates := FailedPredicateMap{}
-	noCacheNodeList := []*api.Node{}
+	noCacheNodeList := []*v1.Node{}
 	equivalenceHash := ec.hashEquivalencePod(pod)
 	for _, node := range nodes {
 		findCache := false
@@ -124,7 +126,7 @@ func (ec *EquivalenceCache) SendClearAllCacheReq() {
 }

 // hashEquivalencePod returns the hash of equivalence pod.
-func (ec *EquivalenceCache) hashEquivalencePod(pod *api.Pod) uint64 {
+func (ec *EquivalenceCache) hashEquivalencePod(pod *v1.Pod) uint64 {
 	equivalencePod := ec.getEquivalencePod(pod)
 	hash := adler32.New()
 	hashutil.DeepHashObject(hash, equivalencePod)
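Editor's note, not part of the commit: hashEquivalencePod above boils the equivalence attributes down to an adler32 checksum via hashutil.DeepHashObject. The standalone sketch below repeats that recipe on a stand-in struct; the struct fields are illustrative, not the ones the scheduler actually hashes.

package main

import (
	"fmt"
	"hash/adler32"

	hashutil "k8s.io/kubernetes/pkg/util/hash"
)

// equivalenceClass stands in for the EquivalencePod attributes.
type equivalenceClass struct {
	ControllerKind string
	ControllerName string
}

// hashEquivalenceClass deep-hashes the attribute struct into an adler32
// checksum and widens it to uint64, mirroring hashEquivalencePod.
func hashEquivalenceClass(ec equivalenceClass) uint64 {
	h := adler32.New()
	hashutil.DeepHashObject(h, ec)
	return uint64(h.Sum32())
}

func main() {
	a := equivalenceClass{ControllerKind: "ReplicationController", ControllerName: "demo-rc"}
	b := a // identical attributes land in the same equivalence class
	fmt.Println(hashEquivalenceClass(a) == hashEquivalenceClass(b)) // true
}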
@@ -24,7 +24,7 @@ import (
 	"net/http"
 	"time"

-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	utilnet "k8s.io/kubernetes/pkg/util/net"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
@@ -94,20 +94,20 @@ func NewHTTPExtender(config *schedulerapi.ExtenderConfig, apiVersion string) (al
 // Filter based on extender implemented predicate functions. The filtered list is
 // expected to be a subset of the supplied list. failedNodesMap optionally contains
 // the list of failed nodes and failure reasons.
-func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, schedulerapi.FailedNodesMap, error) {
+func (h *HTTPExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
 	var result schedulerapi.ExtenderFilterResult

 	if h.filterVerb == "" {
 		return nodes, schedulerapi.FailedNodesMap{}, nil
 	}

-	nodeItems := make([]api.Node, 0, len(nodes))
+	nodeItems := make([]v1.Node, 0, len(nodes))
 	for _, node := range nodes {
 		nodeItems = append(nodeItems, *node)
 	}
 	args := schedulerapi.ExtenderArgs{
 		Pod: *pod,
-		Nodes: api.NodeList{Items: nodeItems},
+		Nodes: v1.NodeList{Items: nodeItems},
 	}

 	if err := h.send(h.filterVerb, &args, &result); err != nil {
@@ -117,7 +117,7 @@ func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, sch
 		return nil, nil, fmt.Errorf(result.Error)
 	}

-	nodeResult := make([]*api.Node, 0, len(result.Nodes.Items))
+	nodeResult := make([]*v1.Node, 0, len(result.Nodes.Items))
 	for i := range result.Nodes.Items {
 		nodeResult = append(nodeResult, &result.Nodes.Items[i])
 	}
@@ -127,7 +127,7 @@ func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, sch
 // Prioritize based on extender implemented priority functions. Weight*priority is added
 // up for each such priority function. The returned score is added to the score computed
 // by Kubernetes scheduler. The total score is used to do the host selection.
-func (h *HTTPExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) {
+func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
 	var result schedulerapi.HostPriorityList

 	if h.prioritizeVerb == "" {
@@ -138,13 +138,13 @@ func (h *HTTPExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerap
 		return &result, 0, nil
 	}

-	nodeItems := make([]api.Node, 0, len(nodes))
+	nodeItems := make([]v1.Node, 0, len(nodes))
 	for _, node := range nodes {
 		nodeItems = append(nodeItems, *node)
 	}
 	args := schedulerapi.ExtenderArgs{
 		Pod: *pod,
-		Nodes: api.NodeList{Items: nodeItems},
+		Nodes: v1.NodeList{Items: nodeItems},
 	}

 	if err := h.send(h.prioritizeVerb, &args, &result); err != nil {
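Editor's note, not part of the commit: the Prioritize comment above describes weight*priority summing across extenders. The sketch below shows that aggregation in isolation; the host names and scores are made up, and it assumes schedulerapi.HostPriority exposes Host and Score fields the way the in-tree prioritizers use them.

package main

import (
	"fmt"

	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
)

// combine adds weight*score for each extender's HostPriorityList into one map.
func combine(results []schedulerapi.HostPriorityList, weights []int) map[string]int {
	combined := map[string]int{}
	for i, list := range results {
		for _, hp := range list {
			combined[hp.Host] += hp.Score * weights[i]
		}
	}
	return combined
}

func main() {
	a := schedulerapi.HostPriorityList{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}}
	b := schedulerapi.HostPriorityList{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 8}}
	// machine1: 10*1 + 1*2 = 12, machine2: 5*1 + 8*2 = 21
	fmt.Println(combine([]schedulerapi.HostPriorityList{a, b}, []int{1, 2}))
}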
@@ -21,52 +21,52 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/api"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/util/wait"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
 	schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

-type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error)
-type priorityFunc func(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error)
+type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error)
+type priorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error)

 type priorityConfig struct {
 	function priorityFunc
 	weight int
 }

-func errorPredicateExtender(pod *api.Pod, node *api.Node) (bool, error) {
+func errorPredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
 	return false, fmt.Errorf("Some error")
 }

-func falsePredicateExtender(pod *api.Pod, node *api.Node) (bool, error) {
+func falsePredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
 	return false, nil
 }

-func truePredicateExtender(pod *api.Pod, node *api.Node) (bool, error) {
+func truePredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
 	return true, nil
 }

-func machine1PredicateExtender(pod *api.Pod, node *api.Node) (bool, error) {
+func machine1PredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine1" {
 		return true, nil
 	}
 	return false, nil
 }

-func machine2PredicateExtender(pod *api.Pod, node *api.Node) (bool, error) {
+func machine2PredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) {
 	if node.Name == "machine2" {
 		return true, nil
 	}
 	return false, nil
 }

-func errorPrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) {
+func errorPrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) {
 	return &schedulerapi.HostPriorityList{}, fmt.Errorf("Some error")
 }

-func machine1PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) {
+func machine1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes {
 		score := 1
@@ -78,7 +78,7 @@ func machine1PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi
 	return &result, nil
 }

-func machine2PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) {
+func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) {
 	result := schedulerapi.HostPriorityList{}
 	for _, node := range nodes {
 		score := 1
@@ -90,7 +90,7 @@ func machine2PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi
 	return &result, nil
 }

-func machine2Prioritizer(_ *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
+func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 	result := []schedulerapi.HostPriority{}
 	for _, node := range nodes {
 		score := 1
@@ -108,15 +108,15 @@ type FakeExtender struct {
 	weight int
 }

-func (f *FakeExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, schedulerapi.FailedNodesMap, error) {
-	filtered := []*api.Node{}
+func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, schedulerapi.FailedNodesMap, error) {
+	filtered := []*v1.Node{}
 	failedNodesMap := schedulerapi.FailedNodesMap{}
 	for _, node := range nodes {
 		fits := true
 		for _, predicate := range f.predicates {
 			fit, err := predicate(pod, node)
 			if err != nil {
-				return []*api.Node{}, schedulerapi.FailedNodesMap{}, err
+				return []*v1.Node{}, schedulerapi.FailedNodesMap{}, err
 			}
 			if !fit {
 				fits = false
@@ -132,7 +132,7 @@ func (f *FakeExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, sch
 	return filtered, failedNodesMap, nil
 }

-func (f *FakeExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) {
+func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) {
 	result := schedulerapi.HostPriorityList{}
 	combinedScores := map[string]int{}
 	for _, prioritizer := range f.prioritizers {
@@ -164,8 +164,8 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 		extenderPredicates []fitPredicate
 		extenderPrioritizers []priorityConfig
 		nodes []string
-		pod *api.Pod
-		pods []*api.Pod
+		pod *v1.Pod
+		pods []*v1.Pod
 		expectedHost string
 		expectsErr bool
 	}{
@@ -288,7 +288,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) {
 			cache.AddPod(pod)
 		}
 		for _, name := range test.nodes {
-			cache.AddNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
+			cache.AddNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}})
 		}
 		scheduler := NewGenericScheduler(
 			cache, test.predicates, algorithm.EmptyMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders)
@@ -27,8 +27,9 @@ import (

 	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/errors"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/controller/informers"
 	"k8s.io/kubernetes/pkg/fields"
 	"k8s.io/kubernetes/pkg/types"
@@ -42,7 +43,7 @@ import (
 	"k8s.io/kubernetes/plugin/pkg/scheduler/api/validation"

 	"github.com/golang/glog"
-	"k8s.io/kubernetes/pkg/apis/extensions"
+	extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
 	"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
 )

@@ -110,7 +111,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA
 	schedulerCache := schedulercache.New(30*time.Second, stopEverything)

 	// TODO: pass this in as an argument...
-	informerFactory := informers.NewSharedInformerFactory(client, 0)
+	informerFactory := informers.NewSharedInformerFactory(client, nil, 0)
 	pvcInformer := informerFactory.PersistentVolumeClaims()

 	c := &ConfigFactory{
@@ -141,7 +142,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA
 	// they may need to call.
 	c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer(
 		c.createAssignedNonTerminatedPodLW(),
-		&api.Pod{},
+		&v1.Pod{},
 		0,
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: c.addPodToCache,
@@ -153,7 +154,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA

 	c.NodeLister.Store, c.nodePopulator = cache.NewInformer(
 		c.createNodeLW(),
-		&api.Node{},
+		&v1.Node{},
 		0,
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: c.addNodeToCache,
@@ -165,14 +166,14 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA
 	// TODO(harryz) need to fill all the handlers here and below for equivalence cache
 	c.PVLister.Store, c.pvPopulator = cache.NewInformer(
 		c.createPersistentVolumeLW(),
-		&api.PersistentVolume{},
+		&v1.PersistentVolume{},
 		0,
 		cache.ResourceEventHandlerFuncs{},
 	)

 	c.ServiceLister.Indexer, c.servicePopulator = cache.NewIndexerInformer(
 		c.createServiceLW(),
-		&api.Service{},
+		&v1.Service{},
 		0,
 		cache.ResourceEventHandlerFuncs{},
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
@@ -180,7 +181,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA

 	c.ControllerLister.Indexer, c.controllerPopulator = cache.NewIndexerInformer(
 		c.createControllerLW(),
-		&api.ReplicationController{},
+		&v1.ReplicationController{},
 		0,
 		cache.ResourceEventHandlerFuncs{},
 		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
@@ -191,9 +192,9 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA

 // TODO(harryz) need to update all the handlers here and below for equivalence cache
 func (c *ConfigFactory) addPodToCache(obj interface{}) {
-	pod, ok := obj.(*api.Pod)
+	pod, ok := obj.(*v1.Pod)
 	if !ok {
-		glog.Errorf("cannot convert to *api.Pod: %v", obj)
+		glog.Errorf("cannot convert to *v1.Pod: %v", obj)
 		return
 	}

@@ -203,14 +204,14 @@ func (c *ConfigFactory) addPodToCache(obj interface{}) {
 }

 func (c *ConfigFactory) updatePodInCache(oldObj, newObj interface{}) {
-	oldPod, ok := oldObj.(*api.Pod)
+	oldPod, ok := oldObj.(*v1.Pod)
 	if !ok {
-		glog.Errorf("cannot convert oldObj to *api.Pod: %v", oldObj)
+		glog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj)
 		return
 	}
-	newPod, ok := newObj.(*api.Pod)
+	newPod, ok := newObj.(*v1.Pod)
 	if !ok {
-		glog.Errorf("cannot convert newObj to *api.Pod: %v", newObj)
+		glog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj)
 		return
 	}

@@ -220,19 +221,19 @@ func (c *ConfigFactory) updatePodInCache(oldObj, newObj interface{}) {
 }

 func (c *ConfigFactory) deletePodFromCache(obj interface{}) {
-	var pod *api.Pod
+	var pod *v1.Pod
 	switch t := obj.(type) {
-	case *api.Pod:
+	case *v1.Pod:
 		pod = t
 	case cache.DeletedFinalStateUnknown:
 		var ok bool
-		pod, ok = t.Obj.(*api.Pod)
+		pod, ok = t.Obj.(*v1.Pod)
 		if !ok {
-			glog.Errorf("cannot convert to *api.Pod: %v", t.Obj)
+			glog.Errorf("cannot convert to *v1.Pod: %v", t.Obj)
 			return
 		}
 	default:
-		glog.Errorf("cannot convert to *api.Pod: %v", t)
+		glog.Errorf("cannot convert to *v1.Pod: %v", t)
 		return
 	}
 	if err := c.schedulerCache.RemovePod(pod); err != nil {
@@ -241,9 +242,9 @@ func (c *ConfigFactory) deletePodFromCache(obj interface{}) {
 }

 func (c *ConfigFactory) addNodeToCache(obj interface{}) {
-	node, ok := obj.(*api.Node)
+	node, ok := obj.(*v1.Node)
 	if !ok {
-		glog.Errorf("cannot convert to *api.Node: %v", obj)
+		glog.Errorf("cannot convert to *v1.Node: %v", obj)
 		return
 	}

@@ -253,14 +254,14 @@ func (c *ConfigFactory) addNodeToCache(obj interface{}) {
 }

 func (c *ConfigFactory) updateNodeInCache(oldObj, newObj interface{}) {
-	oldNode, ok := oldObj.(*api.Node)
+	oldNode, ok := oldObj.(*v1.Node)
 	if !ok {
-		glog.Errorf("cannot convert oldObj to *api.Node: %v", oldObj)
+		glog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj)
 		return
 	}
-	newNode, ok := newObj.(*api.Node)
+	newNode, ok := newObj.(*v1.Node)
 	if !ok {
-		glog.Errorf("cannot convert newObj to *api.Node: %v", newObj)
+		glog.Errorf("cannot convert newObj to *v1.Node: %v", newObj)
 		return
 	}

@@ -270,19 +271,19 @@ func (c *ConfigFactory) updateNodeInCache(oldObj, newObj interface{}) {
 }

 func (c *ConfigFactory) deleteNodeFromCache(obj interface{}) {
-	var node *api.Node
+	var node *v1.Node
 	switch t := obj.(type) {
-	case *api.Node:
+	case *v1.Node:
 		node = t
 	case cache.DeletedFinalStateUnknown:
 		var ok bool
-		node, ok = t.Obj.(*api.Node)
+		node, ok = t.Obj.(*v1.Node)
 		if !ok {
-			glog.Errorf("cannot convert to *api.Node: %v", t.Obj)
+			glog.Errorf("cannot convert to *v1.Node: %v", t.Obj)
 			return
 		}
 	default:
-		glog.Errorf("cannot convert to *api.Node: %v", t)
+		glog.Errorf("cannot convert to *v1.Node: %v", t)
 		return
 	}
 	if err := c.schedulerCache.RemoveNode(node); err != nil {
@@ -386,7 +387,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String,
 		Algorithm: algo,
 		Binder: &binder{f.Client},
 		PodConditionUpdater: &podConditionUpdater{f.Client},
-		NextPod: func() *api.Pod {
+		NextPod: func() *v1.Pod {
 			return f.getNextPod()
 		},
 		Error: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue),
@@ -454,7 +455,7 @@ func (f *ConfigFactory) getPluginArgs() (*PluginFactoryArgs, error) {

 func (f *ConfigFactory) Run() {
 	// Watch and queue pods that need scheduling.
-	cache.NewReflector(f.createUnassignedNonTerminatedPodLW(), &api.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything)
+	cache.NewReflector(f.createUnassignedNonTerminatedPodLW(), &v1.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything)

 	// Begin populating scheduled pods.
 	go f.scheduledPodPopulator.Run(f.StopEverything)
@@ -481,9 +482,9 @@ func (f *ConfigFactory) Run() {
 	cache.NewReflector(f.createReplicaSetLW(), &extensions.ReplicaSet{}, f.ReplicaSetLister.Indexer, 0).RunUntil(f.StopEverything)
 }

-func (f *ConfigFactory) getNextPod() *api.Pod {
+func (f *ConfigFactory) getNextPod() *v1.Pod {
 	for {
-		pod := cache.Pop(f.PodQueue).(*api.Pod)
+		pod := cache.Pop(f.PodQueue).(*v1.Pod)
 		if f.responsibleForPod(pod) {
 			glog.V(4).Infof("About to try and schedule pod %v", pod.Name)
 			return pod
@@ -491,8 +492,8 @@ func (f *ConfigFactory) getNextPod() *api.Pod {
 	}
 }

-func (f *ConfigFactory) responsibleForPod(pod *api.Pod) bool {
-	if f.SchedulerName == api.DefaultSchedulerName {
+func (f *ConfigFactory) responsibleForPod(pod *v1.Pod) bool {
+	if f.SchedulerName == v1.DefaultSchedulerName {
 		return pod.Annotations[SchedulerAnnotationKey] == f.SchedulerName || pod.Annotations[SchedulerAnnotationKey] == ""
 	} else {
 		return pod.Annotations[SchedulerAnnotationKey] == f.SchedulerName
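Editor's note, not part of the commit: responsibleForPod above keys off the scheduler.alpha.kubernetes.io/name annotation. The standalone sketch below reproduces that decision so the default-versus-named behaviour is easy to see; the pod names are invented.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

const schedulerAnnotationKey = "scheduler.alpha.kubernetes.io/name"

// responsible mirrors responsibleForPod: the default scheduler also claims
// unannotated pods, while a named scheduler only claims pods annotated for it.
func responsible(schedulerName string, pod *v1.Pod) bool {
	want := pod.Annotations[schedulerAnnotationKey]
	if schedulerName == v1.DefaultSchedulerName {
		return want == schedulerName || want == ""
	}
	return want == schedulerName
}

func main() {
	plain := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "plain"}}
	annotated := &v1.Pod{ObjectMeta: v1.ObjectMeta{
		Name:        "annotated",
		Annotations: map[string]string{schedulerAnnotationKey: "foo-scheduler"},
	}}
	fmt.Println(responsible(v1.DefaultSchedulerName, plain))     // true
	fmt.Println(responsible(v1.DefaultSchedulerName, annotated)) // false
	fmt.Println(responsible("foo-scheduler", annotated))         // true
}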
@@ -500,20 +501,20 @@ func (f *ConfigFactory) responsibleForPod(pod *api.Pod) bool {
 }

 func getNodeConditionPredicate() cache.NodeConditionPredicate {
-	return func(node *api.Node) bool {
+	return func(node *v1.Node) bool {
 		for i := range node.Status.Conditions {
 			cond := &node.Status.Conditions[i]
 			// We consider the node for scheduling only when its:
 			// - NodeReady condition status is ConditionTrue,
 			// - NodeOutOfDisk condition status is ConditionFalse,
 			// - NodeNetworkUnavailable condition status is ConditionFalse.
-			if cond.Type == api.NodeReady && cond.Status != api.ConditionTrue {
+			if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue {
 				glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
 				return false
-			} else if cond.Type == api.NodeOutOfDisk && cond.Status != api.ConditionFalse {
+			} else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse {
 				glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
 				return false
-			} else if cond.Type == api.NodeNetworkUnavailable && cond.Status != api.ConditionFalse {
+			} else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse {
 				glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status)
 				return false
 			}
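Editor's note, not part of the commit: getNodeConditionPredicate above filters on node conditions. A trimmed-down standalone version that only checks NodeReady is sketched below; the real predicate also rejects NodeOutOfDisk and NodeNetworkUnavailable nodes, and the sample nodes are invented.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

// readyNodes keeps only nodes whose NodeReady condition is ConditionTrue.
func readyNodes(nodes []v1.Node) []v1.Node {
	var out []v1.Node
	for _, node := range nodes {
		for i := range node.Status.Conditions {
			cond := &node.Status.Conditions[i]
			if cond.Type == v1.NodeReady && cond.Status == v1.ConditionTrue {
				out = append(out, node)
				break
			}
		}
	}
	return out
}

func main() {
	nodes := []v1.Node{
		{
			ObjectMeta: v1.ObjectMeta{Name: "ready"},
			Status:     v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}},
		},
		{
			ObjectMeta: v1.ObjectMeta{Name: "not-ready"},
			Status:     v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}},
		},
	}
	fmt.Println(len(readyNodes(nodes))) // 1
}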
@@ -530,16 +531,16 @@ func getNodeConditionPredicate() cache.NodeConditionPredicate {
 // Returns a cache.ListWatch that finds all pods that need to be
 // scheduled.
 func (factory *ConfigFactory) createUnassignedNonTerminatedPodLW() *cache.ListWatch {
-	selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed))
+	selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", api.NamespaceAll, selector)
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", v1.NamespaceAll, selector)
 }

 // Returns a cache.ListWatch that finds all pods that are
 // already scheduled.
 // TODO: return a ListerWatcher interface instead?
 func (factory *ConfigFactory) createAssignedNonTerminatedPodLW() *cache.ListWatch {
-	selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed))
+	selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed))
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", api.NamespaceAll, selector)
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", v1.NamespaceAll, selector)
 }

 // createNodeLW returns a cache.ListWatch that gets all changes to nodes.
@@ -547,36 +548,36 @@ func (factory *ConfigFactory) createNodeLW() *cache.ListWatch {
 	// all nodes are considered to ensure that the scheduler cache has access to all nodes for lookups
 	// the NodeCondition is used to filter out the nodes that are not ready or unschedulable
 	// the filtered list is used as the super set of nodes to consider for scheduling
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.ParseSelectorOrDie(""))
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.ParseSelectorOrDie(""))
 }

 // createPersistentVolumeLW returns a cache.ListWatch that gets all changes to persistentVolumes.
 func (factory *ConfigFactory) createPersistentVolumeLW() *cache.ListWatch {
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumes", api.NamespaceAll, fields.ParseSelectorOrDie(""))
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumes", v1.NamespaceAll, fields.ParseSelectorOrDie(""))
 }

 // createPersistentVolumeClaimLW returns a cache.ListWatch that gets all changes to persistentVolumeClaims.
 func (factory *ConfigFactory) createPersistentVolumeClaimLW() *cache.ListWatch {
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumeClaims", api.NamespaceAll, fields.ParseSelectorOrDie(""))
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumeClaims", v1.NamespaceAll, fields.ParseSelectorOrDie(""))
 }

 // Returns a cache.ListWatch that gets all changes to services.
 func (factory *ConfigFactory) createServiceLW() *cache.ListWatch {
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "services", api.NamespaceAll, fields.ParseSelectorOrDie(""))
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "services", v1.NamespaceAll, fields.ParseSelectorOrDie(""))
 }

 // Returns a cache.ListWatch that gets all changes to controllers.
 func (factory *ConfigFactory) createControllerLW() *cache.ListWatch {
-	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "replicationControllers", api.NamespaceAll, fields.ParseSelectorOrDie(""))
+	return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "replicationControllers", v1.NamespaceAll, fields.ParseSelectorOrDie(""))
 }

 // Returns a cache.ListWatch that gets all changes to replicasets.
 func (factory *ConfigFactory) createReplicaSetLW() *cache.ListWatch {
-	return cache.NewListWatchFromClient(factory.Client.Extensions().RESTClient(), "replicasets", api.NamespaceAll, fields.ParseSelectorOrDie(""))
+	return cache.NewListWatchFromClient(factory.Client.Extensions().RESTClient(), "replicasets", v1.NamespaceAll, fields.ParseSelectorOrDie(""))
 }

-func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {
-	return func(pod *api.Pod, err error) {
+func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) {
+	return func(pod *v1.Pod, err error) {
 		if err == scheduler.ErrNoNodesAvailable {
 			glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name)
 		} else {
@@ -621,9 +622,9 @@ func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue
 	}
 }

-// nodeEnumerator allows a cache.Poller to enumerate items in an api.NodeList
+// nodeEnumerator allows a cache.Poller to enumerate items in an v1.NodeList
 type nodeEnumerator struct {
-	*api.NodeList
+	*v1.NodeList
 }

 // Len returns the number of items in the node list.
@@ -644,7 +645,7 @@ type binder struct {
 }

 // Bind just does a POST binding RPC.
-func (b *binder) Bind(binding *api.Binding) error {
+func (b *binder) Bind(binding *v1.Binding) error {
 	glog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name)
 	ctx := api.WithNamespace(api.NewContext(), binding.Namespace)
 	return b.Client.Core().RESTClient().Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error()
@@ -656,9 +657,9 @@ type podConditionUpdater struct {
 	Client clientset.Interface
 }

-func (p *podConditionUpdater) Update(pod *api.Pod, condition *api.PodCondition) error {
+func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error {
 	glog.V(2).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status)
-	if api.UpdatePodCondition(&pod.Status, condition) {
+	if v1.UpdatePodCondition(&pod.Status, condition) {
 		_, err := p.Client.Core().Pods(pod.Namespace).UpdateStatus(pod)
 		return err
 	}
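Editor's note, not part of the commit: binder.Bind above POSTs a v1.Binding whose Target names the chosen node, which is also what TestBind below constructs. For reference, building such a binding by hand looks like this; the pod and node names are invented.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api/v1"
)

func main() {
	// A binding ties an existing pod (by name and namespace) to a target node.
	binding := &v1.Binding{
		ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault, Name: "demo-pod"},
		Target:     v1.ObjectReference{Kind: "Node", Name: "node-1"},
	}
	fmt.Printf("bind %s/%s -> %s\n", binding.Namespace, binding.Name, binding.Target.Name)
}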
@@ -23,12 +23,12 @@ import (
 	"testing"
 	"time"

-	"k8s.io/kubernetes/pkg/api"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	apitesting "k8s.io/kubernetes/pkg/api/testing"
+	"k8s.io/kubernetes/pkg/api/v1"
 	"k8s.io/kubernetes/pkg/apimachinery/registered"
 	"k8s.io/kubernetes/pkg/client/cache"
-	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
+	clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5"
 	"k8s.io/kubernetes/pkg/client/restclient"
 	"k8s.io/kubernetes/pkg/runtime"
 	"k8s.io/kubernetes/pkg/types"
@@ -47,8 +47,8 @@ func TestCreate(t *testing.T) {
 	}
 	server := httptest.NewServer(&handler)
 	defer server.Close()
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
-	factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	factory.Create()
 }

@@ -65,8 +65,8 @@ func TestCreateFromConfig(t *testing.T) {
 	}
 	server := httptest.NewServer(&handler)
 	defer server.Close()
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
-	factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)

 	// Pre-register some predicate and priority functions
 	RegisterFitPredicate("PredicateOne", PredicateOne)
@@ -106,8 +106,8 @@ func TestCreateFromEmptyConfig(t *testing.T) {
 	}
 	server := httptest.NewServer(&handler)
 	defer server.Close()
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
-	factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)

 	configData = []byte(`{}`)
 	if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil {
@@ -117,26 +117,26 @@ func TestCreateFromEmptyConfig(t *testing.T) {
 	factory.CreateFromConfig(policy)
 }

-func PredicateOne(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateOne(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	return true, nil, nil
 }

-func PredicateTwo(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
+func PredicateTwo(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
 	return true, nil, nil
 }

-func PriorityOne(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
+func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 	return []schedulerapi.HostPriority{}, nil
 }

-func PriorityTwo(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
+func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
 	return []schedulerapi.HostPriority{}, nil
 }

 func TestDefaultErrorFunc(t *testing.T) {
-	testPod := &api.Pod{
-		ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"},
-		Spec: apitesting.DeepEqualSafePodSpec(),
+	testPod := &v1.Pod{
+		ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar"},
+		Spec: apitesting.V1DeepEqualSafePodSpec(),
 	}
 	handler := utiltesting.FakeHandler{
 		StatusCode: 200,
@@ -149,7 +149,7 @@ func TestDefaultErrorFunc(t *testing.T) {
 	mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler)
 	server := httptest.NewServer(mux)
 	defer server.Close()
-	factory := NewConfigFactory(clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}}), api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	factory := NewConfigFactory(clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}}), v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc)
 	podBackoff := podBackoff{
 		perPodBackoff: map[types.NamespacedName]*backoffEntry{},
@@ -178,11 +178,11 @@ func TestDefaultErrorFunc(t *testing.T) {
 }

 func TestNodeEnumerator(t *testing.T) {
-	testList := &api.NodeList{
-		Items: []api.Node{
-			{ObjectMeta: api.ObjectMeta{Name: "foo"}},
-			{ObjectMeta: api.ObjectMeta{Name: "bar"}},
-			{ObjectMeta: api.ObjectMeta{Name: "baz"}},
+	testList := &v1.NodeList{
+		Items: []v1.Node{
+			{ObjectMeta: v1.ObjectMeta{Name: "foo"}},
+			{ObjectMeta: v1.ObjectMeta{Name: "bar"}},
+			{ObjectMeta: v1.ObjectMeta{Name: "baz"}},
 		},
 	}
 	me := nodeEnumerator{testList}
@@ -192,7 +192,7 @@ func TestNodeEnumerator(t *testing.T) {
 	}
 	for i := range testList.Items {
 		gotObj := me.Get(i)
-		if e, a := testList.Items[i].Name, gotObj.(*api.Node).Name; e != a {
+		if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a {
 			t.Errorf("Expected %v, got %v", e, a)
 		}
 		if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) {
@@ -211,14 +211,14 @@ func (f *fakeClock) Now() time.Time {

 func TestBind(t *testing.T) {
 	table := []struct {
-		binding *api.Binding
+		binding *v1.Binding
 	}{
-		{binding: &api.Binding{
-			ObjectMeta: api.ObjectMeta{
-				Namespace: api.NamespaceDefault,
+		{binding: &v1.Binding{
+			ObjectMeta: v1.ObjectMeta{
+				Namespace: v1.NamespaceDefault,
 				Name: "foo",
 			},
-			Target: api.ObjectReference{
+			Target: v1.ObjectReference{
 				Name: "foohost.kubernetes.mydomain.com",
 			},
 		}},
@@ -232,7 +232,7 @@ func TestBind(t *testing.T) {
 		}
 		server := httptest.NewServer(&handler)
 		defer server.Close()
-		client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+		client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 		b := binder{client}

 		if err := b.Bind(item.binding); err != nil {
@@ -240,7 +240,7 @@ func TestBind(t *testing.T) {
 			continue
 		}
 		expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding)
-		handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody)
+		handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", v1.NamespaceDefault, ""), "POST", &expectedBody)
 	}
 }

@@ -317,45 +317,45 @@ func TestResponsibleForPod(t *testing.T) {
 	}
 	server := httptest.NewServer(&handler)
 	defer server.Close()
-	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(api.GroupName).GroupVersion}})
+	client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion}})
 	// factory of "default-scheduler"
-	factoryDefaultScheduler := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	factoryDefaultScheduler := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	// factory of "foo-scheduler"
-	factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains)
+	factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
 	// scheduler annotations to be tested
 	schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"}
 	schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"}
 	schedulerAnnotationFitsNone := map[string]string{"scheduler.alpha.kubernetes.io/name": "bar-scheduler"}
 	tests := []struct {
-		pod *api.Pod
+		pod *v1.Pod
 		pickedByDefault bool
 		pickedByFoo bool
 	}{
 		{
 			// pod with no annotation "scheduler.alpha.kubernetes.io/name=<scheduler-name>" should be
 			// picked by the default scheduler, NOT by the one of name "foo-scheduler"
-			pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}},
+			pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar"}},
 			pickedByDefault: true,
 			pickedByFoo: false,
 		},
 		{
||||||
// pod with annotation "scheduler.alpha.kubernetes.io/name=default-scheduler" should be picked
|
// pod with annotation "scheduler.alpha.kubernetes.io/name=default-scheduler" should be picked
|
||||||
// by the scheduler of name "default-scheduler", NOT by the one of name "foo-scheduler"
|
// by the scheduler of name "default-scheduler", NOT by the one of name "foo-scheduler"
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsDefault}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsDefault}},
|
||||||
pickedByDefault: true,
|
pickedByDefault: true,
|
||||||
pickedByFoo: false,
|
pickedByFoo: false,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// pod with annotation "scheduler.alpha.kubernetes.io/name=foo-scheduler" should NOT
|
// pod with annotation "scheduler.alpha.kubernetes.io/name=foo-scheduler" should NOT
|
||||||
// be picked by the scheduler of name "default-scheduler", but by the one of name "foo-scheduler"
|
// be picked by the scheduler of name "default-scheduler", but by the one of name "foo-scheduler"
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsFoo}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsFoo}},
|
||||||
pickedByDefault: false,
|
pickedByDefault: false,
|
||||||
pickedByFoo: true,
|
pickedByFoo: true,
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
// pod with annotation "scheduler.alpha.kubernetes.io/name=bar-scheduler" should be
|
// pod with annotation "scheduler.alpha.kubernetes.io/name=bar-scheduler" should be
|
||||||
// picked by neither the scheduler of name "default-scheduler" nor the one of name "foo-scheduler"
|
// picked by neither the scheduler of name "default-scheduler" nor the one of name "foo-scheduler"
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsNone}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsNone}},
|
||||||
pickedByDefault: false,
|
pickedByDefault: false,
|
||||||
pickedByFoo: false,
|
pickedByFoo: false,
|
||||||
},
|
},
|
||||||
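TestResponsibleForPod above exercises how a factory decides whether its scheduler owns a pod, based on the scheduler.alpha.kubernetes.io/name annotation, with unannotated pods falling through to the default scheduler. A rough sketch of that selection rule, hedged as an illustration rather than the factory's actual code path (the annotation key and expected outcomes are taken from the test table above):

package main

import "fmt"

const schedulerAnnotationKey = "scheduler.alpha.kubernetes.io/name"

// responsibleForPod reports whether a scheduler with the given name should pick up
// a pod carrying the given annotations. An unannotated pod falls through to the
// default scheduler, matching the behaviour the test table above expects.
func responsibleForPod(schedulerName, defaultSchedulerName string, annotations map[string]string) bool {
	requested, ok := annotations[schedulerAnnotationKey]
	if !ok {
		return schedulerName == defaultSchedulerName
	}
	return requested == schedulerName
}

func main() {
	defaultName := "default-scheduler"
	pods := []map[string]string{
		nil, // no annotation: default scheduler only
		{schedulerAnnotationKey: "default-scheduler"},
		{schedulerAnnotationKey: "foo-scheduler"},
		{schedulerAnnotationKey: "bar-scheduler"}, // picked by neither
	}
	for _, ann := range pods {
		fmt.Println(
			responsibleForPod(defaultName, defaultName, ann),
			responsibleForPod("foo-scheduler", defaultName, ann),
		)
	}
}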
@ -381,9 +381,9 @@ func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) {
|
|||||||
server := httptest.NewServer(&handler)
|
server := httptest.NewServer(&handler)
|
||||||
// TODO: Uncomment when fix #19254
|
// TODO: Uncomment when fix #19254
|
||||||
// defer server.Close()
|
// defer server.Close()
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
// factory of "default-scheduler"
|
// factory of "default-scheduler"
|
||||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, -1, api.DefaultFailureDomains)
|
factory := NewConfigFactory(client, v1.DefaultSchedulerName, -1, v1.DefaultFailureDomains)
|
||||||
_, err := factory.Create()
|
_, err := factory.Create()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing")
|
t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing")
|
||||||
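The expected error strings in the factory tests pin the valid range for hardPodAffinitySymmetricWeight to 0-100. A minimal validation sketch consistent with those messages; validateHardPodAffinitySymmetricWeight is a hypothetical helper for illustration, not the factory's real function:

package main

import "fmt"

// validateHardPodAffinitySymmetricWeight rejects weights outside [0, 100],
// mirroring the error text asserted in the tests below.
func validateHardPodAffinitySymmetricWeight(weight int) error {
	if weight < 0 || weight > 100 {
		return fmt.Errorf("invalid hardPodAffinitySymmetricWeight: %d, must be in the range 0-100", weight)
	}
	return nil
}

func main() {
	for _, w := range []int{-1, 50, 101} {
		fmt.Println(w, validateHardPodAffinitySymmetricWeight(w))
	}
}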
@ -398,7 +398,7 @@ func TestInvalidFactoryArgs(t *testing.T) {
|
|||||||
}
|
}
|
||||||
server := httptest.NewServer(&handler)
|
server := httptest.NewServer(&handler)
|
||||||
defer server.Close()
|
defer server.Close()
|
||||||
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}})
|
client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}})
|
||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
hardPodAffinitySymmetricWeight int
|
hardPodAffinitySymmetricWeight int
|
||||||
@ -407,12 +407,12 @@ func TestInvalidFactoryArgs(t *testing.T) {
|
|||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
hardPodAffinitySymmetricWeight: -1,
|
hardPodAffinitySymmetricWeight: -1,
|
||||||
failureDomains: api.DefaultFailureDomains,
|
failureDomains: v1.DefaultFailureDomains,
|
||||||
expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
|
expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
hardPodAffinitySymmetricWeight: 101,
|
hardPodAffinitySymmetricWeight: 101,
|
||||||
failureDomains: api.DefaultFailureDomains,
|
failureDomains: v1.DefaultFailureDomains,
|
||||||
expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
|
expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100",
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@ -423,7 +423,7 @@ func TestInvalidFactoryArgs(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, test := range testCases {
|
for _, test := range testCases {
|
||||||
factory := NewConfigFactory(client, api.DefaultSchedulerName, test.hardPodAffinitySymmetricWeight, test.failureDomains)
|
factory := NewConfigFactory(client, v1.DefaultSchedulerName, test.hardPodAffinitySymmetricWeight, test.failureDomains)
|
||||||
_, err := factory.Create()
|
_, err := factory.Create()
|
||||||
if err == nil {
|
if err == nil {
|
||||||
t.Errorf("expected err: %s, got nothing", test.expectErr)
|
t.Errorf("expected err: %s, got nothing", test.expectErr)
|
||||||
@ -434,32 +434,32 @@ func TestInvalidFactoryArgs(t *testing.T) {
|
|||||||
|
|
||||||
func TestNodeConditionPredicate(t *testing.T) {
|
func TestNodeConditionPredicate(t *testing.T) {
|
||||||
nodeFunc := getNodeConditionPredicate()
|
nodeFunc := getNodeConditionPredicate()
|
||||||
nodeList := &api.NodeList{
|
nodeList := &v1.NodeList{
|
||||||
Items: []api.Node{
|
Items: []v1.Node{
|
||||||
// node1 considered
|
// node1 considered
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node1"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}},
|
||||||
// node2 ignored - node not Ready
|
// node2 ignored - node not Ready
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node2"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node2"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}}},
|
||||||
// node3 ignored - node out of disk
|
// node3 ignored - node out of disk
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node3"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node3"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}}},
|
||||||
// node4 considered
|
// node4 considered
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node4"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionFalse}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node4"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse}}}},
|
||||||
|
|
||||||
// node5 ignored - node out of disk
|
// node5 ignored - node out of disk
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node5"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node5"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}}},
|
||||||
// node6 considered
|
// node6 considered
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node6"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: api.NodeOutOfDisk, Status: api.ConditionFalse}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node6"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse}}}},
|
||||||
// node7 ignored - node out of disk, node not Ready
|
// node7 ignored - node out of disk, node not Ready
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node7"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}, {Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node7"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}}},
|
||||||
// node8 ignored - node not Ready
|
// node8 ignored - node not Ready
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node8"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}, {Type: api.NodeOutOfDisk, Status: api.ConditionFalse}}}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node8"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse}}}},
|
||||||
|
|
||||||
// node9 ignored - node unschedulable
|
// node9 ignored - node unschedulable
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node9"}, Spec: api.NodeSpec{Unschedulable: true}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node9"}, Spec: v1.NodeSpec{Unschedulable: true}},
|
||||||
// node10 considered
|
// node10 considered
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node10"}, Spec: api.NodeSpec{Unschedulable: false}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node10"}, Spec: v1.NodeSpec{Unschedulable: false}},
|
||||||
// node11 considered
|
// node11 considered
|
||||||
{ObjectMeta: api.ObjectMeta{Name: "node11"}},
|
{ObjectMeta: v1.ObjectMeta{Name: "node11"}},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
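The node list above encodes the filtering rule getNodeConditionPredicate is expected to apply: a node is considered only if it is Ready, not reporting OutOfDisk, and not marked unschedulable. A self-contained sketch of that rule; the condition names come from the test comments above, and the structs below are stand-ins rather than the v1.Node types:

package main

import "fmt"

type condition struct {
	Type   string // e.g. "Ready", "OutOfDisk"
	Status string // "True" or "False"
}

type node struct {
	Name          string
	Unschedulable bool
	Conditions    []condition
}

// schedulable mirrors the considered/ignored cases in the test list above.
func schedulable(n node) bool {
	if n.Unschedulable {
		return false
	}
	for _, c := range n.Conditions {
		if c.Type == "Ready" && c.Status != "True" {
			return false
		}
		if c.Type == "OutOfDisk" && c.Status == "True" {
			return false
		}
	}
	return true
}

func main() {
	nodes := []node{
		{Name: "node1", Conditions: []condition{{"Ready", "True"}}},     // considered
		{Name: "node2", Conditions: []condition{{"Ready", "False"}}},    // ignored: not Ready
		{Name: "node3", Conditions: []condition{{"OutOfDisk", "True"}}}, // ignored: out of disk
		{Name: "node9", Unschedulable: true},                            // ignored: unschedulable
		{Name: "node11"},                                                // considered
	}
	for _, n := range nodes {
		fmt.Println(n.Name, schedulable(n))
	}
}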
|
|
||||||
|
@ -26,7 +26,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/util"
|
"k8s.io/kubernetes/pkg/util"
|
||||||
"k8s.io/kubernetes/pkg/util/errors"
|
"k8s.io/kubernetes/pkg/util/errors"
|
||||||
"k8s.io/kubernetes/pkg/util/workqueue"
|
"k8s.io/kubernetes/pkg/util/workqueue"
|
||||||
@ -39,7 +39,7 @@ import (
|
|||||||
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
|
type FailedPredicateMap map[string][]algorithm.PredicateFailureReason
|
||||||
|
|
||||||
type FitError struct {
|
type FitError struct {
|
||||||
Pod *api.Pod
|
Pod *v1.Pod
|
||||||
FailedPredicates FailedPredicateMap
|
FailedPredicates FailedPredicateMap
|
||||||
}
|
}
|
||||||
|
|
||||||
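When Schedule cannot place a pod it returns a FitError mapping each node name to the predicate failure reasons collected for it. A hedged sketch of how a caller might summarize such a failure; the types below are simplified stand-ins for FitError and PredicateFailureReason, not the scheduler's own:

package main

import (
	"bytes"
	"fmt"
	"sort"
	"strings"
)

// failedPredicateMap mirrors FailedPredicateMap: node name -> failure reasons.
type failedPredicateMap map[string][]string

// fitError is a simplified stand-in for the scheduler's FitError.
type fitError struct {
	PodName          string
	FailedPredicates failedPredicateMap
}

// Error renders one line per node, e.g. "machine1: PodFitsHostPorts".
func (f *fitError) Error() string {
	nodes := make([]string, 0, len(f.FailedPredicates))
	for name := range f.FailedPredicates {
		nodes = append(nodes, name)
	}
	sort.Strings(nodes)
	var buf bytes.Buffer
	fmt.Fprintf(&buf, "pod %q failed to fit on any node:\n", f.PodName)
	for _, name := range nodes {
		fmt.Fprintf(&buf, "  %s: %s\n", name, strings.Join(f.FailedPredicates[name], ", "))
	}
	return buf.String()
}

func main() {
	var err error = &fitError{
		PodName: "foo",
		FailedPredicates: failedPredicateMap{
			"machine1": {"PodFitsHostPorts"},
			"machine2": {"PodFitsResources", "MatchNodeSelector"},
		},
	}
	fmt.Print(err.Error())
}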
@ -89,7 +89,7 @@ type genericScheduler struct {
|
|||||||
// Schedule tries to schedule the given pod to one of the nodes in the node list.
|
// Schedule tries to schedule the given pod to one of the nodes in the node list.
|
||||||
// If it succeeds, it will return the name of the node.
|
// If it succeeds, it will return the name of the node.
|
||||||
// If it fails, it will return a FitError with reasons.
|
// If it fails, it will return a FitError with reasons.
|
||||||
func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) {
|
func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister) (string, error) {
|
||||||
var trace *util.Trace
|
var trace *util.Trace
|
||||||
if pod != nil {
|
if pod != nil {
|
||||||
trace = util.NewTrace(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name))
|
trace = util.NewTrace(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name))
|
||||||
@ -160,14 +160,14 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList
|
|||||||
// Filters the nodes to find the ones that fit based on the given predicate functions
|
// Filters the nodes to find the ones that fit based on the given predicate functions
|
||||||
// Each node is passed through the predicate functions to determine if it is a fit
|
// Each node is passed through the predicate functions to determine if it is a fit
|
||||||
func findNodesThatFit(
|
func findNodesThatFit(
|
||||||
pod *api.Pod,
|
pod *v1.Pod,
|
||||||
nodeNameToInfo map[string]*schedulercache.NodeInfo,
|
nodeNameToInfo map[string]*schedulercache.NodeInfo,
|
||||||
nodes []*api.Node,
|
nodes []*v1.Node,
|
||||||
predicateFuncs map[string]algorithm.FitPredicate,
|
predicateFuncs map[string]algorithm.FitPredicate,
|
||||||
extenders []algorithm.SchedulerExtender,
|
extenders []algorithm.SchedulerExtender,
|
||||||
metadataProducer algorithm.MetadataProducer,
|
metadataProducer algorithm.MetadataProducer,
|
||||||
) ([]*api.Node, FailedPredicateMap, error) {
|
) ([]*v1.Node, FailedPredicateMap, error) {
|
||||||
var filtered []*api.Node
|
var filtered []*v1.Node
|
||||||
failedPredicateMap := FailedPredicateMap{}
|
failedPredicateMap := FailedPredicateMap{}
|
||||||
|
|
||||||
if len(predicateFuncs) == 0 {
|
if len(predicateFuncs) == 0 {
|
||||||
@ -175,7 +175,7 @@ func findNodesThatFit(
|
|||||||
} else {
|
} else {
|
||||||
// Create filtered list with enough space to avoid growing it
|
// Create filtered list with enough space to avoid growing it
|
||||||
// and allow assigning.
|
// and allow assigning.
|
||||||
filtered = make([]*api.Node, len(nodes))
|
filtered = make([]*v1.Node, len(nodes))
|
||||||
errs := []error{}
|
errs := []error{}
|
||||||
var predicateResultLock sync.Mutex
|
var predicateResultLock sync.Mutex
|
||||||
var filteredLen int32
|
var filteredLen int32
|
||||||
@ -202,7 +202,7 @@ func findNodesThatFit(
|
|||||||
workqueue.Parallelize(16, len(nodes), checkNode)
|
workqueue.Parallelize(16, len(nodes), checkNode)
|
||||||
filtered = filtered[:filteredLen]
|
filtered = filtered[:filteredLen]
|
||||||
if len(errs) > 0 {
|
if len(errs) > 0 {
|
||||||
return []*api.Node{}, FailedPredicateMap{}, errors.NewAggregate(errs)
|
return []*v1.Node{}, FailedPredicateMap{}, errors.NewAggregate(errs)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
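findNodesThatFit preallocates the filtered slice and lets workqueue.Parallelize run the per-node check concurrently, using an atomically incremented length so goroutines can write results without reslicing. A stdlib-only sketch of that pattern; the predicate is a placeholder and sync.WaitGroup stands in for the workqueue helper:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// filterParallel keeps the nodes that pass pred, writing results into a
// preallocated slice via an atomically incremented index, then truncating.
func filterParallel(nodes []string, pred func(string) bool) []string {
	filtered := make([]string, len(nodes))
	var filteredLen int32
	var wg sync.WaitGroup
	for i := range nodes {
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			if pred(nodes[i]) {
				filtered[atomic.AddInt32(&filteredLen, 1)-1] = nodes[i]
			}
		}(i)
	}
	wg.Wait()
	return filtered[:filteredLen]
}

func main() {
	nodes := []string{"machine1", "machine2", "machine3", "machine4"}
	// Placeholder predicate: keep two of the nodes. Result order may vary across runs.
	fit := filterParallel(nodes, func(n string) bool {
		return n == "machine2" || n == "machine4"
	})
	fmt.Println(fit)
}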
@ -210,7 +210,7 @@ func findNodesThatFit(
|
|||||||
for _, extender := range extenders {
|
for _, extender := range extenders {
|
||||||
filteredList, failedMap, err := extender.Filter(pod, filtered)
|
filteredList, failedMap, err := extender.Filter(pod, filtered)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return []*api.Node{}, FailedPredicateMap{}, err
|
return []*v1.Node{}, FailedPredicateMap{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
for failedNodeName, failedMsg := range failedMap {
|
for failedNodeName, failedMsg := range failedMap {
|
||||||
@ -229,7 +229,7 @@ func findNodesThatFit(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Checks whether a node with a given name and NodeInfo satisfies all predicateFuncs.
|
// Checks whether a node with a given name and NodeInfo satisfies all predicateFuncs.
|
||||||
func podFitsOnNode(pod *api.Pod, meta interface{}, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, []algorithm.PredicateFailureReason, error) {
|
func podFitsOnNode(pod *v1.Pod, meta interface{}, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, []algorithm.PredicateFailureReason, error) {
|
||||||
var failedPredicates []algorithm.PredicateFailureReason
|
var failedPredicates []algorithm.PredicateFailureReason
|
||||||
for _, predicate := range predicateFuncs {
|
for _, predicate := range predicateFuncs {
|
||||||
fit, reasons, err := predicate(pod, meta, info)
|
fit, reasons, err := predicate(pod, meta, info)
|
||||||
@ -251,11 +251,11 @@ func podFitsOnNode(pod *api.Pod, meta interface{}, info *schedulercache.NodeInfo
|
|||||||
// The node scores returned by the priority function are multiplied by the weights to get weighted scores
|
// The node scores returned by the priority function are multiplied by the weights to get weighted scores
|
||||||
// All scores are finally combined (added) to get the total weighted scores of all nodes
|
// All scores are finally combined (added) to get the total weighted scores of all nodes
|
||||||
func PrioritizeNodes(
|
func PrioritizeNodes(
|
||||||
pod *api.Pod,
|
pod *v1.Pod,
|
||||||
nodeNameToInfo map[string]*schedulercache.NodeInfo,
|
nodeNameToInfo map[string]*schedulercache.NodeInfo,
|
||||||
meta interface{},
|
meta interface{},
|
||||||
priorityConfigs []algorithm.PriorityConfig,
|
priorityConfigs []algorithm.PriorityConfig,
|
||||||
nodes []*api.Node,
|
nodes []*v1.Node,
|
||||||
extenders []algorithm.SchedulerExtender,
|
extenders []algorithm.SchedulerExtender,
|
||||||
) (schedulerapi.HostPriorityList, error) {
|
) (schedulerapi.HostPriorityList, error) {
|
||||||
// If no priority configs are provided, then the EqualPriority function is applied
|
// If no priority configs are provided, then the EqualPriority function is applied
|
||||||
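PrioritizeNodes multiplies each priority function's per-node score by that function's weight and sums the results into a single score per node. A small sketch of that aggregation; the scoring functions here are invented placeholders, not the scheduler's priority plugins:

package main

import "fmt"

type hostPriority struct {
	Host  string
	Score int
}

type priorityConfig struct {
	Weight int
	Map    func(node string) int // per-node score, placeholder signature
}

// prioritizeNodes combines weighted scores from every configured priority function.
func prioritizeNodes(nodes []string, configs []priorityConfig) []hostPriority {
	result := make([]hostPriority, 0, len(nodes))
	for _, node := range nodes {
		total := 0
		for _, c := range configs {
			total += c.Weight * c.Map(node)
		}
		result = append(result, hostPriority{Host: node, Score: total})
	}
	return result
}

func main() {
	nodes := []string{"machine1", "machine2"}
	configs := []priorityConfig{
		{Weight: 1, Map: func(n string) int { return len(n) % 10 }}, // placeholder scorer
		{Weight: 2, Map: func(n string) int { return 5 }},           // constant scorer
	}
	fmt.Println(prioritizeNodes(nodes, configs))
}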
@ -381,7 +381,7 @@ func PrioritizeNodes(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes.
|
// EqualPriorityMap is a prioritizer function that gives an equal weight of one to all nodes.
|
||||||
func EqualPriorityMap(_ *api.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
return schedulerapi.HostPriority{}, fmt.Errorf("node not found")
|
||||||
|
@ -24,9 +24,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
"k8s.io/kubernetes/pkg/apis/extensions"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
|
extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1"
|
||||||
"k8s.io/kubernetes/pkg/util/sets"
|
"k8s.io/kubernetes/pkg/util/sets"
|
||||||
"k8s.io/kubernetes/pkg/util/wait"
|
"k8s.io/kubernetes/pkg/util/wait"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||||
@ -37,15 +37,15 @@ import (
|
|||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
func falsePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
func falsePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||||
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
|
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func truePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
func truePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||||
return true, nil, nil
|
return true, nil, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
func matchesPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||||
node := nodeInfo.Node()
|
node := nodeInfo.Node()
|
||||||
if node == nil {
|
if node == nil {
|
||||||
return false, nil, fmt.Errorf("node not found")
|
return false, nil, fmt.Errorf("node not found")
|
||||||
@ -56,14 +56,14 @@ func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N
|
|||||||
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
|
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasNoPodsPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
func hasNoPodsPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) {
|
||||||
if len(nodeInfo.Pods()) == 0 {
|
if len(nodeInfo.Pods()) == 0 {
|
||||||
return true, nil, nil
|
return true, nil, nil
|
||||||
}
|
}
|
||||||
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
|
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||||
result := []schedulerapi.HostPriority{}
|
result := []schedulerapi.HostPriority{}
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
score, err := strconv.Atoi(node.Name)
|
score, err := strconv.Atoi(node.Name)
|
||||||
@ -78,7 +78,7 @@ func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.Nod
|
|||||||
return result, nil
|
return result, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) {
|
func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) {
|
||||||
var maxScore float64
|
var maxScore float64
|
||||||
minScore := math.MaxFloat64
|
minScore := math.MaxFloat64
|
||||||
reverseResult := []schedulerapi.HostPriority{}
|
reverseResult := []schedulerapi.HostPriority{}
|
||||||
@ -101,10 +101,10 @@ func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca
|
|||||||
return reverseResult, nil
|
return reverseResult, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeNodeList(nodeNames []string) []*api.Node {
|
func makeNodeList(nodeNames []string) []*v1.Node {
|
||||||
result := make([]*api.Node, 0, len(nodeNames))
|
result := make([]*v1.Node, 0, len(nodeNames))
|
||||||
for _, nodeName := range nodeNames {
|
for _, nodeName := range nodeNames {
|
||||||
result = append(result, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}})
|
result = append(result, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName}})
|
||||||
}
|
}
|
||||||
return result
|
return result
|
||||||
}
|
}
|
||||||
@ -181,8 +181,8 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates map[string]algorithm.FitPredicate
|
predicates map[string]algorithm.FitPredicate
|
||||||
prioritizers []algorithm.PriorityConfig
|
prioritizers []algorithm.PriorityConfig
|
||||||
nodes []string
|
nodes []string
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
expectedHosts sets.String
|
expectedHosts sets.String
|
||||||
expectsErr bool
|
expectsErr bool
|
||||||
wErr error
|
wErr error
|
||||||
@ -192,10 +192,10 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
|
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
expectsErr: true,
|
expectsErr: true,
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
name: "test 1",
|
name: "test 1",
|
||||||
wErr: &FitError{
|
wErr: &FitError{
|
||||||
Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
FailedPredicates: FailedPredicateMap{
|
FailedPredicates: FailedPredicateMap{
|
||||||
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
"machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
||||||
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
"machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
||||||
@ -214,7 +214,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
|
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
|
||||||
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
|
prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}},
|
||||||
nodes: []string{"machine1", "machine2"},
|
nodes: []string{"machine1", "machine2"},
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "machine2"}},
|
||||||
expectedHosts: sets.NewString("machine2"),
|
expectedHosts: sets.NewString("machine2"),
|
||||||
name: "test 3",
|
name: "test 3",
|
||||||
wErr: nil,
|
wErr: nil,
|
||||||
@ -231,7 +231,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
|
predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate},
|
||||||
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
|
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
|
||||||
nodes: []string{"3", "2", "1"},
|
nodes: []string{"3", "2", "1"},
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
expectedHosts: sets.NewString("2"),
|
expectedHosts: sets.NewString("2"),
|
||||||
name: "test 5",
|
name: "test 5",
|
||||||
wErr: nil,
|
wErr: nil,
|
||||||
@ -240,7 +240,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
|
predicates: map[string]algorithm.FitPredicate{"true": truePredicate},
|
||||||
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
|
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}},
|
||||||
nodes: []string{"3", "2", "1"},
|
nodes: []string{"3", "2", "1"},
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
expectedHosts: sets.NewString("1"),
|
expectedHosts: sets.NewString("1"),
|
||||||
name: "test 6",
|
name: "test 6",
|
||||||
wErr: nil,
|
wErr: nil,
|
||||||
@ -249,11 +249,11 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
|
predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate},
|
||||||
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
|
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
|
||||||
nodes: []string{"3", "2", "1"},
|
nodes: []string{"3", "2", "1"},
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
expectsErr: true,
|
expectsErr: true,
|
||||||
name: "test 7",
|
name: "test 7",
|
||||||
wErr: &FitError{
|
wErr: &FitError{
|
||||||
Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
FailedPredicates: FailedPredicateMap{
|
FailedPredicates: FailedPredicateMap{
|
||||||
"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
"3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
||||||
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
||||||
@ -266,24 +266,24 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
"nopods": hasNoPodsPredicate,
|
"nopods": hasNoPodsPredicate,
|
||||||
"matches": matchesPredicate,
|
"matches": matchesPredicate,
|
||||||
},
|
},
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{
|
{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "2"},
|
ObjectMeta: v1.ObjectMeta{Name: "2"},
|
||||||
Spec: api.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
NodeName: "2",
|
NodeName: "2",
|
||||||
},
|
},
|
||||||
Status: api.PodStatus{
|
Status: v1.PodStatus{
|
||||||
Phase: api.PodRunning,
|
Phase: v1.PodRunning,
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
|
prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}},
|
||||||
nodes: []string{"1", "2"},
|
nodes: []string{"1", "2"},
|
||||||
expectsErr: true,
|
expectsErr: true,
|
||||||
name: "test 8",
|
name: "test 8",
|
||||||
wErr: &FitError{
|
wErr: &FitError{
|
||||||
Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}},
|
Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}},
|
||||||
FailedPredicates: FailedPredicateMap{
|
FailedPredicates: FailedPredicateMap{
|
||||||
"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
"1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
||||||
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
"2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate},
|
||||||
@ -297,7 +297,7 @@ func TestGenericScheduler(t *testing.T) {
|
|||||||
cache.AddPod(pod)
|
cache.AddPod(pod)
|
||||||
}
|
}
|
||||||
for _, name := range test.nodes {
|
for _, name := range test.nodes {
|
||||||
cache.AddNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
|
cache.AddNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}})
|
||||||
}
|
}
|
||||||
|
|
||||||
scheduler := NewGenericScheduler(
|
scheduler := NewGenericScheduler(
|
||||||
@ -322,7 +322,7 @@ func TestFindFitAllError(t *testing.T) {
|
|||||||
"2": schedulercache.NewNodeInfo(),
|
"2": schedulercache.NewNodeInfo(),
|
||||||
"1": schedulercache.NewNodeInfo(),
|
"1": schedulercache.NewNodeInfo(),
|
||||||
}
|
}
|
||||||
_, predicateMap, err := findNodesThatFit(&api.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
|
_, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("unexpected error: %v", err)
|
t.Errorf("unexpected error: %v", err)
|
||||||
@ -346,14 +346,14 @@ func TestFindFitAllError(t *testing.T) {
|
|||||||
func TestFindFitSomeError(t *testing.T) {
|
func TestFindFitSomeError(t *testing.T) {
|
||||||
nodes := []string{"3", "2", "1"}
|
nodes := []string{"3", "2", "1"}
|
||||||
predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate}
|
predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate}
|
||||||
pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}}
|
pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "1"}}
|
||||||
nodeNameToInfo := map[string]*schedulercache.NodeInfo{
|
nodeNameToInfo := map[string]*schedulercache.NodeInfo{
|
||||||
"3": schedulercache.NewNodeInfo(),
|
"3": schedulercache.NewNodeInfo(),
|
||||||
"2": schedulercache.NewNodeInfo(),
|
"2": schedulercache.NewNodeInfo(),
|
||||||
"1": schedulercache.NewNodeInfo(pod),
|
"1": schedulercache.NewNodeInfo(pod),
|
||||||
}
|
}
|
||||||
for name := range nodeNameToInfo {
|
for name := range nodeNameToInfo {
|
||||||
nodeNameToInfo[name].SetNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}})
|
nodeNameToInfo[name].SetNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}})
|
||||||
}
|
}
|
||||||
|
|
||||||
_, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
|
_, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer)
|
||||||
@ -379,15 +379,15 @@ func TestFindFitSomeError(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeNode(node string, milliCPU, memory int64) *api.Node {
|
func makeNode(node string, milliCPU, memory int64) *v1.Node {
|
||||||
return &api.Node{
|
return &v1.Node{
|
||||||
ObjectMeta: api.ObjectMeta{Name: node},
|
ObjectMeta: v1.ObjectMeta{Name: node},
|
||||||
Status: api.NodeStatus{
|
Status: v1.NodeStatus{
|
||||||
Capacity: api.ResourceList{
|
Capacity: v1.ResourceList{
|
||||||
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
|
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
|
||||||
"memory": *resource.NewQuantity(memory, resource.BinarySI),
|
"memory": *resource.NewQuantity(memory, resource.BinarySI),
|
||||||
},
|
},
|
||||||
Allocatable: api.ResourceList{
|
Allocatable: v1.ResourceList{
|
||||||
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
|
"cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI),
|
||||||
"memory": *resource.NewQuantity(memory, resource.BinarySI),
|
"memory": *resource.NewQuantity(memory, resource.BinarySI),
|
||||||
},
|
},
|
||||||
@ -402,19 +402,19 @@ func makeNode(node string, milliCPU, memory int64) *api.Node {
|
|||||||
// - don't get the same score no matter what we schedule.
|
// - don't get the same score no matter what we schedule.
|
||||||
func TestZeroRequest(t *testing.T) {
|
func TestZeroRequest(t *testing.T) {
|
||||||
// A pod with no resources. We expect spreading to count it as having the default resources.
|
// A pod with no resources. We expect spreading to count it as having the default resources.
|
||||||
noResources := api.PodSpec{
|
noResources := v1.PodSpec{
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{},
|
{},
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
noResources1 := noResources
|
noResources1 := noResources
|
||||||
noResources1.NodeName = "machine1"
|
noResources1.NodeName = "machine1"
|
||||||
// A pod that requests the same resources a 0-request pod gets by default (used for spreading).
|
// A pod that requests the same resources a 0-request pod gets by default (used for spreading).
|
||||||
small := api.PodSpec{
|
small := v1.PodSpec{
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse(
|
"cpu": resource.MustParse(
|
||||||
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
|
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"),
|
||||||
"memory": resource.MustParse(
|
"memory": resource.MustParse(
|
||||||
@ -427,11 +427,11 @@ func TestZeroRequest(t *testing.T) {
|
|||||||
small2 := small
|
small2 := small
|
||||||
small2.NodeName = "machine2"
|
small2.NodeName = "machine2"
|
||||||
// A larger pod.
|
// A larger pod.
|
||||||
large := api.PodSpec{
|
large := v1.PodSpec{
|
||||||
Containers: []api.Container{
|
Containers: []v1.Container{
|
||||||
{
|
{
|
||||||
Resources: api.ResourceRequirements{
|
Resources: v1.ResourceRequirements{
|
||||||
Requests: api.ResourceList{
|
Requests: v1.ResourceList{
|
||||||
"cpu": resource.MustParse(
|
"cpu": resource.MustParse(
|
||||||
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
|
strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"),
|
||||||
"memory": resource.MustParse(
|
"memory": resource.MustParse(
|
||||||
@ -446,38 +446,38 @@ func TestZeroRequest(t *testing.T) {
|
|||||||
large2 := large
|
large2 := large
|
||||||
large2.NodeName = "machine2"
|
large2.NodeName = "machine2"
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
nodes []*api.Node
|
nodes []*v1.Node
|
||||||
test string
|
test string
|
||||||
}{
|
}{
|
||||||
// The point of these next two tests is to show you get the same priority for a zero-request pod
|
// The point of these next two tests is to show you get the same priority for a zero-request pod
|
||||||
// as for a pod with the default requests, both when the zero-request pod is already on the machine
|
// as for a pod with the default requests, both when the zero-request pod is already on the machine
|
||||||
// and when the zero-request pod is the one being scheduled.
|
// and when the zero-request pod is the one being scheduled.
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: noResources},
|
pod: &v1.Pod{Spec: noResources},
|
||||||
nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
|
nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
|
||||||
test: "test priority of zero-request pod with machine with zero-request pod",
|
test: "test priority of zero-request pod with machine with zero-request pod",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: large1}, {Spec: noResources1},
|
{Spec: large1}, {Spec: noResources1},
|
||||||
{Spec: large2}, {Spec: small2},
|
{Spec: large2}, {Spec: small2},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: small},
|
pod: &v1.Pod{Spec: small},
|
||||||
nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
|
nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
|
||||||
test: "test priority of nonzero-request pod with machine with zero-request pod",
|
test: "test priority of nonzero-request pod with machine with zero-request pod",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: large1}, {Spec: noResources1},
|
{Spec: large1}, {Spec: noResources1},
|
||||||
{Spec: large2}, {Spec: small2},
|
{Spec: large2}, {Spec: small2},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
|
// The point of this test is to verify that we're not just getting the same score no matter what we schedule.
|
||||||
{
|
{
|
||||||
pod: &api.Pod{Spec: large},
|
pod: &v1.Pod{Spec: large},
|
||||||
nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
|
nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)},
|
||||||
test: "test priority of larger pod with machine with zero-request pod",
|
test: "test priority of larger pod with machine with zero-request pod",
|
||||||
pods: []*api.Pod{
|
pods: []*v1.Pod{
|
||||||
{Spec: large1}, {Spec: noResources1},
|
{Spec: large1}, {Spec: noResources1},
|
||||||
{Spec: large2}, {Spec: small2},
|
{Spec: large2}, {Spec: small2},
|
||||||
},
|
},
|
||||||
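The test table above relies on the spreading and balancing priorities treating a container that requests nothing as if it had requested a default amount of CPU and memory. A hedged sketch of that substitution; the default constants below are placeholders standing in for the priorityutil defaults referenced above, not their actual values:

package main

import "fmt"

const (
	defaultMilliCPURequest int64 = 100               // placeholder for priorityutil.DefaultMilliCpuRequest
	defaultMemoryRequest   int64 = 200 * 1024 * 1024 // placeholder for priorityutil.DefaultMemoryRequest
)

// nonZeroRequest returns the CPU (milli) and memory requests to use for scoring,
// substituting the defaults when a container requests zero.
func nonZeroRequest(milliCPU, memory int64) (int64, int64) {
	if milliCPU == 0 {
		milliCPU = defaultMilliCPURequest
	}
	if memory == 0 {
		memory = defaultMemoryRequest
	}
	return milliCPU, memory
}

func main() {
	// A zero-request pod scores the same as one that asks for exactly the defaults.
	fmt.Println(nonZeroRequest(0, 0))
	fmt.Println(nonZeroRequest(defaultMilliCPURequest, defaultMemoryRequest))
	// A larger pod scores differently.
	fmt.Println(nonZeroRequest(3*defaultMilliCPURequest, 3*defaultMemoryRequest))
}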
@ -494,8 +494,8 @@ func TestZeroRequest(t *testing.T) {
|
|||||||
{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
|
{Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1},
|
||||||
{
|
{
|
||||||
Function: algorithmpriorities.NewSelectorSpreadPriority(
|
Function: algorithmpriorities.NewSelectorSpreadPriority(
|
||||||
algorithm.FakeServiceLister([]*api.Service{}),
|
algorithm.FakeServiceLister([]*v1.Service{}),
|
||||||
algorithm.FakeControllerLister([]*api.ReplicationController{}),
|
algorithm.FakeControllerLister([]*v1.ReplicationController{}),
|
||||||
algorithm.FakeReplicaSetLister([]*extensions.ReplicaSet{})),
|
algorithm.FakeReplicaSetLister([]*extensions.ReplicaSet{})),
|
||||||
Weight: 1,
|
Weight: 1,
|
||||||
},
|
},
|
||||||
|
@ -19,7 +19,7 @@ package scheduler
|
|||||||
import (
|
import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/client/record"
|
"k8s.io/kubernetes/pkg/client/record"
|
||||||
"k8s.io/kubernetes/pkg/util/wait"
|
"k8s.io/kubernetes/pkg/util/wait"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm"
|
||||||
@ -31,11 +31,11 @@ import (
|
|||||||
|
|
||||||
// Binder knows how to write a binding.
|
// Binder knows how to write a binding.
|
||||||
type Binder interface {
|
type Binder interface {
|
||||||
Bind(binding *api.Binding) error
|
Bind(binding *v1.Binding) error
|
||||||
}
|
}
|
||||||
|
|
||||||
type PodConditionUpdater interface {
|
type PodConditionUpdater interface {
|
||||||
Update(pod *api.Pod, podCondition *api.PodCondition) error
|
Update(pod *v1.Pod, podCondition *v1.PodCondition) error
|
||||||
}
|
}
|
||||||
|
|
||||||
// Scheduler watches for new unscheduled pods. It attempts to find
|
// Scheduler watches for new unscheduled pods. It attempts to find
|
||||||
@ -60,11 +60,11 @@ type Config struct {
|
|||||||
// is available. We don't use a channel for this, because scheduling
|
// is available. We don't use a channel for this, because scheduling
|
||||||
// a pod may take some amount of time and we don't want pods to get
|
// a pod may take some amount of time and we don't want pods to get
|
||||||
// stale while they sit in a channel.
|
// stale while they sit in a channel.
|
||||||
NextPod func() *api.Pod
|
NextPod func() *v1.Pod
|
||||||
|
|
||||||
// Error is called if there is an error. It is passed the pod in
|
// Error is called if there is an error. It is passed the pod in
|
||||||
// question, and the error.
|
// question, and the error.
|
||||||
Error func(*api.Pod, error)
|
Error func(*v1.Pod, error)
|
||||||
|
|
||||||
// Recorder is the EventRecorder to use
|
// Recorder is the EventRecorder to use
|
||||||
Recorder record.EventRecorder
|
Recorder record.EventRecorder
|
||||||
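Config.NextPod is deliberately a blocking function rather than a channel, so a pod is only dequeued at the moment the scheduler is ready to work on it and cannot go stale while waiting in flight. A minimal sketch of a pull-based queue with that shape; this is a toy FIFO for illustration, not the client cache the real config uses:

package main

import (
	"fmt"
	"sync"
)

// podQueue hands out one pod name at a time; Pop blocks until work is available,
// which is the behaviour NextPod exposes to the scheduling loop.
type podQueue struct {
	mu    sync.Mutex
	cond  *sync.Cond
	items []string
}

func newPodQueue() *podQueue {
	q := &podQueue{}
	q.cond = sync.NewCond(&q.mu)
	return q
}

func (q *podQueue) Add(pod string) {
	q.mu.Lock()
	q.items = append(q.items, pod)
	q.mu.Unlock()
	q.cond.Signal()
}

// Pop blocks until a pod is available, then removes and returns it.
func (q *podQueue) Pop() string {
	q.mu.Lock()
	defer q.mu.Unlock()
	for len(q.items) == 0 {
		q.cond.Wait()
	}
	pod := q.items[0]
	q.items = q.items[1:]
	return pod
}

func main() {
	q := newPodQueue()
	nextPod := q.Pop // the scheduling loop would call this each iteration
	go q.Add("default/nginx")
	fmt.Println(nextPod())
}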
@ -96,11 +96,11 @@ func (s *Scheduler) scheduleOne() {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
glog.V(1).Infof("Failed to schedule pod: %v/%v", pod.Namespace, pod.Name)
|
glog.V(1).Infof("Failed to schedule pod: %v/%v", pod.Namespace, pod.Name)
|
||||||
s.config.Error(pod, err)
|
s.config.Error(pod, err)
|
||||||
s.config.Recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "%v", err)
|
s.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err)
|
||||||
s.config.PodConditionUpdater.Update(pod, &api.PodCondition{
|
s.config.PodConditionUpdater.Update(pod, &v1.PodCondition{
|
||||||
Type: api.PodScheduled,
|
Type: v1.PodScheduled,
|
||||||
Status: api.ConditionFalse,
|
Status: v1.ConditionFalse,
|
||||||
Reason: api.PodReasonUnschedulable,
|
Reason: v1.PodReasonUnschedulable,
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -126,9 +126,9 @@ func (s *Scheduler) scheduleOne() {
|
|||||||
go func() {
|
go func() {
|
||||||
defer metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
|
defer metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start))
|
||||||
|
|
||||||
b := &api.Binding{
|
b := &v1.Binding{
|
||||||
ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name},
|
ObjectMeta: v1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name},
|
||||||
Target: api.ObjectReference{
|
Target: v1.ObjectReference{
|
||||||
Kind: "Node",
|
Kind: "Node",
|
||||||
Name: dest,
|
Name: dest,
|
||||||
},
|
},
|
||||||
@ -144,15 +144,15 @@ func (s *Scheduler) scheduleOne() {
|
|||||||
glog.Errorf("scheduler cache ForgetPod failed: %v", err)
|
glog.Errorf("scheduler cache ForgetPod failed: %v", err)
|
||||||
}
|
}
|
||||||
s.config.Error(pod, err)
|
s.config.Error(pod, err)
|
||||||
s.config.Recorder.Eventf(pod, api.EventTypeNormal, "FailedScheduling", "Binding rejected: %v", err)
|
s.config.Recorder.Eventf(pod, v1.EventTypeNormal, "FailedScheduling", "Binding rejected: %v", err)
|
||||||
s.config.PodConditionUpdater.Update(pod, &api.PodCondition{
|
s.config.PodConditionUpdater.Update(pod, &v1.PodCondition{
|
||||||
Type: api.PodScheduled,
|
Type: v1.PodScheduled,
|
||||||
Status: api.ConditionFalse,
|
Status: v1.ConditionFalse,
|
||||||
Reason: "BindingRejected",
|
Reason: "BindingRejected",
|
||||||
})
|
})
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
|
metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart))
|
||||||
s.config.Recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest)
|
s.config.Recorder.Eventf(pod, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest)
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
@ -23,9 +23,9 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
"k8s.io/kubernetes/pkg/api/testapi"
|
"k8s.io/kubernetes/pkg/api/testapi"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
clientcache "k8s.io/kubernetes/pkg/client/cache"
|
clientcache "k8s.io/kubernetes/pkg/client/cache"
|
||||||
"k8s.io/kubernetes/pkg/client/record"
|
"k8s.io/kubernetes/pkg/client/record"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
@ -38,38 +38,38 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
type fakeBinder struct {
|
type fakeBinder struct {
|
||||||
b func(binding *api.Binding) error
|
b func(binding *v1.Binding) error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (fb fakeBinder) Bind(binding *api.Binding) error { return fb.b(binding) }
|
func (fb fakeBinder) Bind(binding *v1.Binding) error { return fb.b(binding) }
|
||||||
|
|
||||||
type fakePodConditionUpdater struct{}
|
type fakePodConditionUpdater struct{}
|
||||||
|
|
||||||
func (fc fakePodConditionUpdater) Update(pod *api.Pod, podCondition *api.PodCondition) error {
|
func (fc fakePodConditionUpdater) Update(pod *v1.Pod, podCondition *v1.PodCondition) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func podWithID(id, desiredHost string) *api.Pod {
|
func podWithID(id, desiredHost string) *v1.Pod {
|
||||||
return &api.Pod{
|
return &v1.Pod{
|
||||||
ObjectMeta: api.ObjectMeta{Name: id, SelfLink: testapi.Default.SelfLink("pods", id)},
|
ObjectMeta: v1.ObjectMeta{Name: id, SelfLink: testapi.Default.SelfLink("pods", id)},
|
||||||
Spec: api.PodSpec{
|
Spec: v1.PodSpec{
|
||||||
NodeName: desiredHost,
|
NodeName: desiredHost,
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func podWithPort(id, desiredHost string, port int) *api.Pod {
|
func podWithPort(id, desiredHost string, port int) *v1.Pod {
|
||||||
pod := podWithID(id, desiredHost)
|
pod := podWithID(id, desiredHost)
|
||||||
pod.Spec.Containers = []api.Container{
|
pod.Spec.Containers = []v1.Container{
|
||||||
{Name: "ctr", Ports: []api.ContainerPort{{HostPort: int32(port)}}},
|
{Name: "ctr", Ports: []v1.ContainerPort{{HostPort: int32(port)}}},
|
||||||
}
|
}
|
||||||
return pod
|
return pod
|
||||||
}
|
}
|
||||||
|
|
||||||
func podWithResources(id, desiredHost string, limits api.ResourceList, requests api.ResourceList) *api.Pod {
|
func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod {
|
||||||
pod := podWithID(id, desiredHost)
|
pod := podWithID(id, desiredHost)
|
||||||
pod.Spec.Containers = []api.Container{
|
pod.Spec.Containers = []v1.Container{
|
||||||
{Name: "ctr", Resources: api.ResourceRequirements{Limits: limits, Requests: requests}},
|
{Name: "ctr", Resources: v1.ResourceRequirements{Limits: limits, Requests: requests}},
|
||||||
}
|
}
|
||||||
return pod
|
return pod
|
||||||
}
|
}
|
||||||
@ -79,7 +79,7 @@ type mockScheduler struct {
|
|||||||
err error
|
err error
|
||||||
}
|
}
|
||||||
|
|
||||||
func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.NodeLister) (string, error) {
|
func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string, error) {
|
||||||
return es.machine, es.err
|
return es.machine, es.err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -88,22 +88,22 @@ func TestScheduler(t *testing.T) {
|
|||||||
eventBroadcaster.StartLogging(t.Logf).Stop()
|
eventBroadcaster.StartLogging(t.Logf).Stop()
|
||||||
errS := errors.New("scheduler")
|
errS := errors.New("scheduler")
|
||||||
errB := errors.New("binder")
|
errB := errors.New("binder")
|
||||||
testNode := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
|
testNode := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}}
|
||||||
|
|
||||||
table := []struct {
|
table := []struct {
|
||||||
injectBindError error
|
injectBindError error
|
||||||
sendPod *api.Pod
|
sendPod *v1.Pod
|
||||||
algo algorithm.ScheduleAlgorithm
|
algo algorithm.ScheduleAlgorithm
|
||||||
expectErrorPod *api.Pod
|
expectErrorPod *v1.Pod
|
||||||
expectAssumedPod *api.Pod
|
expectAssumedPod *v1.Pod
|
||||||
expectError error
|
expectError error
|
||||||
expectBind *api.Binding
|
expectBind *v1.Binding
|
||||||
eventReason string
|
eventReason string
|
||||||
}{
|
}{
|
||||||
{
|
{
|
||||||
sendPod: podWithID("foo", ""),
|
sendPod: podWithID("foo", ""),
|
||||||
algo: mockScheduler{testNode.Name, nil},
|
algo: mockScheduler{testNode.Name, nil},
|
||||||
expectBind: &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: testNode.Name}},
|
expectBind: &v1.Binding{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
|
||||||
expectAssumedPod: podWithID("foo", testNode.Name),
|
expectAssumedPod: podWithID("foo", testNode.Name),
|
||||||
eventReason: "Scheduled",
|
eventReason: "Scheduled",
|
||||||
}, {
|
}, {
|
||||||
@ -115,7 +115,7 @@ func TestScheduler(t *testing.T) {
|
|||||||
}, {
|
}, {
|
||||||
sendPod: podWithID("foo", ""),
|
sendPod: podWithID("foo", ""),
|
||||||
algo: mockScheduler{testNode.Name, nil},
|
algo: mockScheduler{testNode.Name, nil},
|
||||||
expectBind: &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: testNode.Name}},
|
expectBind: &v1.Binding{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}},
|
||||||
expectAssumedPod: podWithID("foo", testNode.Name),
|
expectAssumedPod: podWithID("foo", testNode.Name),
|
||||||
injectBindError: errB,
|
injectBindError: errB,
|
||||||
expectError: errB,
|
expectError: errB,
|
||||||
@ -126,36 +126,36 @@ func TestScheduler(t *testing.T) {
|
|||||||
|
|
||||||
for i, item := range table {
|
for i, item := range table {
|
||||||
var gotError error
|
var gotError error
|
||||||
var gotPod *api.Pod
|
var gotPod *v1.Pod
|
||||||
var gotAssumedPod *api.Pod
|
var gotAssumedPod *v1.Pod
|
||||||
var gotBinding *api.Binding
|
var gotBinding *v1.Binding
|
||||||
c := &Config{
|
c := &Config{
|
||||||
SchedulerCache: &schedulertesting.FakeCache{
|
SchedulerCache: &schedulertesting.FakeCache{
|
||||||
AssumeFunc: func(pod *api.Pod) {
|
AssumeFunc: func(pod *v1.Pod) {
|
||||||
gotAssumedPod = pod
|
gotAssumedPod = pod
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
NodeLister: algorithm.FakeNodeLister(
|
NodeLister: algorithm.FakeNodeLister(
|
||||||
[]*api.Node{&testNode},
|
[]*v1.Node{&testNode},
|
||||||
),
|
),
|
||||||
Algorithm: item.algo,
|
Algorithm: item.algo,
|
||||||
Binder: fakeBinder{func(b *api.Binding) error {
|
Binder: fakeBinder{func(b *v1.Binding) error {
|
||||||
gotBinding = b
|
gotBinding = b
|
||||||
return item.injectBindError
|
return item.injectBindError
|
||||||
}},
|
}},
|
||||||
PodConditionUpdater: fakePodConditionUpdater{},
|
PodConditionUpdater: fakePodConditionUpdater{},
|
||||||
Error: func(p *api.Pod, err error) {
|
Error: func(p *v1.Pod, err error) {
|
||||||
gotPod = p
|
gotPod = p
|
||||||
gotError = err
|
gotError = err
|
||||||
},
|
},
|
||||||
NextPod: func() *api.Pod {
|
NextPod: func() *v1.Pod {
|
||||||
return item.sendPod
|
return item.sendPod
|
||||||
},
|
},
|
||||||
Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}),
|
Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"}),
|
||||||
}
|
}
|
||||||
s := New(c)
|
s := New(c)
|
||||||
called := make(chan struct{})
|
called := make(chan struct{})
|
||||||
events := eventBroadcaster.StartEventWatcher(func(e *api.Event) {
|
events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) {
|
||||||
if e, a := item.eventReason, e.Reason; e != a {
|
if e, a := item.eventReason, e.Reason; e != a {
|
||||||
t.Errorf("%v: expected %v, got %v", i, e, a)
|
t.Errorf("%v: expected %v, got %v", i, e, a)
|
||||||
}
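The Config wired up above injects a fakeBinder whose function field captures the Binding handed to it and returns an injectable error. As a rough sketch of such a test double, not part of this commit (the concrete type lives in this test file and its exact shape is assumed here):

package scheduler

import "k8s.io/kubernetes/pkg/api/v1"

// Assumed shape of the fakeBinder literal used above, fakeBinder{func(b *v1.Binding) error {...}}:
// it forwards every binding to the injected function so the test can record it.
type fakeBinder struct {
    b func(binding *v1.Binding) error
}

// Bind satisfies the scheduler's Binder contract.
func (fb fakeBinder) Bind(binding *v1.Binding) error { return fb.b(binding) }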
@ -185,9 +185,9 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
|
|||||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||||
scache := schedulercache.New(100*time.Millisecond, stop)
|
scache := schedulercache.New(100*time.Millisecond, stop)
|
||||||
pod := podWithPort("pod.Name", "", 8080)
|
pod := podWithPort("pod.Name", "", 8080)
|
||||||
node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
|
node := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}}
|
||||||
scache.AddNode(&node)
|
scache.AddNode(&node)
|
||||||
nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
|
nodeLister := algorithm.FakeNodeLister([]*v1.Node{&node})
|
||||||
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
||||||
scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, pod, &node)
|
scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, pod, &node)
|
||||||
|
|
||||||
@ -225,9 +225,9 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) {
|
|||||||
scheduler.scheduleOne()
|
scheduler.scheduleOne()
|
||||||
select {
|
select {
|
||||||
case b := <-bindingChan:
|
case b := <-bindingChan:
|
||||||
expectBinding := &api.Binding{
|
expectBinding := &v1.Binding{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "bar"},
|
ObjectMeta: v1.ObjectMeta{Name: "bar"},
|
||||||
Target: api.ObjectReference{Kind: "Node", Name: node.Name},
|
Target: v1.ObjectReference{Kind: "Node", Name: node.Name},
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(expectBinding, b) {
|
if !reflect.DeepEqual(expectBinding, b) {
|
||||||
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
||||||
@ -243,9 +243,9 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
|
|||||||
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc)
|
||||||
scache := schedulercache.New(10*time.Minute, stop)
|
scache := schedulercache.New(10*time.Minute, stop)
|
||||||
firstPod := podWithPort("pod.Name", "", 8080)
|
firstPod := podWithPort("pod.Name", "", 8080)
|
||||||
node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}}
|
node := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}}
|
||||||
scache.AddNode(&node)
|
scache.AddNode(&node)
|
||||||
nodeLister := algorithm.FakeNodeLister([]*api.Node{&node})
|
nodeLister := algorithm.FakeNodeLister([]*v1.Node{&node})
|
||||||
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts}
|
||||||
scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, firstPod, &node)
|
scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, firstPod, &node)
|
||||||
|
|
||||||
@ -285,9 +285,9 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
|
|||||||
scheduler.scheduleOne()
|
scheduler.scheduleOne()
|
||||||
select {
|
select {
|
||||||
case b := <-bindingChan:
|
case b := <-bindingChan:
|
||||||
expectBinding := &api.Binding{
|
expectBinding := &v1.Binding{
|
||||||
ObjectMeta: api.ObjectMeta{Name: "bar"},
|
ObjectMeta: v1.ObjectMeta{Name: "bar"},
|
||||||
Target: api.ObjectReference{Kind: "Node", Name: node.Name},
|
Target: v1.ObjectReference{Kind: "Node", Name: node.Name},
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(expectBinding, b) {
|
if !reflect.DeepEqual(expectBinding, b) {
|
||||||
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
||||||
@ -300,7 +300,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) {
|
|||||||
// queuedPodStore: pods queued before processing.
|
// queuedPodStore: pods queued before processing.
|
||||||
// cache: scheduler cache that might contain assumed pods.
|
// cache: scheduler cache that might contain assumed pods.
|
||||||
func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache,
|
func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache,
|
||||||
nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, pod *api.Pod, node *api.Node) (*Scheduler, chan *api.Binding, chan error) {
|
nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) {
|
||||||
|
|
||||||
scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)
|
scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)
|
||||||
|
|
||||||
@ -314,9 +314,9 @@ func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcach
|
|||||||
|
|
||||||
select {
|
select {
|
||||||
case b := <-bindingChan:
|
case b := <-bindingChan:
|
||||||
expectBinding := &api.Binding{
|
expectBinding := &v1.Binding{
|
||||||
ObjectMeta: api.ObjectMeta{Name: pod.Name},
|
ObjectMeta: v1.ObjectMeta{Name: pod.Name},
|
||||||
Target: api.ObjectReference{Kind: "Node", Name: node.Name},
|
Target: v1.ObjectReference{Kind: "Node", Name: node.Name},
|
||||||
}
|
}
|
||||||
if !reflect.DeepEqual(expectBinding, b) {
|
if !reflect.DeepEqual(expectBinding, b) {
|
||||||
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
t.Errorf("binding want=%v, get=%v", expectBinding, b)
|
||||||
@ -336,29 +336,29 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
|
|||||||
// Design the baseline for the pods, and we will make nodes that dont fit it later.
|
// Design the baseline for the pods, and we will make nodes that dont fit it later.
|
||||||
var cpu = int64(4)
|
var cpu = int64(4)
|
||||||
var mem = int64(500)
|
var mem = int64(500)
|
||||||
podWithTooBigResourceRequests := podWithResources("bar", "", api.ResourceList{
|
podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{
|
||||||
api.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)),
|
v1.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)),
|
||||||
api.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
|
v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
|
||||||
}, api.ResourceList{
|
}, v1.ResourceList{
|
||||||
api.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)),
|
v1.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)),
|
||||||
api.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
|
v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)),
|
||||||
})
|
})
|
||||||
|
|
||||||
// create several nodes which cannot schedule the above pod
|
// create several nodes which cannot schedule the above pod
|
||||||
nodes := []*api.Node{}
|
nodes := []*v1.Node{}
|
||||||
for i := 0; i < 100; i++ {
|
for i := 0; i < 100; i++ {
|
||||||
node := api.Node{
|
node := v1.Node{
|
||||||
ObjectMeta: api.ObjectMeta{Name: fmt.Sprintf("machine%v", i)},
|
ObjectMeta: v1.ObjectMeta{Name: fmt.Sprintf("machine%v", i)},
|
||||||
Status: api.NodeStatus{
|
Status: v1.NodeStatus{
|
||||||
Capacity: api.ResourceList{
|
Capacity: v1.ResourceList{
|
||||||
api.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
|
v1.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
|
||||||
api.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
|
v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
|
||||||
api.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
|
v1.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
|
||||||
},
|
},
|
||||||
Allocatable: api.ResourceList{
|
Allocatable: v1.ResourceList{
|
||||||
api.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
|
v1.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)),
|
||||||
api.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
|
v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)),
|
||||||
api.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
|
v1.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)),
|
||||||
}},
|
}},
|
||||||
}
|
}
|
||||||
scache.AddNode(&node)
|
scache.AddNode(&node)
|
||||||
@ -373,8 +373,8 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
|
|||||||
failedPredicatesMap := FailedPredicateMap{}
|
failedPredicatesMap := FailedPredicateMap{}
|
||||||
for _, node := range nodes {
|
for _, node := range nodes {
|
||||||
failedPredicatesMap[node.Name] = []algorithm.PredicateFailureReason{
|
failedPredicatesMap[node.Name] = []algorithm.PredicateFailureReason{
|
||||||
predicates.NewInsufficientResourceError(api.ResourceCPU, 4000, 0, 2000),
|
predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000),
|
||||||
predicates.NewInsufficientResourceError(api.ResourceMemory, 500, 0, 100),
|
predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap)
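For reference, the (requested, used, capacity) numbers inside the expected InsufficientResourceError values above follow directly from the cpu/mem baseline and the cpu/2, mem/5 node allocatable set up earlier in this test. A tiny self-contained illustration of that arithmetic, not part of the commit:

package main

import "fmt"

func main() {
    var cpu, mem int64 = 4, 500
    fmt.Println(cpu*1000, int64(0), cpu/2*1000) // CPU: requested 4000m, used 0, capacity 2000m
    fmt.Println(mem, int64(0), mem/5)           // memory: requested 500, used 0, capacity 100
}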
@ -400,7 +400,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) {
|
|||||||
|
|
||||||
// queuedPodStore: pods queued before processing.
|
// queuedPodStore: pods queued before processing.
|
||||||
// scache: scheduler cache that might contain assumed pods.
|
// scache: scheduler cache that might contain assumed pods.
|
||||||
func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate) (*Scheduler, chan *api.Binding, chan error) {
|
func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate) (*Scheduler, chan *v1.Binding, chan error) {
|
||||||
algo := NewGenericScheduler(
|
algo := NewGenericScheduler(
|
||||||
scache,
|
scache,
|
||||||
predicateMap,
|
predicateMap,
|
||||||
@ -408,20 +408,20 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.
|
|||||||
[]algorithm.PriorityConfig{},
|
[]algorithm.PriorityConfig{},
|
||||||
algorithm.EmptyMetadataProducer,
|
algorithm.EmptyMetadataProducer,
|
||||||
[]algorithm.SchedulerExtender{})
|
[]algorithm.SchedulerExtender{})
|
||||||
bindingChan := make(chan *api.Binding, 1)
|
bindingChan := make(chan *v1.Binding, 1)
|
||||||
errChan := make(chan error, 1)
|
errChan := make(chan error, 1)
|
||||||
cfg := &Config{
|
cfg := &Config{
|
||||||
SchedulerCache: scache,
|
SchedulerCache: scache,
|
||||||
NodeLister: nodeLister,
|
NodeLister: nodeLister,
|
||||||
Algorithm: algo,
|
Algorithm: algo,
|
||||||
Binder: fakeBinder{func(b *api.Binding) error {
|
Binder: fakeBinder{func(b *v1.Binding) error {
|
||||||
bindingChan <- b
|
bindingChan <- b
|
||||||
return nil
|
return nil
|
||||||
}},
|
}},
|
||||||
NextPod: func() *api.Pod {
|
NextPod: func() *v1.Pod {
|
||||||
return clientcache.Pop(queuedPodStore).(*api.Pod)
|
return clientcache.Pop(queuedPodStore).(*v1.Pod)
|
||||||
},
|
},
|
||||||
Error: func(p *api.Pod, err error) {
|
Error: func(p *v1.Pod, err error) {
|
||||||
errChan <- err
|
errChan <- err
|
||||||
},
|
},
|
||||||
Recorder: &record.FakeRecorder{},
|
Recorder: &record.FakeRecorder{},
|
||||||
|
@ -22,7 +22,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
"k8s.io/kubernetes/pkg/util/wait"
|
"k8s.io/kubernetes/pkg/util/wait"
|
||||||
)
|
)
|
||||||
@ -57,7 +57,7 @@ type schedulerCache struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type podState struct {
|
type podState struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
// Used by assumedPod to determinate expiration.
|
// Used by assumedPod to determinate expiration.
|
||||||
deadline *time.Time
|
deadline *time.Time
|
||||||
}
|
}
|
||||||
@ -90,10 +90,10 @@ func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]*
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) List(selector labels.Selector) ([]*api.Pod, error) {
|
func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
|
||||||
cache.mu.Lock()
|
cache.mu.Lock()
|
||||||
defer cache.mu.Unlock()
|
defer cache.mu.Unlock()
|
||||||
var pods []*api.Pod
|
var pods []*v1.Pod
|
||||||
for _, info := range cache.nodes {
|
for _, info := range cache.nodes {
|
||||||
for _, pod := range info.pods {
|
for _, pod := range info.pods {
|
||||||
if selector.Matches(labels.Set(pod.Labels)) {
|
if selector.Matches(labels.Set(pod.Labels)) {
|
||||||
@ -104,12 +104,12 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*api.Pod, error)
|
|||||||
return pods, nil
|
return pods, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) AssumePod(pod *api.Pod) error {
|
func (cache *schedulerCache) AssumePod(pod *v1.Pod) error {
|
||||||
return cache.assumePod(pod, time.Now())
|
return cache.assumePod(pod, time.Now())
|
||||||
}
|
}
|
||||||
|
|
||||||
// assumePod exists for making test deterministic by taking time as input argument.
|
// assumePod exists for making test deterministic by taking time as input argument.
|
||||||
func (cache *schedulerCache) assumePod(pod *api.Pod, now time.Time) error {
|
func (cache *schedulerCache) assumePod(pod *v1.Pod, now time.Time) error {
|
||||||
cache.mu.Lock()
|
cache.mu.Lock()
|
||||||
defer cache.mu.Unlock()
|
defer cache.mu.Unlock()
|
||||||
|
|
||||||
@ -132,7 +132,7 @@ func (cache *schedulerCache) assumePod(pod *api.Pod, now time.Time) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) ForgetPod(pod *api.Pod) error {
|
func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error {
|
||||||
key, err := getPodKey(pod)
|
key, err := getPodKey(pod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -157,7 +157,7 @@ func (cache *schedulerCache) ForgetPod(pod *api.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) AddPod(pod *api.Pod) error {
|
func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
|
||||||
key, err := getPodKey(pod)
|
key, err := getPodKey(pod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -184,7 +184,7 @@ func (cache *schedulerCache) AddPod(pod *api.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) UpdatePod(oldPod, newPod *api.Pod) error {
|
func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error {
|
||||||
key, err := getPodKey(oldPod)
|
key, err := getPodKey(oldPod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -207,7 +207,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *api.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) updatePod(oldPod, newPod *api.Pod) error {
|
func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error {
|
||||||
if err := cache.removePod(oldPod); err != nil {
|
if err := cache.removePod(oldPod); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -215,7 +215,7 @@ func (cache *schedulerCache) updatePod(oldPod, newPod *api.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) addPod(pod *api.Pod) {
|
func (cache *schedulerCache) addPod(pod *v1.Pod) {
|
||||||
n, ok := cache.nodes[pod.Spec.NodeName]
|
n, ok := cache.nodes[pod.Spec.NodeName]
|
||||||
if !ok {
|
if !ok {
|
||||||
n = NewNodeInfo()
|
n = NewNodeInfo()
|
||||||
@ -224,7 +224,7 @@ func (cache *schedulerCache) addPod(pod *api.Pod) {
|
|||||||
n.addPod(pod)
|
n.addPod(pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) removePod(pod *api.Pod) error {
|
func (cache *schedulerCache) removePod(pod *v1.Pod) error {
|
||||||
n := cache.nodes[pod.Spec.NodeName]
|
n := cache.nodes[pod.Spec.NodeName]
|
||||||
if err := n.removePod(pod); err != nil {
|
if err := n.removePod(pod); err != nil {
|
||||||
return err
|
return err
|
||||||
@ -235,7 +235,7 @@ func (cache *schedulerCache) removePod(pod *api.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) RemovePod(pod *api.Pod) error {
|
func (cache *schedulerCache) RemovePod(pod *v1.Pod) error {
|
||||||
key, err := getPodKey(pod)
|
key, err := getPodKey(pod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -260,7 +260,7 @@ func (cache *schedulerCache) RemovePod(pod *api.Pod) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) AddNode(node *api.Node) error {
|
func (cache *schedulerCache) AddNode(node *v1.Node) error {
|
||||||
cache.mu.Lock()
|
cache.mu.Lock()
|
||||||
defer cache.mu.Unlock()
|
defer cache.mu.Unlock()
|
||||||
|
|
||||||
@ -272,7 +272,7 @@ func (cache *schedulerCache) AddNode(node *api.Node) error {
|
|||||||
return n.SetNode(node)
|
return n.SetNode(node)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) UpdateNode(oldNode, newNode *api.Node) error {
|
func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error {
|
||||||
cache.mu.Lock()
|
cache.mu.Lock()
|
||||||
defer cache.mu.Unlock()
|
defer cache.mu.Unlock()
|
||||||
|
|
||||||
@ -284,7 +284,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *api.Node) error {
|
|||||||
return n.SetNode(newNode)
|
return n.SetNode(newNode)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (cache *schedulerCache) RemoveNode(node *api.Node) error {
|
func (cache *schedulerCache) RemoveNode(node *v1.Node) error {
|
||||||
cache.mu.Lock()
|
cache.mu.Lock()
|
||||||
defer cache.mu.Unlock()
|
defer cache.mu.Unlock()
|
||||||
|
|
||||||
|
@ -22,8 +22,8 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||||
)
|
)
|
||||||
@ -42,19 +42,19 @@ func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *No
|
|||||||
// on node level.
|
// on node level.
|
||||||
func TestAssumePodScheduled(t *testing.T) {
|
func TestAssumePodScheduled(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
testPods := []*api.Pod{
|
testPods := []*v1.Pod{
|
||||||
makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
makeBasePod(nodeName, "test-1", "100m", "500", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
makeBasePod(nodeName, "test-2", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}),
|
makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
|
||||||
makeBasePod(nodeName, "test-nonzero", "", "", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test-nonzero", "", "", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
}
|
}
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
|
|
||||||
wNodeInfo *NodeInfo
|
wNodeInfo *NodeInfo
|
||||||
}{{
|
}{{
|
||||||
pods: []*api.Pod{testPods[0]},
|
pods: []*v1.Pod{testPods[0]},
|
||||||
wNodeInfo: &NodeInfo{
|
wNodeInfo: &NodeInfo{
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 100,
|
MilliCPU: 100,
|
||||||
@ -65,10 +65,10 @@ func TestAssumePodScheduled(t *testing.T) {
|
|||||||
Memory: 500,
|
Memory: 500,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[0]},
|
pods: []*v1.Pod{testPods[0]},
|
||||||
},
|
},
|
||||||
}, {
|
}, {
|
||||||
pods: []*api.Pod{testPods[1], testPods[2]},
|
pods: []*v1.Pod{testPods[1], testPods[2]},
|
||||||
wNodeInfo: &NodeInfo{
|
wNodeInfo: &NodeInfo{
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 300,
|
MilliCPU: 300,
|
||||||
@ -79,10 +79,10 @@ func TestAssumePodScheduled(t *testing.T) {
|
|||||||
Memory: 1524,
|
Memory: 1524,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[1], testPods[2]},
|
pods: []*v1.Pod{testPods[1], testPods[2]},
|
||||||
},
|
},
|
||||||
}, { // test non-zero request
|
}, { // test non-zero request
|
||||||
pods: []*api.Pod{testPods[3]},
|
pods: []*v1.Pod{testPods[3]},
|
||||||
wNodeInfo: &NodeInfo{
|
wNodeInfo: &NodeInfo{
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 0,
|
MilliCPU: 0,
|
||||||
@ -93,7 +93,7 @@ func TestAssumePodScheduled(t *testing.T) {
|
|||||||
Memory: priorityutil.DefaultMemoryRequest,
|
Memory: priorityutil.DefaultMemoryRequest,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[3]},
|
pods: []*v1.Pod{testPods[3]},
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -119,7 +119,7 @@ func TestAssumePodScheduled(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type testExpirePodStruct struct {
|
type testExpirePodStruct struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
assumedTime time.Time
|
assumedTime time.Time
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -127,9 +127,9 @@ type testExpirePodStruct struct {
|
|||||||
// The removal will be reflected in node info.
|
// The removal will be reflected in node info.
|
||||||
func TestExpirePod(t *testing.T) {
|
func TestExpirePod(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
testPods := []*api.Pod{
|
testPods := []*v1.Pod{
|
||||||
makeBasePod(nodeName, "test-1", "100m", "500", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
makeBasePod(nodeName, "test-2", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}),
|
makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
|
||||||
}
|
}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
ttl := 10 * time.Second
|
ttl := 10 * time.Second
|
||||||
@ -160,7 +160,7 @@ func TestExpirePod(t *testing.T) {
|
|||||||
Memory: 1024,
|
Memory: 1024,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[1]},
|
pods: []*v1.Pod{testPods[1]},
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -186,18 +186,18 @@ func TestAddPodWillConfirm(t *testing.T) {
|
|||||||
now := time.Now()
|
now := time.Now()
|
||||||
ttl := 10 * time.Second
|
ttl := 10 * time.Second
|
||||||
|
|
||||||
testPods := []*api.Pod{
|
testPods := []*v1.Pod{
|
||||||
makeBasePod(nodeName, "test-1", "100m", "500", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
makeBasePod(nodeName, "test-2", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}),
|
makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
podsToAssume []*api.Pod
|
podsToAssume []*v1.Pod
|
||||||
podsToAdd []*api.Pod
|
podsToAdd []*v1.Pod
|
||||||
|
|
||||||
wNodeInfo *NodeInfo
|
wNodeInfo *NodeInfo
|
||||||
}{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
|
}{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed.
|
||||||
podsToAssume: []*api.Pod{testPods[0], testPods[1]},
|
podsToAssume: []*v1.Pod{testPods[0], testPods[1]},
|
||||||
podsToAdd: []*api.Pod{testPods[0]},
|
podsToAdd: []*v1.Pod{testPods[0]},
|
||||||
wNodeInfo: &NodeInfo{
|
wNodeInfo: &NodeInfo{
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 100,
|
MilliCPU: 100,
|
||||||
@ -208,7 +208,7 @@ func TestAddPodWillConfirm(t *testing.T) {
|
|||||||
Memory: 500,
|
Memory: 500,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[0]},
|
pods: []*v1.Pod{testPods[0]},
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -235,9 +235,9 @@ func TestAddPodWillConfirm(t *testing.T) {
|
|||||||
func TestAddPodAfterExpiration(t *testing.T) {
|
func TestAddPodAfterExpiration(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
ttl := 10 * time.Second
|
ttl := 10 * time.Second
|
||||||
basePod := makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}})
|
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
|
|
||||||
wNodeInfo *NodeInfo
|
wNodeInfo *NodeInfo
|
||||||
}{{
|
}{{
|
||||||
@ -252,7 +252,7 @@ func TestAddPodAfterExpiration(t *testing.T) {
|
|||||||
Memory: 500,
|
Memory: 500,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{basePod},
|
pods: []*v1.Pod{basePod},
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -281,19 +281,19 @@ func TestAddPodAfterExpiration(t *testing.T) {
|
|||||||
func TestUpdatePod(t *testing.T) {
|
func TestUpdatePod(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
ttl := 10 * time.Second
|
ttl := 10 * time.Second
|
||||||
testPods := []*api.Pod{
|
testPods := []*v1.Pod{
|
||||||
makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
makeBasePod(nodeName, "test", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}),
|
makeBasePod(nodeName, "test", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
podsToAssume []*api.Pod
|
podsToAssume []*v1.Pod
|
||||||
podsToAdd []*api.Pod
|
podsToAdd []*v1.Pod
|
||||||
podsToUpdate []*api.Pod
|
podsToUpdate []*v1.Pod
|
||||||
|
|
||||||
wNodeInfo []*NodeInfo
|
wNodeInfo []*NodeInfo
|
||||||
}{{ // add a pod and then update it twice
|
}{{ // add a pod and then update it twice
|
||||||
podsToAdd: []*api.Pod{testPods[0]},
|
podsToAdd: []*v1.Pod{testPods[0]},
|
||||||
podsToUpdate: []*api.Pod{testPods[0], testPods[1], testPods[0]},
|
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
|
||||||
wNodeInfo: []*NodeInfo{{
|
wNodeInfo: []*NodeInfo{{
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 200,
|
MilliCPU: 200,
|
||||||
@ -304,7 +304,7 @@ func TestUpdatePod(t *testing.T) {
|
|||||||
Memory: 1024,
|
Memory: 1024,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[1]},
|
pods: []*v1.Pod{testPods[1]},
|
||||||
}, {
|
}, {
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 100,
|
MilliCPU: 100,
|
||||||
@ -315,7 +315,7 @@ func TestUpdatePod(t *testing.T) {
|
|||||||
Memory: 500,
|
Memory: 500,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[0]},
|
pods: []*v1.Pod{testPods[0]},
|
||||||
}},
|
}},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -345,20 +345,20 @@ func TestUpdatePod(t *testing.T) {
|
|||||||
func TestExpireAddUpdatePod(t *testing.T) {
|
func TestExpireAddUpdatePod(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
ttl := 10 * time.Second
|
ttl := 10 * time.Second
|
||||||
testPods := []*api.Pod{
|
testPods := []*v1.Pod{
|
||||||
makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}),
|
makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
|
||||||
makeBasePod(nodeName, "test", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}),
|
makeBasePod(nodeName, "test", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
|
||||||
}
|
}
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
podsToAssume []*api.Pod
|
podsToAssume []*v1.Pod
|
||||||
podsToAdd []*api.Pod
|
podsToAdd []*v1.Pod
|
||||||
podsToUpdate []*api.Pod
|
podsToUpdate []*v1.Pod
|
||||||
|
|
||||||
wNodeInfo []*NodeInfo
|
wNodeInfo []*NodeInfo
|
||||||
}{{ // Pod is assumed, expired, and added. Then it would be updated twice.
|
}{{ // Pod is assumed, expired, and added. Then it would be updated twice.
|
||||||
podsToAssume: []*api.Pod{testPods[0]},
|
podsToAssume: []*v1.Pod{testPods[0]},
|
||||||
podsToAdd: []*api.Pod{testPods[0]},
|
podsToAdd: []*v1.Pod{testPods[0]},
|
||||||
podsToUpdate: []*api.Pod{testPods[0], testPods[1], testPods[0]},
|
podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]},
|
||||||
wNodeInfo: []*NodeInfo{{
|
wNodeInfo: []*NodeInfo{{
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 200,
|
MilliCPU: 200,
|
||||||
@ -369,7 +369,7 @@ func TestExpireAddUpdatePod(t *testing.T) {
|
|||||||
Memory: 1024,
|
Memory: 1024,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[1]},
|
pods: []*v1.Pod{testPods[1]},
|
||||||
}, {
|
}, {
|
||||||
requestedResource: &Resource{
|
requestedResource: &Resource{
|
||||||
MilliCPU: 100,
|
MilliCPU: 100,
|
||||||
@ -380,7 +380,7 @@ func TestExpireAddUpdatePod(t *testing.T) {
|
|||||||
Memory: 500,
|
Memory: 500,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{testPods[0]},
|
pods: []*v1.Pod{testPods[0]},
|
||||||
}},
|
}},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -417,9 +417,9 @@ func TestExpireAddUpdatePod(t *testing.T) {
|
|||||||
// TestRemovePod tests after added pod is removed, its information should also be subtracted.
|
// TestRemovePod tests after added pod is removed, its information should also be subtracted.
|
||||||
func TestRemovePod(t *testing.T) {
|
func TestRemovePod(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
basePod := makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}})
|
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pod *api.Pod
|
pod *v1.Pod
|
||||||
|
|
||||||
wNodeInfo *NodeInfo
|
wNodeInfo *NodeInfo
|
||||||
}{{
|
}{{
|
||||||
@ -434,7 +434,7 @@ func TestRemovePod(t *testing.T) {
|
|||||||
Memory: 500,
|
Memory: 500,
|
||||||
},
|
},
|
||||||
allocatableResource: &Resource{},
|
allocatableResource: &Resource{},
|
||||||
pods: []*api.Pod{basePod},
|
pods: []*v1.Pod{basePod},
|
||||||
},
|
},
|
||||||
}}
|
}}
|
||||||
|
|
||||||
@ -459,11 +459,11 @@ func TestRemovePod(t *testing.T) {
|
|||||||
|
|
||||||
func TestForgetPod(t *testing.T) {
|
func TestForgetPod(t *testing.T) {
|
||||||
nodeName := "node"
|
nodeName := "node"
|
||||||
basePod := makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}})
|
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
}{{
|
}{{
|
||||||
pods: []*api.Pod{basePod},
|
pods: []*v1.Pod{basePod},
|
||||||
}}
|
}}
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
ttl := 10 * time.Second
|
ttl := 10 * time.Second
|
||||||
@ -517,22 +517,22 @@ func benchmarkExpire(b *testing.B, podNum int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func makeBasePod(nodeName, objName, cpu, mem string, ports []api.ContainerPort) *api.Pod {
func makeBasePod(nodeName, objName, cpu, mem string, ports []v1.ContainerPort) *v1.Pod {
req := api.ResourceList{}
req := v1.ResourceList{}
if cpu != "" {
req = api.ResourceList{
req = v1.ResourceList{
api.ResourceCPU: resource.MustParse(cpu),
v1.ResourceCPU: resource.MustParse(cpu),
api.ResourceMemory: resource.MustParse(mem),
v1.ResourceMemory: resource.MustParse(mem),
}
}
return &api.Pod{
return &v1.Pod{
ObjectMeta: api.ObjectMeta{
ObjectMeta: v1.ObjectMeta{
Namespace: "node_info_cache_test",
Name: objName,
},
Spec: api.PodSpec{
Spec: v1.PodSpec{
Containers: []api.Container{{
Containers: []v1.Container{{
Resources: api.ResourceRequirements{
Resources: v1.ResourceRequirements{
Requests: req,
},
Ports: ports,
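makeBasePod feeds resource.MustParse quantities into the pod's requests, which is where the aggregated expectations earlier in this file come from (for example Memory: 1524 for a "500" pod plus a "1Ki" pod). A small, self-contained illustration of the parsing, not part of this commit:

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
)

func main() {
    cpu := resource.MustParse("100m") // 100 milli-CPU
    mem := resource.MustParse("1Ki")  // 1024 bytes (binary suffix)
    fmt.Println(cpu.MilliValue(), mem.Value()) // 100 1024
}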
@ -17,7 +17,7 @@ limitations under the License.
|
|||||||
package schedulercache
|
package schedulercache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -59,29 +59,29 @@ type Cache interface {
// AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
// The implementation also decides the policy to expire pod before being confirmed (receiving Add event).
// After expiration, its information would be subtracted.
AssumePod(pod *api.Pod) error
AssumePod(pod *v1.Pod) error

// ForgetPod removes an assumed pod from cache.
ForgetPod(pod *api.Pod) error
ForgetPod(pod *v1.Pod) error

// AddPod either confirms a pod if it's assumed, or adds it back if it's expired.
// If added back, the pod's information would be added again.
AddPod(pod *api.Pod) error
AddPod(pod *v1.Pod) error

// UpdatePod removes oldPod's information and adds newPod's information.
UpdatePod(oldPod, newPod *api.Pod) error
UpdatePod(oldPod, newPod *v1.Pod) error

// RemovePod removes a pod. The pod's information would be subtracted from assigned node.
RemovePod(pod *api.Pod) error
RemovePod(pod *v1.Pod) error

// AddNode adds overall information about node.
AddNode(node *api.Node) error
AddNode(node *v1.Node) error

// UpdateNode updates overall information about node.
UpdateNode(oldNode, newNode *api.Node) error
UpdateNode(oldNode, newNode *v1.Node) error

// RemoveNode removes overall information about node.
RemoveNode(node *api.Node) error
RemoveNode(node *v1.Node) error

// UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache.
// The node info contains aggregated information of pods scheduled (including assumed to be)
@ -89,5 +89,5 @@ type Cache interface {
UpdateNodeNameToInfoMap(infoMap map[string]*NodeInfo) error

// List lists all cached pods (including assumed ones).
List(labels.Selector) ([]*api.Pod, error)
List(labels.Selector) ([]*v1.Pod, error)
}
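Taken together, the methods above describe the assume/confirm/expire lifecycle the scheduler relies on. A minimal sketch of how a caller might drive it, assuming the schedulercache.New(ttl, stop) constructor exercised in the tests earlier in this diff:

package main

import (
    "fmt"
    "time"

    "k8s.io/kubernetes/pkg/api/v1"
    "k8s.io/kubernetes/pkg/labels"
    "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
)

func main() {
    stop := make(chan struct{})
    defer close(stop)

    // Assumed constructor: schedulercache.New(ttl, stop), as used by the tests above.
    cache := schedulercache.New(30*time.Second, stop)

    node := &v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}}
    _ = cache.AddNode(node)

    // The scheduler assumes the pod optimistically before the binding is confirmed
    // by an informer Add event; an unconfirmed assume expires after the ttl.
    pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "default"}}
    pod.Spec.NodeName = node.Name
    _ = cache.AssumePod(pod)
    _ = cache.AddPod(pod)

    pods, _ := cache.List(labels.Everything())
    fmt.Println(len(pods)) // 1
}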
@ -21,8 +21,8 @@ import (
|
|||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
|
||||||
"k8s.io/kubernetes/pkg/api"
|
|
||||||
"k8s.io/kubernetes/pkg/api/resource"
|
"k8s.io/kubernetes/pkg/api/resource"
|
||||||
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
clientcache "k8s.io/kubernetes/pkg/client/cache"
|
clientcache "k8s.io/kubernetes/pkg/client/cache"
|
||||||
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
|
||||||
)
|
)
|
||||||
@ -32,10 +32,10 @@ var emptyResource = Resource{}
|
|||||||
// NodeInfo is node level aggregated information.
|
// NodeInfo is node level aggregated information.
|
||||||
type NodeInfo struct {
|
type NodeInfo struct {
|
||||||
// Overall node information.
|
// Overall node information.
|
||||||
node *api.Node
|
node *v1.Node
|
||||||
|
|
||||||
pods []*api.Pod
|
pods []*v1.Pod
|
||||||
podsWithAffinity []*api.Pod
|
podsWithAffinity []*v1.Pod
|
||||||
|
|
||||||
// Total requested resource of all pods on this node.
|
// Total requested resource of all pods on this node.
|
||||||
// It includes assumed pods which scheduler sends binding to apiserver but
|
// It includes assumed pods which scheduler sends binding to apiserver but
|
||||||
@ -59,14 +59,14 @@ type Resource struct {
|
|||||||
MilliCPU int64
|
MilliCPU int64
|
||||||
Memory int64
|
Memory int64
|
||||||
NvidiaGPU int64
|
NvidiaGPU int64
|
||||||
OpaqueIntResources map[api.ResourceName]int64
|
OpaqueIntResources map[v1.ResourceName]int64
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *Resource) ResourceList() api.ResourceList {
|
func (r *Resource) ResourceList() v1.ResourceList {
|
||||||
result := api.ResourceList{
|
result := v1.ResourceList{
|
||||||
api.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
|
v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI),
|
||||||
api.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),
|
v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI),
|
||||||
api.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
|
v1.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI),
|
||||||
}
|
}
|
||||||
for rName, rQuant := range r.OpaqueIntResources {
|
for rName, rQuant := range r.OpaqueIntResources {
|
||||||
result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI)
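ResourceList() above rebuilds a v1.ResourceList from the cached integer totals. A quick, hedged sketch of the same constructors in isolation (illustrative values, not taken from this commit):

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api/resource"
    "k8s.io/kubernetes/pkg/api/v1"
)

func main() {
    rl := v1.ResourceList{
        v1.ResourceCPU:    *resource.NewMilliQuantity(250, resource.DecimalSI),
        v1.ResourceMemory: *resource.NewQuantity(512, resource.BinarySI),
    }
    cpu := rl[v1.ResourceCPU]
    mem := rl[v1.ResourceMemory]
    fmt.Println(cpu.MilliValue(), mem.Value()) // 250 512
}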
@ -77,7 +77,7 @@ func (r *Resource) ResourceList() api.ResourceList {
|
|||||||
// NewNodeInfo returns a ready to use empty NodeInfo object.
|
// NewNodeInfo returns a ready to use empty NodeInfo object.
|
||||||
// If any pods are given in arguments, their information will be aggregated in
|
// If any pods are given in arguments, their information will be aggregated in
|
||||||
// the returned object.
|
// the returned object.
|
||||||
func NewNodeInfo(pods ...*api.Pod) *NodeInfo {
|
func NewNodeInfo(pods ...*v1.Pod) *NodeInfo {
|
||||||
ni := &NodeInfo{
|
ni := &NodeInfo{
|
||||||
requestedResource: &Resource{},
|
requestedResource: &Resource{},
|
||||||
nonzeroRequest: &Resource{},
|
nonzeroRequest: &Resource{},
|
||||||
@ -92,7 +92,7 @@ func NewNodeInfo(pods ...*api.Pod) *NodeInfo {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Returns overall information about this node.
|
// Returns overall information about this node.
|
||||||
func (n *NodeInfo) Node() *api.Node {
|
func (n *NodeInfo) Node() *v1.Node {
|
||||||
if n == nil {
|
if n == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -100,7 +100,7 @@ func (n *NodeInfo) Node() *api.Node {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Pods return all pods scheduled (including assumed to be) on this node.
|
// Pods return all pods scheduled (including assumed to be) on this node.
|
||||||
func (n *NodeInfo) Pods() []*api.Pod {
|
func (n *NodeInfo) Pods() []*v1.Pod {
|
||||||
if n == nil {
|
if n == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -108,7 +108,7 @@ func (n *NodeInfo) Pods() []*api.Pod {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// PodsWithAffinity return all pods with (anti)affinity constraints on this node.
|
// PodsWithAffinity return all pods with (anti)affinity constraints on this node.
|
||||||
func (n *NodeInfo) PodsWithAffinity() []*api.Pod {
|
func (n *NodeInfo) PodsWithAffinity() []*v1.Pod {
|
||||||
if n == nil {
|
if n == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -156,10 +156,10 @@ func (n *NodeInfo) Clone() *NodeInfo {
|
|||||||
generation: n.generation,
|
generation: n.generation,
|
||||||
}
|
}
|
||||||
if len(n.pods) > 0 {
|
if len(n.pods) > 0 {
|
||||||
clone.pods = append([]*api.Pod(nil), n.pods...)
|
clone.pods = append([]*v1.Pod(nil), n.pods...)
|
||||||
}
|
}
|
||||||
if len(n.podsWithAffinity) > 0 {
|
if len(n.podsWithAffinity) > 0 {
|
||||||
clone.podsWithAffinity = append([]*api.Pod(nil), n.podsWithAffinity...)
|
clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...)
|
||||||
}
|
}
|
||||||
return clone
|
return clone
|
||||||
}
|
}
|
||||||
@ -173,8 +173,8 @@ func (n *NodeInfo) String() string {
|
|||||||
return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v}", podKeys, n.requestedResource, n.nonzeroRequest)
|
return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v}", podKeys, n.requestedResource, n.nonzeroRequest)
|
||||||
}
|
}
|
||||||
|
|
||||||
func hasPodAffinityConstraints(pod *api.Pod) bool {
|
func hasPodAffinityConstraints(pod *v1.Pod) bool {
|
||||||
affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations)
|
affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations)
|
||||||
if err != nil || affinity == nil {
|
if err != nil || affinity == nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@ -182,14 +182,14 @@ func hasPodAffinityConstraints(pod *api.Pod) bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// addPod adds pod information to this NodeInfo.
|
// addPod adds pod information to this NodeInfo.
|
||||||
func (n *NodeInfo) addPod(pod *api.Pod) {
|
func (n *NodeInfo) addPod(pod *v1.Pod) {
|
||||||
// cpu, mem, nvidia_gpu, non0_cpu, non0_mem := calculateResource(pod)
|
// cpu, mem, nvidia_gpu, non0_cpu, non0_mem := calculateResource(pod)
|
||||||
res, non0_cpu, non0_mem := calculateResource(pod)
|
res, non0_cpu, non0_mem := calculateResource(pod)
|
||||||
n.requestedResource.MilliCPU += res.MilliCPU
|
n.requestedResource.MilliCPU += res.MilliCPU
|
||||||
n.requestedResource.Memory += res.Memory
|
n.requestedResource.Memory += res.Memory
|
||||||
n.requestedResource.NvidiaGPU += res.NvidiaGPU
|
n.requestedResource.NvidiaGPU += res.NvidiaGPU
|
||||||
if n.requestedResource.OpaqueIntResources == nil && len(res.OpaqueIntResources) > 0 {
|
if n.requestedResource.OpaqueIntResources == nil && len(res.OpaqueIntResources) > 0 {
|
||||||
n.requestedResource.OpaqueIntResources = map[api.ResourceName]int64{}
|
n.requestedResource.OpaqueIntResources = map[v1.ResourceName]int64{}
|
||||||
}
|
}
|
||||||
for rName, rQuant := range res.OpaqueIntResources {
|
for rName, rQuant := range res.OpaqueIntResources {
|
||||||
n.requestedResource.OpaqueIntResources[rName] += rQuant
|
n.requestedResource.OpaqueIntResources[rName] += rQuant
|
||||||
@ -204,7 +204,7 @@ func (n *NodeInfo) addPod(pod *api.Pod) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// removePod subtracts pod information to this NodeInfo.
|
// removePod subtracts pod information to this NodeInfo.
|
||||||
func (n *NodeInfo) removePod(pod *api.Pod) error {
|
func (n *NodeInfo) removePod(pod *v1.Pod) error {
|
||||||
k1, err := getPodKey(pod)
|
k1, err := getPodKey(pod)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -240,7 +240,7 @@ func (n *NodeInfo) removePod(pod *api.Pod) error {
|
|||||||
n.requestedResource.Memory -= res.Memory
|
n.requestedResource.Memory -= res.Memory
|
||||||
n.requestedResource.NvidiaGPU -= res.NvidiaGPU
|
n.requestedResource.NvidiaGPU -= res.NvidiaGPU
|
||||||
if len(res.OpaqueIntResources) > 0 && n.requestedResource.OpaqueIntResources == nil {
|
if len(res.OpaqueIntResources) > 0 && n.requestedResource.OpaqueIntResources == nil {
|
||||||
n.requestedResource.OpaqueIntResources = map[api.ResourceName]int64{}
|
n.requestedResource.OpaqueIntResources = map[v1.ResourceName]int64{}
|
||||||
}
|
}
|
||||||
for rName, rQuant := range res.OpaqueIntResources {
|
for rName, rQuant := range res.OpaqueIntResources {
|
||||||
n.requestedResource.OpaqueIntResources[rName] -= rQuant
|
n.requestedResource.OpaqueIntResources[rName] -= rQuant
|
||||||
@ -254,21 +254,21 @@ func (n *NodeInfo) removePod(pod *api.Pod) error {
|
|||||||
return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
|
return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name)
|
||||||
}
|
}
|
||||||
|
|
||||||
func calculateResource(pod *api.Pod) (res Resource, non0_cpu int64, non0_mem int64) {
|
func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) {
|
||||||
for _, c := range pod.Spec.Containers {
|
for _, c := range pod.Spec.Containers {
|
||||||
for rName, rQuant := range c.Resources.Requests {
|
for rName, rQuant := range c.Resources.Requests {
|
||||||
switch rName {
|
switch rName {
|
||||||
case api.ResourceCPU:
|
case v1.ResourceCPU:
|
||||||
res.MilliCPU += rQuant.MilliValue()
|
res.MilliCPU += rQuant.MilliValue()
|
||||||
case api.ResourceMemory:
|
case v1.ResourceMemory:
|
||||||
res.Memory += rQuant.Value()
|
res.Memory += rQuant.Value()
|
||||||
case api.ResourceNvidiaGPU:
|
case v1.ResourceNvidiaGPU:
|
||||||
res.NvidiaGPU += rQuant.Value()
|
res.NvidiaGPU += rQuant.Value()
|
||||||
default:
|
default:
|
||||||
if api.IsOpaqueIntResourceName(rName) {
|
if v1.IsOpaqueIntResourceName(rName) {
|
||||||
// Lazily allocate opaque resource map.
|
// Lazily allocate opaque resource map.
|
||||||
if res.OpaqueIntResources == nil {
|
if res.OpaqueIntResources == nil {
|
||||||
res.OpaqueIntResources = map[api.ResourceName]int64{}
|
res.OpaqueIntResources = map[v1.ResourceName]int64{}
|
||||||
}
|
}
|
||||||
res.OpaqueIntResources[rName] += rQuant.Value()
|
res.OpaqueIntResources[rName] += rQuant.Value()
|
||||||
}
|
}
|
||||||
@ -284,23 +284,23 @@ func calculateResource(pod *api.Pod) (res Resource, non0_cpu int64, non0_mem int
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Sets the overall node information.
|
// Sets the overall node information.
|
||||||
func (n *NodeInfo) SetNode(node *api.Node) error {
|
func (n *NodeInfo) SetNode(node *v1.Node) error {
|
||||||
n.node = node
|
n.node = node
|
||||||
for rName, rQuant := range node.Status.Allocatable {
|
for rName, rQuant := range node.Status.Allocatable {
|
||||||
switch rName {
|
switch rName {
|
||||||
case api.ResourceCPU:
|
case v1.ResourceCPU:
|
||||||
n.allocatableResource.MilliCPU = rQuant.MilliValue()
|
n.allocatableResource.MilliCPU = rQuant.MilliValue()
|
||||||
case api.ResourceMemory:
|
case v1.ResourceMemory:
|
||||||
n.allocatableResource.Memory = rQuant.Value()
|
n.allocatableResource.Memory = rQuant.Value()
|
||||||
case api.ResourceNvidiaGPU:
|
case v1.ResourceNvidiaGPU:
|
||||||
n.allocatableResource.NvidiaGPU = rQuant.Value()
|
n.allocatableResource.NvidiaGPU = rQuant.Value()
|
||||||
case api.ResourcePods:
|
case v1.ResourcePods:
|
||||||
n.allowedPodNumber = int(rQuant.Value())
|
n.allowedPodNumber = int(rQuant.Value())
|
||||||
default:
|
default:
|
||||||
if api.IsOpaqueIntResourceName(rName) {
|
if v1.IsOpaqueIntResourceName(rName) {
|
||||||
// Lazily allocate opaque resource map.
|
// Lazily allocate opaque resource map.
|
||||||
if n.allocatableResource.OpaqueIntResources == nil {
|
if n.allocatableResource.OpaqueIntResources == nil {
|
||||||
n.allocatableResource.OpaqueIntResources = map[api.ResourceName]int64{}
|
n.allocatableResource.OpaqueIntResources = map[v1.ResourceName]int64{}
|
||||||
}
|
}
|
||||||
n.allocatableResource.OpaqueIntResources[rName] = rQuant.Value()
|
n.allocatableResource.OpaqueIntResources[rName] = rQuant.Value()
|
||||||
}
|
}
|
||||||
@ -311,7 +311,7 @@ func (n *NodeInfo) SetNode(node *api.Node) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Removes the overall information about the node.
|
// Removes the overall information about the node.
|
||||||
func (n *NodeInfo) RemoveNode(node *api.Node) error {
|
func (n *NodeInfo) RemoveNode(node *v1.Node) error {
|
||||||
// We don't remove NodeInfo for because there can still be some pods on this node -
|
// We don't remove NodeInfo for because there can still be some pods on this node -
|
||||||
// this is because notifications about pods are delivered in a different watch,
|
// this is because notifications about pods are delivered in a different watch,
|
||||||
// and thus can potentially be observed later, even though they happened before
|
// and thus can potentially be observed later, even though they happened before
|
||||||
@ -324,6 +324,6 @@ func (n *NodeInfo) RemoveNode(node *api.Node) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// getPodKey returns the string key of a pod.
|
// getPodKey returns the string key of a pod.
|
||||||
func getPodKey(pod *api.Pod) (string, error) {
|
func getPodKey(pod *v1.Pod) (string, error) {
|
||||||
return clientcache.MetaNamespaceKeyFunc(pod)
|
return clientcache.MetaNamespaceKeyFunc(pod)
|
||||||
}
|
}
|
||||||
|
@ -16,11 +16,11 @@ limitations under the License.
|
|||||||
|
|
||||||
package schedulercache
|
package schedulercache
|
||||||
|
|
||||||
import "k8s.io/kubernetes/pkg/api"
|
import "k8s.io/kubernetes/pkg/api/v1"
|
||||||
|
|
||||||
// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
|
// CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names
|
||||||
// and the values are the aggregated information for that node.
|
// and the values are the aggregated information for that node.
|
||||||
func CreateNodeNameToInfoMap(pods []*api.Pod, nodes []*api.Node) map[string]*NodeInfo {
|
func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo {
|
||||||
nodeNameToInfo := make(map[string]*NodeInfo)
|
nodeNameToInfo := make(map[string]*NodeInfo)
|
||||||
for _, pod := range pods {
|
for _, pod := range pods {
|
||||||
nodeName := pod.Spec.NodeName
|
nodeName := pod.Spec.NodeName
|
||||||
|
@ -17,37 +17,37 @@ limitations under the License.
|
|||||||
package schedulercache
|
package schedulercache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
// FakeCache is used for testing
|
// FakeCache is used for testing
|
||||||
type FakeCache struct {
|
type FakeCache struct {
|
||||||
AssumeFunc func(*api.Pod)
|
AssumeFunc func(*v1.Pod)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeCache) AssumePod(pod *api.Pod) error {
|
func (f *FakeCache) AssumePod(pod *v1.Pod) error {
|
||||||
f.AssumeFunc(pod)
|
f.AssumeFunc(pod)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeCache) ForgetPod(pod *api.Pod) error { return nil }
|
func (f *FakeCache) ForgetPod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) AddPod(pod *api.Pod) error { return nil }
|
func (f *FakeCache) AddPod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) UpdatePod(oldPod, newPod *api.Pod) error { return nil }
|
func (f *FakeCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) RemovePod(pod *api.Pod) error { return nil }
|
func (f *FakeCache) RemovePod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) AddNode(node *api.Node) error { return nil }
|
func (f *FakeCache) AddNode(node *v1.Node) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) UpdateNode(oldNode, newNode *api.Node) error { return nil }
|
func (f *FakeCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) RemoveNode(node *api.Node) error { return nil }
|
func (f *FakeCache) RemoveNode(node *v1.Node) error { return nil }
|
||||||
|
|
||||||
func (f *FakeCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
func (f *FakeCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f *FakeCache) List(s labels.Selector) ([]*api.Pod, error) { return nil, nil }
|
func (f *FakeCache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
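FakeCache above gives tests a Cache whose only observable behaviour is AssumeFunc; every other method is a no-op. A sketch of the wiring used by TestScheduler earlier in this diff; the import path of the testing helpers is assumed here:

package scheduler

import (
    "k8s.io/kubernetes/pkg/api/v1"

    schedulertesting "k8s.io/kubernetes/plugin/pkg/scheduler/testing" // assumed path
)

// captureAssumed returns a fake cache that records the pod the scheduler assumes.
func captureAssumed(got **v1.Pod) *schedulertesting.FakeCache {
    return &schedulertesting.FakeCache{
        AssumeFunc: func(pod *v1.Pod) { *got = pod },
    }
}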
@ -17,35 +17,35 @@ limitations under the License.
|
|||||||
package schedulercache
|
package schedulercache
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"k8s.io/kubernetes/pkg/api"
|
"k8s.io/kubernetes/pkg/api/v1"
|
||||||
"k8s.io/kubernetes/pkg/labels"
|
"k8s.io/kubernetes/pkg/labels"
|
||||||
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
"k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PodsToCache is used for testing
|
// PodsToCache is used for testing
|
||||||
type PodsToCache []*api.Pod
|
type PodsToCache []*v1.Pod
|
||||||
|
|
||||||
func (p PodsToCache) AssumePod(pod *api.Pod) error { return nil }
|
func (p PodsToCache) AssumePod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) ForgetPod(pod *api.Pod) error { return nil }
|
func (p PodsToCache) ForgetPod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) AddPod(pod *api.Pod) error { return nil }
|
func (p PodsToCache) AddPod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) UpdatePod(oldPod, newPod *api.Pod) error { return nil }
|
func (p PodsToCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) RemovePod(pod *api.Pod) error { return nil }
|
func (p PodsToCache) RemovePod(pod *v1.Pod) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) AddNode(node *api.Node) error { return nil }
|
func (p PodsToCache) AddNode(node *v1.Node) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) UpdateNode(oldNode, newNode *api.Node) error { return nil }
|
func (p PodsToCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) RemoveNode(node *api.Node) error { return nil }
|
func (p PodsToCache) RemoveNode(node *v1.Node) error { return nil }
|
||||||
|
|
||||||
func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p PodsToCache) List(s labels.Selector) (selected []*api.Pod, err error) {
|
func (p PodsToCache) List(s labels.Selector) (selected []*v1.Pod, err error) {
|
||||||
for _, pod := range p {
|
for _, pod := range p {
|
||||||
if s.Matches(labels.Set(pod.Labels)) {
|
if s.Matches(labels.Set(pod.Labels)) {
|
||||||
selected = append(selected, pod)