From f782aba56eb5dc24f931c103c20ba76bfb320ecf Mon Sep 17 00:00:00 2001 From: Chao Xu Date: Fri, 18 Nov 2016 12:52:35 -0800 Subject: [PATCH] plugin/scheduler --- plugin/cmd/kube-scheduler/app/server.go | 12 +- plugin/pkg/scheduler/algorithm/listers.go | 64 +- .../scheduler/algorithm/predicates/error.go | 6 +- .../algorithm/predicates/metadata.go | 4 +- .../algorithm/predicates/predicates.go | 152 +-- .../algorithm/predicates/predicates_test.go | 1106 ++++++++--------- .../scheduler/algorithm/predicates/utils.go | 10 +- .../algorithm/predicates/utils_test.go | 10 +- .../balanced_resource_allocation.go | 8 +- .../balanced_resource_allocation_test.go | 100 +- .../algorithm/priorities/image_locality.go | 6 +- .../priorities/image_locality_test.go | 46 +- .../algorithm/priorities/interpod_affinity.go | 18 +- .../priorities/interpod_affinity_test.go | 356 +++--- .../algorithm/priorities/least_requested.go | 6 +- .../priorities/least_requested_test.go | 100 +- .../algorithm/priorities/metadata.go | 10 +- .../algorithm/priorities/most_requested.go | 6 +- .../priorities/most_requested_test.go | 64 +- .../algorithm/priorities/node_affinity.go | 12 +- .../priorities/node_affinity_test.go | 58 +- .../algorithm/priorities/node_label.go | 4 +- .../algorithm/priorities/node_label_test.go | 52 +- .../priorities/node_prefer_avoid_pods.go | 6 +- .../priorities/node_prefer_avoid_pods_test.go | 42 +- .../priorities/selector_spreading.go | 12 +- .../priorities/selector_spreading_test.go | 380 +++--- .../algorithm/priorities/taint_toleration.go | 24 +- .../priorities/taint_toleration_test.go | 106 +- .../algorithm/priorities/test_util.go | 16 +- .../algorithm/priorities/util/non_zero.go | 12 +- .../algorithm/priorities/util/topologies.go | 10 +- .../algorithm/priorities/util/util.go | 6 +- .../algorithm/scheduler_interface.go | 8 +- .../algorithm/scheduler_interface_test.go | 8 +- plugin/pkg/scheduler/algorithm/types.go | 16 +- .../defaults/compatibility_test.go | 8 +- .../algorithmprovider/defaults/defaults.go | 6 +- plugin/pkg/scheduler/api/types.go | 8 +- plugin/pkg/scheduler/equivalence_cache.go | 18 +- plugin/pkg/scheduler/extender.go | 16 +- plugin/pkg/scheduler/extender_test.go | 38 +- plugin/pkg/scheduler/factory/factory.go | 115 +- plugin/pkg/scheduler/factory/factory_test.go | 112 +- plugin/pkg/scheduler/generic_scheduler.go | 28 +- .../pkg/scheduler/generic_scheduler_test.go | 122 +- plugin/pkg/scheduler/scheduler.go | 36 +- plugin/pkg/scheduler/scheduler_test.go | 144 +-- plugin/pkg/scheduler/schedulercache/cache.go | 32 +- .../scheduler/schedulercache/cache_test.go | 132 +- .../pkg/scheduler/schedulercache/interface.go | 20 +- .../pkg/scheduler/schedulercache/node_info.go | 74 +- plugin/pkg/scheduler/schedulercache/util.go | 4 +- plugin/pkg/scheduler/testing/fake_cache.go | 22 +- plugin/pkg/scheduler/testing/pods_to_cache.go | 22 +- 55 files changed, 1907 insertions(+), 1906 deletions(-) diff --git a/plugin/cmd/kube-scheduler/app/server.go b/plugin/cmd/kube-scheduler/app/server.go index 766d0be2754..60fdabf90e4 100644 --- a/plugin/cmd/kube-scheduler/app/server.go +++ b/plugin/cmd/kube-scheduler/app/server.go @@ -26,9 +26,9 @@ import ( "os" "strconv" - "k8s.io/kubernetes/pkg/api" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" - unversionedcore "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/typed/core/internalversion" + "k8s.io/kubernetes/pkg/api/v1" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" + v1core 
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5/typed/core/v1" "k8s.io/kubernetes/pkg/client/leaderelection" "k8s.io/kubernetes/pkg/client/leaderelection/resourcelock" "k8s.io/kubernetes/pkg/client/record" @@ -122,9 +122,9 @@ func Run(s *options.SchedulerServer) error { } eventBroadcaster := record.NewBroadcaster() - config.Recorder = eventBroadcaster.NewRecorder(api.EventSource{Component: s.SchedulerName}) + config.Recorder = eventBroadcaster.NewRecorder(v1.EventSource{Component: s.SchedulerName}) eventBroadcaster.StartLogging(glog.Infof) - eventBroadcaster.StartRecordingToSink(&unversionedcore.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")}) + eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: leaderElectionClient.Core().Events("")}) sched := scheduler.New(config) @@ -147,7 +147,7 @@ func Run(s *options.SchedulerServer) error { // TODO: enable other lock types rl := resourcelock.EndpointsLock{ - EndpointsMeta: api.ObjectMeta{ + EndpointsMeta: v1.ObjectMeta{ Namespace: "kube-system", Name: "kube-scheduler", }, diff --git a/plugin/pkg/scheduler/algorithm/listers.go b/plugin/pkg/scheduler/algorithm/listers.go index 343a27c7946..78c2c93d3df 100644 --- a/plugin/pkg/scheduler/algorithm/listers.go +++ b/plugin/pkg/scheduler/algorithm/listers.go @@ -19,39 +19,39 @@ package algorithm import ( "fmt" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/api/v1" + extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/labels" ) // NodeLister interface represents anything that can list nodes for a scheduler. type NodeLister interface { - // We explicitly return []*api.Node, instead of api.NodeList, to avoid + // We explicitly return []*v1.Node, instead of v1.NodeList, to avoid // performing expensive copies that are unneded. - List() ([]*api.Node, error) + List() ([]*v1.Node, error) } // FakeNodeLister implements NodeLister on a []string for test purposes. -type FakeNodeLister []*api.Node +type FakeNodeLister []*v1.Node // List returns nodes as a []string. -func (f FakeNodeLister) List() ([]*api.Node, error) { +func (f FakeNodeLister) List() ([]*v1.Node, error) { return f, nil } // PodLister interface represents anything that can list pods for a scheduler. type PodLister interface { - // We explicitly return []*api.Pod, instead of api.PodList, to avoid + // We explicitly return []*v1.Pod, instead of v1.PodList, to avoid // performing expensive copies that are unneded. - List(labels.Selector) ([]*api.Pod, error) + List(labels.Selector) ([]*v1.Pod, error) } -// FakePodLister implements PodLister on an []api.Pods for test purposes. -type FakePodLister []*api.Pod +// FakePodLister implements PodLister on an []v1.Pods for test purposes. +type FakePodLister []*v1.Pod -// List returns []*api.Pod matching a query. -func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) { +// List returns []*v1.Pod matching a query. +func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) { for _, pod := range f { if s.Matches(labels.Set(pod.Labels)) { selected = append(selected, pod) @@ -63,21 +63,21 @@ func (f FakePodLister) List(s labels.Selector) (selected []*api.Pod, err error) // ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler. 
type ServiceLister interface { // Lists all the services - List(labels.Selector) ([]*api.Service, error) + List(labels.Selector) ([]*v1.Service, error) // Gets the services for the given pod - GetPodServices(*api.Pod) ([]*api.Service, error) + GetPodServices(*v1.Pod) ([]*v1.Service, error) } -// FakeServiceLister implements ServiceLister on []api.Service for test purposes. -type FakeServiceLister []*api.Service +// FakeServiceLister implements ServiceLister on []v1.Service for test purposes. +type FakeServiceLister []*v1.Service -// List returns api.ServiceList, the list of all services. -func (f FakeServiceLister) List(labels.Selector) ([]*api.Service, error) { +// List returns v1.ServiceList, the list of all services. +func (f FakeServiceLister) List(labels.Selector) ([]*v1.Service, error) { return f, nil } // GetPodServices gets the services that have the selector that match the labels on the given pod. -func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service, err error) { +func (f FakeServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service, err error) { var selector labels.Selector for i := range f { @@ -97,34 +97,34 @@ func (f FakeServiceLister) GetPodServices(pod *api.Pod) (services []*api.Service // ControllerLister interface represents anything that can produce a list of ReplicationController; the list is consumed by a scheduler. type ControllerLister interface { // Lists all the replication controllers - List(labels.Selector) ([]*api.ReplicationController, error) + List(labels.Selector) ([]*v1.ReplicationController, error) // Gets the services for the given pod - GetPodControllers(*api.Pod) ([]*api.ReplicationController, error) + GetPodControllers(*v1.Pod) ([]*v1.ReplicationController, error) } -// EmptyControllerLister implements ControllerLister on []api.ReplicationController returning empty data +// EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data type EmptyControllerLister struct{} // List returns nil -func (f EmptyControllerLister) List(labels.Selector) ([]*api.ReplicationController, error) { +func (f EmptyControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) { return nil, nil } // GetPodControllers returns nil -func (f EmptyControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) { +func (f EmptyControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) { return nil, nil } -// FakeControllerLister implements ControllerLister on []api.ReplicationController for test purposes. -type FakeControllerLister []*api.ReplicationController +// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes. +type FakeControllerLister []*v1.ReplicationController -// List returns []api.ReplicationController, the list of all ReplicationControllers. -func (f FakeControllerLister) List(labels.Selector) ([]*api.ReplicationController, error) { +// List returns []v1.ReplicationController, the list of all ReplicationControllers. 
+func (f FakeControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) { return f, nil } // GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod -func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []*api.ReplicationController, err error) { +func (f FakeControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) { var selector labels.Selector for i := range f { @@ -147,14 +147,14 @@ func (f FakeControllerLister) GetPodControllers(pod *api.Pod) (controllers []*ap // ReplicaSetLister interface represents anything that can produce a list of ReplicaSet; the list is consumed by a scheduler. type ReplicaSetLister interface { // Gets the replicasets for the given pod - GetPodReplicaSets(*api.Pod) ([]*extensions.ReplicaSet, error) + GetPodReplicaSets(*v1.Pod) ([]*extensions.ReplicaSet, error) } // EmptyReplicaSetLister implements ReplicaSetLister on []extensions.ReplicaSet returning empty data type EmptyReplicaSetLister struct{} // GetPodReplicaSets returns nil -func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) { +func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) { return nil, nil } @@ -162,7 +162,7 @@ func (f EmptyReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extension type FakeReplicaSetLister []*extensions.ReplicaSet // GetPodReplicaSets gets the ReplicaSets that have the selector that match the labels on the given pod -func (f FakeReplicaSetLister) GetPodReplicaSets(pod *api.Pod) (rss []*extensions.ReplicaSet, err error) { +func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*extensions.ReplicaSet, err error) { var selector labels.Selector for _, rs := range f { diff --git a/plugin/pkg/scheduler/algorithm/predicates/error.go b/plugin/pkg/scheduler/algorithm/predicates/error.go index a71cdb9aae7..61bb249bb9e 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/error.go +++ b/plugin/pkg/scheduler/algorithm/predicates/error.go @@ -19,7 +19,7 @@ package predicates import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) var ( @@ -46,13 +46,13 @@ var ( // hit and caused the unfitting failure. 
type InsufficientResourceError struct { // resourceName is the name of the resource that is insufficient - ResourceName api.ResourceName + ResourceName v1.ResourceName requested int64 used int64 capacity int64 } -func NewInsufficientResourceError(resourceName api.ResourceName, requested, used, capacity int64) *InsufficientResourceError { +func NewInsufficientResourceError(resourceName v1.ResourceName, requested, used, capacity int64) *InsufficientResourceError { return &InsufficientResourceError{ ResourceName: resourceName, requested: requested, diff --git a/plugin/pkg/scheduler/algorithm/predicates/metadata.go b/plugin/pkg/scheduler/algorithm/predicates/metadata.go index 7c04d80a4fc..e407ac98af0 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/metadata.go +++ b/plugin/pkg/scheduler/algorithm/predicates/metadata.go @@ -18,7 +18,7 @@ package predicates import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -35,7 +35,7 @@ func NewPredicateMetadataFactory(podLister algorithm.PodLister) algorithm.Metada } // GetMetadata returns the predicateMetadata used which will be used by various predicates. -func (pfactory *PredicateMetadataFactory) GetMetadata(pod *api.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{} { +func (pfactory *PredicateMetadataFactory) GetMetadata(pod *v1.Pod, nodeNameToInfoMap map[string]*schedulercache.NodeInfo) interface{} { // If we cannot compute metadata, just return nil if pod == nil { return nil diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates.go b/plugin/pkg/scheduler/algorithm/predicates/predicates.go index e57afcceba0..7665bcc5572 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates.go @@ -24,8 +24,8 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/kubelet/qos" "k8s.io/kubernetes/pkg/labels" @@ -50,15 +50,15 @@ func RegisterPredicatePrecomputation(predicateName string, precomp PredicateMeta // Other types for predicate functions... type NodeInfo interface { - GetNodeInfo(nodeID string) (*api.Node, error) + GetNodeInfo(nodeID string) (*v1.Node, error) } type PersistentVolumeInfo interface { - GetPersistentVolumeInfo(pvID string) (*api.PersistentVolume, error) + GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) } type PersistentVolumeClaimInfo interface { - GetPersistentVolumeClaimInfo(namespace string, name string) (*api.PersistentVolumeClaim, error) + GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) } // CachedPersistentVolumeClaimInfo implements PersistentVolumeClaimInfo @@ -67,7 +67,7 @@ type CachedPersistentVolumeClaimInfo struct { } // GetPersistentVolumeClaimInfo fetches the claim in specified namespace with specified name -func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*api.PersistentVolumeClaim, error) { +func (c *CachedPersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, name string) (*v1.PersistentVolumeClaim, error) { return c.PersistentVolumeClaims(namespace).Get(name) } @@ -76,8 +76,8 @@ type CachedNodeInfo struct { } // GetNodeInfo returns cached data for the node 'id'. 
-func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) { - node, exists, err := c.Get(&api.Node{ObjectMeta: api.ObjectMeta{Name: id}}) +func (c *CachedNodeInfo) GetNodeInfo(id string) (*v1.Node, error) { + node, exists, err := c.Get(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: id}}) if err != nil { return nil, fmt.Errorf("error retrieving node '%v' from cache: %v", id, err) @@ -87,27 +87,27 @@ func (c *CachedNodeInfo) GetNodeInfo(id string) (*api.Node, error) { return nil, fmt.Errorf("node '%v' not found", id) } - return node.(*api.Node), nil + return node.(*v1.Node), nil } // Note that predicateMetdata and matchingPodAntiAffinityTerm need to be declared in the same file // due to the way declarations are processed in predicate declaration unit tests. type matchingPodAntiAffinityTerm struct { - term *api.PodAffinityTerm - node *api.Node + term *v1.PodAffinityTerm + node *v1.Node } type predicateMetadata struct { - pod *api.Pod + pod *v1.Pod podBestEffort bool podRequest *schedulercache.Resource podPorts map[int]bool matchingAntiAffinityTerms []matchingPodAntiAffinityTerm - serviceAffinityMatchingPodList []*api.Pod - serviceAffinityMatchingPodServices []*api.Service + serviceAffinityMatchingPodList []*v1.Pod + serviceAffinityMatchingPodServices []*v1.Service } -func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { +func isVolumeConflict(volume v1.Volume, pod *v1.Pod) bool { // fast path if there is no conflict checking targets. if volume.GCEPersistentDisk == nil && volume.AWSElasticBlockStore == nil && volume.RBD == nil { return false @@ -151,7 +151,7 @@ func isVolumeConflict(volume api.Volume, pod *api.Pod) bool { // - AWS EBS forbids any two pods mounting the same volume ID // - Ceph RBD forbids if any two pods share at least same monitor, and match pool and image. // TODO: migrate this into some per-volume specific code? 
-func NoDiskConflict(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func NoDiskConflict(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { for _, v := range pod.Spec.Volumes { for _, ev := range nodeInfo.Pods() { if isVolumeConflict(v, ev) { @@ -172,8 +172,8 @@ type MaxPDVolumeCountChecker struct { // VolumeFilter contains information on how to filter PD Volumes when checking PD Volume caps type VolumeFilter struct { // Filter normal volumes - FilterVolume func(vol *api.Volume) (id string, relevant bool) - FilterPersistentVolume func(pv *api.PersistentVolume) (id string, relevant bool) + FilterVolume func(vol *v1.Volume) (id string, relevant bool) + FilterPersistentVolume func(pv *v1.PersistentVolume) (id string, relevant bool) } // NewMaxPDVolumeCountPredicate creates a predicate which evaluates whether a pod can fit based on the @@ -194,7 +194,7 @@ func NewMaxPDVolumeCountPredicate(filter VolumeFilter, maxVolumes int, pvInfo Pe return c.predicate } -func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace string, filteredVolumes map[string]bool) error { +func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []v1.Volume, namespace string, filteredVolumes map[string]bool) error { for _, vol := range volumes { if id, ok := c.filter.FilterVolume(&vol); ok { filteredVolumes[id] = true @@ -248,7 +248,7 @@ func (c *MaxPDVolumeCountChecker) filterVolumes(volumes []api.Volume, namespace return nil } -func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *MaxPDVolumeCountChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. 
if len(pod.Spec.Volumes) == 0 { @@ -293,14 +293,14 @@ func (c *MaxPDVolumeCountChecker) predicate(pod *api.Pod, meta interface{}, node // EBSVolumeFilter is a VolumeFilter for filtering AWS ElasticBlockStore Volumes var EBSVolumeFilter VolumeFilter = VolumeFilter{ - FilterVolume: func(vol *api.Volume) (string, bool) { + FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AWSElasticBlockStore != nil { return vol.AWSElasticBlockStore.VolumeID, true } return "", false }, - FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) { + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.AWSElasticBlockStore != nil { return pv.Spec.AWSElasticBlockStore.VolumeID, true } @@ -310,14 +310,14 @@ var EBSVolumeFilter VolumeFilter = VolumeFilter{ // GCEPDVolumeFilter is a VolumeFilter for filtering GCE PersistentDisk Volumes var GCEPDVolumeFilter VolumeFilter = VolumeFilter{ - FilterVolume: func(vol *api.Volume) (string, bool) { + FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.GCEPersistentDisk != nil { return vol.GCEPersistentDisk.PDName, true } return "", false }, - FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) { + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.GCEPersistentDisk != nil { return pv.Spec.GCEPersistentDisk.PDName, true } @@ -352,7 +352,7 @@ func NewVolumeZonePredicate(pvInfo PersistentVolumeInfo, pvcInfo PersistentVolum return c.predicate } -func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *VolumeZoneChecker) predicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { // If a pod doesn't have any volume attached to it, the predicate will always be true. // Thus we make a fast path for it, to avoid unnecessary computations in this case. if len(pod.Spec.Volumes) == 0 { @@ -427,22 +427,22 @@ func (c *VolumeZoneChecker) predicate(pod *api.Pod, meta interface{}, nodeInfo * return true, nil, nil } -func GetResourceRequest(pod *api.Pod) *schedulercache.Resource { +func GetResourceRequest(pod *v1.Pod) *schedulercache.Resource { result := schedulercache.Resource{} for _, container := range pod.Spec.Containers { for rName, rQuantity := range container.Resources.Requests { switch rName { - case api.ResourceMemory: + case v1.ResourceMemory: result.Memory += rQuantity.Value() - case api.ResourceCPU: + case v1.ResourceCPU: result.MilliCPU += rQuantity.MilliValue() - case api.ResourceNvidiaGPU: + case v1.ResourceNvidiaGPU: result.NvidiaGPU += rQuantity.Value() default: - if api.IsOpaqueIntResourceName(rName) { + if v1.IsOpaqueIntResourceName(rName) { // Lazily allocate this map only if required. 
if result.OpaqueIntResources == nil { - result.OpaqueIntResources = map[api.ResourceName]int64{} + result.OpaqueIntResources = map[v1.ResourceName]int64{} } result.OpaqueIntResources[rName] += rQuantity.Value() } @@ -453,23 +453,23 @@ func GetResourceRequest(pod *api.Pod) *schedulercache.Resource { for _, container := range pod.Spec.InitContainers { for rName, rQuantity := range container.Resources.Requests { switch rName { - case api.ResourceMemory: + case v1.ResourceMemory: if mem := rQuantity.Value(); mem > result.Memory { result.Memory = mem } - case api.ResourceCPU: + case v1.ResourceCPU: if cpu := rQuantity.MilliValue(); cpu > result.MilliCPU { result.MilliCPU = cpu } - case api.ResourceNvidiaGPU: + case v1.ResourceNvidiaGPU: if gpu := rQuantity.Value(); gpu > result.NvidiaGPU { result.NvidiaGPU = gpu } default: - if api.IsOpaqueIntResourceName(rName) { + if v1.IsOpaqueIntResourceName(rName) { // Lazily allocate this map only if required. if result.OpaqueIntResources == nil { - result.OpaqueIntResources = map[api.ResourceName]int64{} + result.OpaqueIntResources = map[v1.ResourceName]int64{} } value := rQuantity.Value() if value > result.OpaqueIntResources[rName] { @@ -482,11 +482,11 @@ func GetResourceRequest(pod *api.Pod) *schedulercache.Resource { return &result } -func podName(pod *api.Pod) string { +func podName(pod *v1.Pod) string { return pod.Namespace + "/" + pod.Name } -func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -495,7 +495,7 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N var predicateFails []algorithm.PredicateFailureReason allowedPodNumber := nodeInfo.AllowedPodNumber() if len(nodeInfo.Pods())+1 > allowedPodNumber { - predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber))) + predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber))) } var podRequest *schedulercache.Resource @@ -511,13 +511,13 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N allocatable := nodeInfo.AllocatableResource() if allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU { - predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)) + predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU)) } if allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory { - predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)) + predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory)) } if allocatable.NvidiaGPU < podRequest.NvidiaGPU+nodeInfo.RequestedResource().NvidiaGPU { - predicateFails = append(predicateFails, NewInsufficientResourceError(api.ResourceNvidiaGPU, 
podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)) + predicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceNvidiaGPU, podRequest.NvidiaGPU, nodeInfo.RequestedResource().NvidiaGPU, allocatable.NvidiaGPU)) } for rName, rQuant := range podRequest.OpaqueIntResources { if allocatable.OpaqueIntResources[rName] < rQuant+nodeInfo.RequestedResource().OpaqueIntResources[rName] { @@ -536,9 +536,9 @@ func PodFitsResources(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N // nodeMatchesNodeSelectorTerms checks if a node's labels satisfy a list of node selector terms, // terms are ORed, and an empty list of terms will match nothing. -func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSelectorTerm) bool { +func nodeMatchesNodeSelectorTerms(node *v1.Node, nodeSelectorTerms []v1.NodeSelectorTerm) bool { for _, req := range nodeSelectorTerms { - nodeSelector, err := api.NodeSelectorRequirementsAsSelector(req.MatchExpressions) + nodeSelector, err := v1.NodeSelectorRequirementsAsSelector(req.MatchExpressions) if err != nil { glog.V(10).Infof("Failed to parse MatchExpressions: %+v, regarding as not match.", req.MatchExpressions) return false @@ -551,7 +551,7 @@ func nodeMatchesNodeSelectorTerms(node *api.Node, nodeSelectorTerms []api.NodeSe } // The pod can only schedule onto nodes that satisfy requirements in both NodeAffinity and nodeSelector. -func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool { +func podMatchesNodeLabels(pod *v1.Pod, node *v1.Node) bool { // Check if node.Labels match pod.Spec.NodeSelector. if len(pod.Spec.NodeSelector) > 0 { selector := labels.SelectorFromSet(pod.Spec.NodeSelector) @@ -562,7 +562,7 @@ func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool { // Parse required node affinity scheduling requirements // and check if the current node match the requirements. 
- affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) + affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations) if err != nil { glog.V(10).Infof("Failed to get Affinity from Pod %+v, err: %+v", podName(pod), err) return false @@ -603,7 +603,7 @@ func podMatchesNodeLabels(pod *api.Pod, node *api.Node) bool { return nodeAffinityMatches } -func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodSelectorMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -614,7 +614,7 @@ func PodSelectorMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache return false, []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch}, nil } -func PodFitsHost(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodFitsHost(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if len(pod.Spec.NodeName) == 0 { return true, nil, nil } @@ -653,7 +653,7 @@ func NewNodeLabelPredicate(labels []string, presence bool) algorithm.FitPredicat // Alternately, eliminating nodes that have a certain label, regardless of value, is also useful // A node may have a label with "retiring" as key and the date as the value // and it may be desirable to avoid scheduling new pods on this node -func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -732,9 +732,9 @@ func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister al // // WARNING: This Predicate is NOT guaranteed to work if some of the predicateMetadata data isn't precomputed... // For that reason it is not exported, i.e. it is highly coupled to the implementation of the FitPredicate construction. 
-func (s *ServiceAffinity) checkServiceAffinity(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { - var services []*api.Service - var pods []*api.Pod +func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { + var services []*v1.Service + var pods []*v1.Pod if pm, ok := meta.(*predicateMetadata); ok && (pm.serviceAffinityMatchingPodList != nil || pm.serviceAffinityMatchingPodServices != nil) { services = pm.serviceAffinityMatchingPodServices pods = pm.serviceAffinityMatchingPodList @@ -769,7 +769,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *api.Pod, meta interface{}, n return false, []algorithm.PredicateFailureReason{ErrServiceAffinityViolated}, nil } -func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodFitsHostPorts(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var wantPorts map[int]bool if predicateMeta, ok := meta.(*predicateMetadata); ok { wantPorts = predicateMeta.podPorts @@ -791,7 +791,7 @@ func PodFitsHostPorts(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N return true, nil, nil } -func GetUsedPorts(pods ...*api.Pod) map[int]bool { +func GetUsedPorts(pods ...*v1.Pod) map[int]bool { ports := make(map[int]bool) for _, pod := range pods { for j := range pod.Spec.Containers { @@ -821,7 +821,7 @@ func haveSame(a1, a2 []string) bool { return false } -func GeneralPredicates(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func GeneralPredicates(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { var predicateFails []algorithm.PredicateFailureReason fit, reasons, err := PodFitsResources(pod, meta, nodeInfo) if err != nil { @@ -873,7 +873,7 @@ func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister, failu return checker.InterPodAffinityMatches } -func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func (c *PodAffinityChecker) InterPodAffinityMatches(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -883,7 +883,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interfac } // Now check if requirements will be satisfied on this node. - affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) + affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations) if err != nil { return false, nil, err } @@ -907,7 +907,7 @@ func (c *PodAffinityChecker) InterPodAffinityMatches(pod *api.Pod, meta interfac // First return value indicates whether a matching pod exists on a node that matches the topology key, // while the second return value indicates whether a matching pod exists anywhere. // TODO: Do we really need any pod matching, or all pods matching? I think the latter. 
-func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *api.Pod, allPods []*api.Pod, node *api.Node, term *api.PodAffinityTerm) (bool, bool, error) { +func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *v1.Pod, allPods []*v1.Pod, node *v1.Node, term *v1.PodAffinityTerm) (bool, bool, error) { matchingPodExists := false for _, existingPod := range allPods { match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(existingPod, pod, term) @@ -928,7 +928,7 @@ func (c *PodAffinityChecker) anyPodMatchesPodAffinityTerm(pod *api.Pod, allPods return false, matchingPodExists, nil } -func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityTerm) { +func getPodAffinityTerms(podAffinity *v1.PodAffinity) (terms []v1.PodAffinityTerm) { if podAffinity != nil { if len(podAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { terms = podAffinity.RequiredDuringSchedulingIgnoredDuringExecution @@ -941,7 +941,7 @@ func getPodAffinityTerms(podAffinity *api.PodAffinity) (terms []api.PodAffinityT return terms } -func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api.PodAffinityTerm) { +func getPodAntiAffinityTerms(podAntiAffinity *v1.PodAntiAffinity) (terms []v1.PodAffinityTerm) { if podAntiAffinity != nil { if len(podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution) != 0 { terms = podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution @@ -954,7 +954,7 @@ func getPodAntiAffinityTerms(podAntiAffinity *api.PodAntiAffinity) (terms []api. return terms } -func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) { +func getMatchingAntiAffinityTerms(pod *v1.Pod, nodeInfoMap map[string]*schedulercache.NodeInfo) ([]matchingPodAntiAffinityTerm, error) { allNodeNames := make([]string, 0, len(nodeInfoMap)) for name := range nodeInfoMap { allNodeNames = append(allNodeNames, name) @@ -985,7 +985,7 @@ func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedule } var nodeResult []matchingPodAntiAffinityTerm for _, existingPod := range nodeInfo.PodsWithAffinity() { - affinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations) + affinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations) if err != nil { catchError(err) return @@ -1012,10 +1012,10 @@ func getMatchingAntiAffinityTerms(pod *api.Pod, nodeInfoMap map[string]*schedule return result, firstError } -func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods []*api.Pod) ([]matchingPodAntiAffinityTerm, error) { +func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *v1.Pod, allPods []*v1.Pod) ([]matchingPodAntiAffinityTerm, error) { var result []matchingPodAntiAffinityTerm for _, existingPod := range allPods { - affinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations) + affinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations) if err != nil { return nil, err } @@ -1040,7 +1040,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTerms(pod *api.Pod, allPods // Checks if scheduling the pod onto this node would break any anti-affinity // rules indicated by the existing pods. 
-func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *api.Pod, meta interface{}, node *api.Node) bool { +func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *v1.Pod, meta interface{}, node *v1.Node) bool { var matchingTerms []matchingPodAntiAffinityTerm if predicateMeta, ok := meta.(*predicateMetadata); ok { matchingTerms = predicateMeta.matchingAntiAffinityTerms @@ -1072,7 +1072,7 @@ func (c *PodAffinityChecker) satisfiesExistingPodsAntiAffinity(pod *api.Pod, met } // Checks if scheduling the pod onto this node would break any rules of this pod. -func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *api.Pod, node *api.Node, affinity *api.Affinity) bool { +func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *v1.Pod, node *v1.Node, affinity *v1.Affinity) bool { allPods, err := c.podLister.List(labels.Everything()) if err != nil { return false @@ -1118,18 +1118,18 @@ func (c *PodAffinityChecker) satisfiesPodsAffinityAntiAffinity(pod *api.Pod, nod return true } -func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PodToleratesNodeTaints(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") } - taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) + taints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations) if err != nil { return false, nil, err } - tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations) + tolerations, err := v1.GetTolerationsFromPodAnnotations(pod.Annotations) if err != nil { return false, nil, err } @@ -1140,7 +1140,7 @@ func PodToleratesNodeTaints(pod *api.Pod, meta interface{}, nodeInfo *schedulerc return false, []algorithm.PredicateFailureReason{ErrTaintsTolerationsNotMatch}, nil } -func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint) bool { +func tolerationsToleratesTaints(tolerations []v1.Toleration, taints []v1.Taint) bool { // If the taint list is nil/empty, it is tolerated by all tolerations by default. if len(taints) == 0 { return true @@ -1154,11 +1154,11 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint for i := range taints { taint := &taints[i] // skip taints that have effect PreferNoSchedule, since it is for priorities - if taint.Effect == api.TaintEffectPreferNoSchedule { + if taint.Effect == v1.TaintEffectPreferNoSchedule { continue } - if !api.TaintToleratedByTolerations(taint, tolerations) { + if !v1.TaintToleratedByTolerations(taint, tolerations) { return false } } @@ -1167,13 +1167,13 @@ func tolerationsToleratesTaints(tolerations []api.Toleration, taints []api.Taint } // Determine if a pod is scheduled with best-effort QoS -func isPodBestEffort(pod *api.Pod) bool { +func isPodBestEffort(pod *v1.Pod) bool { return qos.GetPodQOS(pod) == qos.BestEffort } // CheckNodeMemoryPressurePredicate checks if a pod can be scheduled on a node // reporting memory pressure condition. 
-func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodeMemoryPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -1194,7 +1194,7 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo * // is node under pressure? for _, cond := range node.Status.Conditions { - if cond.Type == api.NodeMemoryPressure && cond.Status == api.ConditionTrue { + if cond.Type == v1.NodeMemoryPressure && cond.Status == v1.ConditionTrue { return false, []algorithm.PredicateFailureReason{ErrNodeUnderMemoryPressure}, nil } } @@ -1204,7 +1204,7 @@ func CheckNodeMemoryPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo * // CheckNodeDiskPressurePredicate checks if a pod can be scheduled on a node // reporting disk pressure condition. -func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func CheckNodeDiskPressurePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -1212,7 +1212,7 @@ func CheckNodeDiskPressurePredicate(pod *api.Pod, meta interface{}, nodeInfo *sc // is node under pressure? for _, cond := range node.Status.Conditions { - if cond.Type == api.NodeDiskPressure && cond.Status == api.ConditionTrue { + if cond.Type == v1.NodeDiskPressure && cond.Status == v1.ConditionTrue { return false, []algorithm.PredicateFailureReason{ErrNodeUnderDiskPressure}, nil } } diff --git a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go index 931c59229fa..9932d6cd437 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/predicates_test.go @@ -26,24 +26,24 @@ import ( "k8s.io/gengo/parser" "k8s.io/gengo/types" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/codeinspector" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -type FakeNodeInfo api.Node +type FakeNodeInfo v1.Node -func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*api.Node, error) { - node := api.Node(n) +func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { + node := v1.Node(n) return &node, nil } -type FakeNodeListInfo []api.Node +type FakeNodeListInfo []v1.Node -func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) { +func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { for _, node := range nodes { if node.Name == nodeName { return &node, nil @@ -52,9 +52,9 @@ func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) { return nil, fmt.Errorf("Unable to find node: %s", nodeName) } -type FakePersistentVolumeClaimInfo []api.PersistentVolumeClaim +type FakePersistentVolumeClaimInfo []v1.PersistentVolumeClaim -func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*api.PersistentVolumeClaim, 
error) { +func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) { for _, pvc := range pvcs { if pvc.Name == pvcID && pvc.Namespace == namespace { return &pvc, nil @@ -63,9 +63,9 @@ func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID) } -type FakePersistentVolumeInfo []api.PersistentVolume +type FakePersistentVolumeInfo []v1.PersistentVolume -func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*api.PersistentVolume, error) { +func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) { for _, pv := range pvs { if pv.Name == pvID { return &pv, nil @@ -75,66 +75,66 @@ func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*api.P } var ( - opaqueResourceA = api.OpaqueIntResourceName("AAA") - opaqueResourceB = api.OpaqueIntResourceName("BBB") + opaqueResourceA = v1.OpaqueIntResourceName("AAA") + opaqueResourceB = v1.OpaqueIntResourceName("BBB") ) -func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA int64) api.NodeResources { - return api.NodeResources{ - Capacity: api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), - opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), +func makeResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA int64) v1.NodeResources { + return v1.NodeResources{ + Capacity: v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), + opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), }, } } -func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA int64) api.ResourceList { - return api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), - api.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), - api.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), - opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), +func makeAllocatableResources(milliCPU, memory, nvidiaGPUs, pods, opaqueA int64) v1.ResourceList { + return v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(memory, resource.BinarySI), + v1.ResourcePods: *resource.NewQuantity(pods, resource.DecimalSI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(nvidiaGPUs, resource.DecimalSI), + opaqueResourceA: *resource.NewQuantity(opaqueA, resource.DecimalSI), } } -func newResourcePod(usage ...schedulercache.Resource) *api.Pod { - containers := []api.Container{} +func newResourcePod(usage ...schedulercache.Resource) *v1.Pod { + containers := []v1.Container{} for _, req := range usage { - containers = append(containers, api.Container{ - Resources: api.ResourceRequirements{Requests: req.ResourceList()}, + containers = append(containers, 
v1.Container{ + Resources: v1.ResourceRequirements{Requests: req.ResourceList()}, }) } - return &api.Pod{ - Spec: api.PodSpec{ + return &v1.Pod{ + Spec: v1.PodSpec{ Containers: containers, }, } } -func newResourceInitPod(pod *api.Pod, usage ...schedulercache.Resource) *api.Pod { +func newResourceInitPod(pod *v1.Pod, usage ...schedulercache.Resource) *v1.Pod { pod.Spec.InitContainers = newResourcePod(usage...).Spec.Containers return pod } -func PredicateMetadata(p *api.Pod, nodeInfo map[string]*schedulercache.NodeInfo) interface{} { +func PredicateMetadata(p *v1.Pod, nodeInfo map[string]*schedulercache.NodeInfo) interface{} { pm := PredicateMetadataFactory{algorithm.FakePodLister{p}} return pm.GetMetadata(p, nodeInfo) } func TestPodFitsResources(t *testing.T) { enoughPodsTests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo fits bool test string reasons []algorithm.PredicateFailureReason }{ { - pod: &api.Pod{}, + pod: &v1.Pod{}, nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), fits: true, @@ -147,8 +147,8 @@ func TestPodFitsResources(t *testing.T) { fits: false, test: "too many resources fails", reasons: []algorithm.PredicateFailureReason{ - NewInsufficientResourceError(api.ResourceCPU, 1, 10, 10), - NewInsufficientResourceError(api.ResourceMemory, 1, 20, 20), + NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10), + NewInsufficientResourceError(v1.ResourceMemory, 1, 20, 20), }, }, { @@ -157,7 +157,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})), fits: false, test: "too many resources fails due to init container cpu", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceCPU, 3, 8, 10)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 3, Memory: 1}, schedulercache.Resource{MilliCPU: 2, Memory: 1}), @@ -165,7 +165,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 19})), fits: false, test: "too many resources fails due to highest init container cpu", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceCPU, 3, 8, 10)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 3, 8, 10)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}), @@ -173,7 +173,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), fits: false, test: "too many resources fails due to init container memory", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceMemory, 3, 19, 20)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 3}, schedulercache.Resource{MilliCPU: 1, Memory: 2}), @@ -181,7 +181,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), fits: false, test: "too many resources fails due to highest init container memory", - reasons: 
[]algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceMemory, 3, 19, 20)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 3, 19, 20)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), schedulercache.Resource{MilliCPU: 1, Memory: 1}), @@ -210,7 +210,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 5})), fits: false, test: "one resource memory fits", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceCPU, 2, 9, 10)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceCPU, 2, 9, 10)}, }, { pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 2}), @@ -218,7 +218,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), fits: false, test: "one resource cpu fits", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceMemory, 2, 19, 20)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceMemory, 2, 19, 20)}, }, { pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), @@ -235,85 +235,85 @@ func TestPodFitsResources(t *testing.T) { test: "equal edge case for init container", }, { - pod: newResourcePod(schedulercache.Resource{OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 1}}), + pod: newResourcePod(schedulercache.Resource{OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}), nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})), fits: true, test: "opaque resource fits", }, { - pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 1}}), + pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), schedulercache.Resource{OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}), nodeInfo: schedulercache.NewNodeInfo(newResourcePod(schedulercache.Resource{})), fits: true, test: "opaque resource fits for init container", }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 10}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 0}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 0}})), fits: false, test: "opaque resource capacity enforced", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 10}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 10}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 0}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: 
map[v1.ResourceName]int64{opaqueResourceA: 0}})), fits: false, test: "opaque resource capacity enforced for init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 10, 0, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 1}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 5}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})), fits: false, test: "opaque resource allocatable enforced", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 1}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 1}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 5}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 5}})), fits: false, test: "opaque resource allocatable enforced for init container", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 1, 5, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 3}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 3}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}, + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 2}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})), fits: false, test: "opaque resource allocatable enforced for multiple containers", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 3}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 3}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}, + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 2}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, 
OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})), fits: true, test: "opaque resource allocatable admits multiple init containers", }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 6}}, - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 3}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 6}}, + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 3}}), nodeInfo: schedulercache.NewNodeInfo( - newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceA: 2}})), + newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceA: 2}})), fits: false, test: "opaque resource allocatable enforced for multiple init containers", reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(opaqueResourceA, 6, 2, 5)}, }, { pod: newResourcePod( - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceB: 1}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}), nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), fits: false, @@ -322,7 +322,7 @@ func TestPodFitsResources(t *testing.T) { }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{}), - schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[api.ResourceName]int64{opaqueResourceB: 1}}), + schedulercache.Resource{MilliCPU: 1, Memory: 1, OpaqueIntResources: map[v1.ResourceName]int64{opaqueResourceB: 1}}), nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 0, Memory: 0})), fits: false, @@ -332,7 +332,7 @@ func TestPodFitsResources(t *testing.T) { } for _, test := range enoughPodsTests { - node := api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5)}} + node := v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 5).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 5)}} test.nodeInfo.SetNode(&node) fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) if err != nil { @@ -347,19 +347,19 @@ func TestPodFitsResources(t *testing.T) { } notEnoughPodsTests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo fits bool test string reasons []algorithm.PredicateFailureReason }{ { - pod: &api.Pod{}, + pod: &v1.Pod{}, nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 20})), fits: false, test: "even without specified resources predicate fails when there's no space for additional pod", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, { pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1}), @@ -367,7 +367,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 5})), fits: false, test: "even if both 
resources fit predicate fails when there's no space for additional pod", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, { pod: newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), @@ -375,7 +375,7 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), fits: false, test: "even for equal edge case predicate fails when there's no space for additional pod", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, { pod: newResourceInitPod(newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 1}), schedulercache.Resource{MilliCPU: 5, Memory: 1}), @@ -383,11 +383,11 @@ func TestPodFitsResources(t *testing.T) { newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), fits: false, test: "even for equal edge case predicate fails when there's no space for additional pod due to init container", - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourcePods, 1, 1, 1)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourcePods, 1, 1, 1)}, }, } for _, test := range notEnoughPodsTests { - node := api.Node{Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1, 0)}} + node := v1.Node{Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: makeAllocatableResources(10, 20, 0, 1, 0)}} test.nodeInfo.SetNode(&node) fits, reasons, err := PodFitsResources(test.pod, PredicateMetadata(test.pod, nil), test.nodeInfo) if err != nil { @@ -404,25 +404,25 @@ func TestPodFitsResources(t *testing.T) { func TestPodFitsHost(t *testing.T) { tests := []struct { - pod *api.Pod - node *api.Node + pod *v1.Pod + node *v1.Node fits bool test string }{ { - pod: &api.Pod{}, - node: &api.Node{}, + pod: &v1.Pod{}, + node: &v1.Node{}, fits: true, test: "no host specified", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeName: "foo", }, }, - node: &api.Node{ - ObjectMeta: api.ObjectMeta{ + node: &v1.Node{ + ObjectMeta: v1.ObjectMeta{ Name: "foo", }, }, @@ -430,13 +430,13 @@ func TestPodFitsHost(t *testing.T) { test: "host matches", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeName: "bar", }, }, - node: &api.Node{ - ObjectMeta: api.ObjectMeta{ + node: &v1.Node{ + ObjectMeta: v1.ObjectMeta{ Name: "foo", }, }, @@ -462,15 +462,15 @@ func TestPodFitsHost(t *testing.T) { } } -func newPod(host string, hostPorts ...int) *api.Pod { - networkPorts := []api.ContainerPort{} +func newPod(host string, hostPorts ...int) *v1.Pod { + networkPorts := []v1.ContainerPort{} for _, port := range hostPorts { - networkPorts = append(networkPorts, api.ContainerPort{HostPort: int32(port)}) + networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)}) } - return &api.Pod{ - Spec: api.PodSpec{ + return &v1.Pod{ + Spec: v1.PodSpec{ NodeName: host, - Containers: []api.Container{ + Containers: []v1.Container{ { Ports: networkPorts, }, @@ -481,13 +481,13 @@ func newPod(host string, hostPorts ...int) *api.Pod { func TestPodFitsHostPorts(t *testing.T) { tests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo fits bool test string }{ { - pod: 
&api.Pod{}, + pod: &v1.Pod{}, nodeInfo: schedulercache.NewNodeInfo(), fits: true, test: "nothing running", @@ -539,25 +539,25 @@ func TestPodFitsHostPorts(t *testing.T) { func TestGetUsedPorts(t *testing.T) { tests := []struct { - pods []*api.Pod + pods []*v1.Pod ports map[int]bool }{ { - []*api.Pod{ + []*v1.Pod{ newPod("m1", 9090), }, map[int]bool{9090: true}, }, { - []*api.Pod{ + []*v1.Pod{ newPod("m1", 9090), newPod("m1", 9091), }, map[int]bool{9090: true, 9091: true}, }, { - []*api.Pod{ + []*v1.Pod{ newPod("m1", 9090), newPod("m2", 9091), }, @@ -574,22 +574,22 @@ func TestGetUsedPorts(t *testing.T) { } func TestDiskConflicts(t *testing.T) { - volState := api.PodSpec{ - Volumes: []api.Volume{ + volState := v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "foo", }, }, }, }, } - volState2 := api.PodSpec{ - Volumes: []api.Volume{ + volState2 := v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - GCEPersistentDisk: &api.GCEPersistentDiskVolumeSource{ + VolumeSource: v1.VolumeSource{ + GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{ PDName: "bar", }, }, @@ -597,15 +597,15 @@ func TestDiskConflicts(t *testing.T) { }, } tests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo isOk bool test string }{ - {&api.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&api.Pod{}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "one state"}, - {&api.Pod{Spec: volState}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), false, "same state"}, - {&api.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "different state"}, + {&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} @@ -627,22 +627,22 @@ func TestDiskConflicts(t *testing.T) { } func TestAWSDiskConflicts(t *testing.T) { - volState := api.PodSpec{ - Volumes: []api.Volume{ + volState := v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ VolumeID: "foo", }, }, }, }, } - volState2 := api.PodSpec{ - Volumes: []api.Volume{ + volState2 := v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{ + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ VolumeID: "bar", }, }, @@ -650,15 +650,15 @@ func TestAWSDiskConflicts(t *testing.T) { }, } tests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo isOk bool test string }{ - {&api.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&api.Pod{}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "one state"}, - {&api.Pod{Spec: volState}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), false, "same state"}, - {&api.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "different state"}, + 
{&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} @@ -680,11 +680,11 @@ func TestAWSDiskConflicts(t *testing.T) { } func TestRBDDiskConflicts(t *testing.T) { - volState := api.PodSpec{ - Volumes: []api.Volume{ + volState := v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - RBD: &api.RBDVolumeSource{ + VolumeSource: v1.VolumeSource{ + RBD: &v1.RBDVolumeSource{ CephMonitors: []string{"a", "b"}, RBDPool: "foo", RBDImage: "bar", @@ -694,11 +694,11 @@ func TestRBDDiskConflicts(t *testing.T) { }, }, } - volState2 := api.PodSpec{ - Volumes: []api.Volume{ + volState2 := v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - RBD: &api.RBDVolumeSource{ + VolumeSource: v1.VolumeSource{ + RBD: &v1.RBDVolumeSource{ CephMonitors: []string{"c", "d"}, RBDPool: "foo", RBDImage: "bar", @@ -709,15 +709,15 @@ func TestRBDDiskConflicts(t *testing.T) { }, } tests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo isOk bool test string }{ - {&api.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, - {&api.Pod{}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "one state"}, - {&api.Pod{Spec: volState}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), false, "same state"}, - {&api.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&api.Pod{Spec: volState}), true, "different state"}, + {&v1.Pod{}, schedulercache.NewNodeInfo(), true, "nothing"}, + {&v1.Pod{}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "one state"}, + {&v1.Pod{Spec: volState}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), false, "same state"}, + {&v1.Pod{Spec: volState2}, schedulercache.NewNodeInfo(&v1.Pod{Spec: volState}), true, "different state"}, } expectedFailureReasons := []algorithm.PredicateFailureReason{ErrDiskConflict} @@ -740,19 +740,19 @@ func TestRBDDiskConflicts(t *testing.T) { func TestPodFitsSelector(t *testing.T) { tests := []struct { - pod *api.Pod + pod *v1.Pod labels map[string]string fits bool test string }{ { - pod: &api.Pod{}, + pod: &v1.Pod{}, fits: true, test: "no selector", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, @@ -762,8 +762,8 @@ func TestPodFitsSelector(t *testing.T) { test: "missing labels", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, @@ -776,8 +776,8 @@ func TestPodFitsSelector(t *testing.T) { test: "same labels", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, @@ -791,8 +791,8 @@ func TestPodFitsSelector(t *testing.T) { test: "node labels are superset", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", "baz": "blah", @@ -806,10 +806,10 @@ func TestPodFitsSelector(t *testing.T) { test: "node labels are subset", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` 
{"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -829,10 +829,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with matchExpressions using In operator that matches the existing node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -853,10 +853,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with matchExpressions using Gt operator that matches the existing node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -876,10 +876,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with matchExpressions using NotIn operator that matches the existing node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -898,10 +898,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with matchExpressions using Exists operator that matches the existing node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -921,10 +921,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with affinity that don't match node's labels won't schedule onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": null }}}`, @@ -938,10 +938,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with a nil []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [] }}}`, @@ -955,10 +955,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with an empty []NodeSelectorTerm in affinity, can't match the node's labels and won't schedule onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{}, {}] }}}`, @@ -972,10 +972,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with invalid NodeSelectTerms in affinity will match no objects and won't schedule onto the node", }, { - pod: &api.Pod{ - ObjectMeta: 
api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{"matchExpressions": [{}]}] }}}`, @@ -989,8 +989,8 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with empty MatchExpressions is not a valid value will match no objects and won't schedule onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ "some-key": "some-value", }, @@ -1003,10 +1003,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with no Affinity will schedule onto a node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": null }}`, }, @@ -1019,10 +1019,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with Affinity but nil NodeSelector will schedule onto a node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -1045,10 +1045,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with multiple matchExpressions ANDed that matches the existing node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -1071,10 +1071,10 @@ func TestPodFitsSelector(t *testing.T) { test: "Pod with multiple matchExpressions ANDed that doesn't match the existing node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [ { @@ -1104,10 +1104,10 @@ func TestPodFitsSelector(t *testing.T) { }, // TODO: Uncomment this test when implement RequiredDuringSchedulingRequiredDuringExecution // { - // pod: &api.Pod{ - // ObjectMeta: api.ObjectMeta{ + // pod: &v1.Pod{ + // ObjectMeta: v1.ObjectMeta{ // Annotations: map[string]string{ - // api.AffinityAnnotationKey: ` + // v1.AffinityAnnotationKey: ` // {"nodeAffinity": { // "requiredDuringSchedulingRequiredDuringExecution": { // "nodeSelectorTerms": [{ @@ -1139,10 +1139,10 @@ func TestPodFitsSelector(t *testing.T) { // "requiredDuringSchedulingIgnoredDuringExecution indicated that don't match node's labels and won't schedule onto the node", // }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -1153,7 +1153,7 @@ func TestPodFitsSelector(t *testing.T) { }}}`, }, }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, @@ -1167,10 +1167,10 @@ func TestPodFitsSelector(t 
*testing.T) { "both are satisfied, will schedule onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { "nodeSelectorTerms": [{ "matchExpressions": [{ @@ -1181,7 +1181,7 @@ func TestPodFitsSelector(t *testing.T) { }}}`, }, }, - Spec: api.PodSpec{ + Spec: v1.PodSpec{ NodeSelector: map[string]string{ "foo": "bar", }, @@ -1198,7 +1198,7 @@ func TestPodFitsSelector(t *testing.T) { expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeSelectorNotMatch} for _, test := range tests { - node := api.Node{ObjectMeta: api.ObjectMeta{Labels: test.labels}} + node := v1.Node{ObjectMeta: v1.ObjectMeta{Labels: test.labels}} nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(&node) @@ -1218,7 +1218,7 @@ func TestPodFitsSelector(t *testing.T) { func TestNodeLabelPresence(t *testing.T) { label := map[string]string{"foo": "bar", "bar": "foo"} tests := []struct { - pod *api.Pod + pod *v1.Pod labels []string presence bool fits bool @@ -1264,7 +1264,7 @@ func TestNodeLabelPresence(t *testing.T) { expectedFailureReasons := []algorithm.PredicateFailureReason{ErrNodeLabelPresenceViolated} for _, test := range tests { - node := api.Node{ObjectMeta: api.ObjectMeta{Labels: label}} + node := v1.Node{ObjectMeta: v1.ObjectMeta{Labels: label}} nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(&node) @@ -1300,109 +1300,109 @@ func TestServiceAffinity(t *testing.T) { "region": "r2", "zone": "z22", } - node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}} - node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labels2}} - node3 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labels3}} - node4 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labels4}} - node5 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labels4}} + node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labels1}} + node2 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labels2}} + node3 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labels3}} + node4 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labels4}} + node5 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labels4}} tests := []struct { - pod *api.Pod - pods []*api.Pod - services []*api.Service - node *api.Node + pod *v1.Pod + pods []*v1.Pod + services []*v1.Service + node *v1.Node labels []string fits bool test string }{ { - pod: new(api.Pod), + pod: new(v1.Pod), node: &node1, fits: true, labels: []string{"region"}, test: "nothing scheduled", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeSelector: map[string]string{"region": "r1"}}}, node: &node1, fits: true, labels: []string{"region"}, test: "pod with region label match", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeSelector: map[string]string{"region": "r2"}}}, node: &node1, fits: false, labels: []string{"region"}, test: "pod with region label mismatch", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: 
selector}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}}, fits: true, labels: []string{"region"}, test: "service pod on same node", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}}, fits: true, labels: []string{"region"}, test: "service pod on different node, region match", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}}, fits: false, labels: []string{"region"}, test: "service pod on different node, region mismatch", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns2"}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: v1.ObjectMeta{Namespace: "ns2"}}}, fits: true, labels: []string{"region"}, test: "service in different namespace, region mismatch", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns2"}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns2"}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}}, fits: true, labels: []string{"region"}, test: "pod in different namespace, region mismatch", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: selector, Namespace: "ns1"}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: 
selector}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}}, fits: false, labels: []string{"region"}, test: "service and pod in same namespace, region mismatch", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}}, node: &node1, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}}, fits: false, labels: []string{"region", "zone"}, test: "service pod on different node, multiple labels, not all match", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}}, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: selector}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: selector}}}, node: &node4, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: selector}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: selector}}}, fits: true, labels: []string{"region", "zone"}, test: "service pod on different node, multiple labels, all match", @@ -1411,7 +1411,7 @@ func TestServiceAffinity(t *testing.T) { expectedFailureReasons := []algorithm.PredicateFailureReason{ErrServiceAffinityViolated} for _, test := range tests { testIt := func(skipPrecompute bool) { - nodes := []api.Node{node1, node2, node3, node4, node5} + nodes := []v1.Node{node1, node2, node3, node4, node5} nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(test.node) nodeInfoMap := map[string]*schedulercache.NodeInfo{test.node.Name: nodeInfo} @@ -1445,23 +1445,23 @@ func TestServiceAffinity(t *testing.T) { } func TestEBSVolumeCountConflicts(t *testing.T) { - oneVolPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + oneVolPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ovp"}, }, }, }, }, } - ebsPVCPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + ebsPVCPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: "someEBSVol", }, }, @@ -1469,19 +1469,19 @@ func TestEBSVolumeCountConflicts(t *testing.T) { }, }, } - splitPVCPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + splitPVCPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: "someNonEBSVol", }, }, }, { - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: "someEBSVol", }, }, @@ 
-1489,55 +1489,55 @@ func TestEBSVolumeCountConflicts(t *testing.T) { }, }, } - twoVolPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + twoVolPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp1"}, }, }, { - VolumeSource: api.VolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "tvp2"}, }, }, }, }, } - splitVolsPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + splitVolsPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{}, }, }, { - VolumeSource: api.VolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, + VolumeSource: v1.VolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "svp"}, }, }, }, }, } - nonApplicablePod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + nonApplicablePod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - HostPath: &api.HostPathVolumeSource{}, + VolumeSource: v1.VolumeSource{ + HostPath: &v1.HostPathVolumeSource{}, }, }, }, }, } - deletedPVCPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + deletedPVCPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: "deletedPVC", }, }, @@ -1545,12 +1545,12 @@ func TestEBSVolumeCountConflicts(t *testing.T) { }, }, } - deletedPVPod := &api.Pod{ - Spec: api.PodSpec{ - Volumes: []api.Volume{ + deletedPVPod := &v1.Pod{ + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ { - VolumeSource: api.VolumeSource{ - PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ ClaimName: "pvcWithDeletedPV", }, }, @@ -1558,104 +1558,104 @@ func TestEBSVolumeCountConflicts(t *testing.T) { }, }, } - emptyPod := &api.Pod{ - Spec: api.PodSpec{}, + emptyPod := &v1.Pod{ + Spec: v1.PodSpec{}, } tests := []struct { - newPod *api.Pod - existingPods []*api.Pod + newPod *v1.Pod + existingPods []*v1.Pod maxVols int fits bool test string }{ { newPod: oneVolPod, - existingPods: []*api.Pod{twoVolPod, oneVolPod}, + existingPods: []*v1.Pod{twoVolPod, oneVolPod}, maxVols: 4, fits: true, test: "fits when node capacity >= new pod's EBS volumes", }, { newPod: twoVolPod, - existingPods: []*api.Pod{oneVolPod}, + existingPods: []*v1.Pod{oneVolPod}, maxVols: 2, fits: false, test: "doesn't fit when node capacity < new pod's EBS volumes", }, { newPod: splitVolsPod, - existingPods: []*api.Pod{twoVolPod}, + existingPods: []*v1.Pod{twoVolPod}, maxVols: 3, fits: true, test: "new pod's count ignores non-EBS volumes", }, { newPod: twoVolPod, - existingPods: []*api.Pod{splitVolsPod, nonApplicablePod, emptyPod}, + existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, maxVols: 3, fits: true, test: "existing pods' counts ignore non-EBS volumes", }, { newPod: ebsPVCPod, - 
existingPods: []*api.Pod{splitVolsPod, nonApplicablePod, emptyPod}, + existingPods: []*v1.Pod{splitVolsPod, nonApplicablePod, emptyPod}, maxVols: 3, fits: true, test: "new pod's count considers PVCs backed by EBS volumes", }, { newPod: splitPVCPod, - existingPods: []*api.Pod{splitVolsPod, oneVolPod}, + existingPods: []*v1.Pod{splitVolsPod, oneVolPod}, maxVols: 3, fits: true, test: "new pod's count ignores PVCs not backed by EBS volumes", }, { newPod: twoVolPod, - existingPods: []*api.Pod{oneVolPod, ebsPVCPod}, + existingPods: []*v1.Pod{oneVolPod, ebsPVCPod}, maxVols: 3, fits: false, test: "existing pods' counts considers PVCs backed by EBS volumes", }, { newPod: twoVolPod, - existingPods: []*api.Pod{oneVolPod, twoVolPod, ebsPVCPod}, + existingPods: []*v1.Pod{oneVolPod, twoVolPod, ebsPVCPod}, maxVols: 4, fits: true, test: "already-mounted EBS volumes are always ok to allow", }, { newPod: splitVolsPod, - existingPods: []*api.Pod{oneVolPod, oneVolPod, ebsPVCPod}, + existingPods: []*v1.Pod{oneVolPod, oneVolPod, ebsPVCPod}, maxVols: 3, fits: true, test: "the same EBS volumes are not counted multiple times", }, { newPod: ebsPVCPod, - existingPods: []*api.Pod{oneVolPod, deletedPVCPod}, + existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, maxVols: 2, fits: false, test: "pod with missing PVC is counted towards the PV limit", }, { newPod: ebsPVCPod, - existingPods: []*api.Pod{oneVolPod, deletedPVCPod}, + existingPods: []*v1.Pod{oneVolPod, deletedPVCPod}, maxVols: 3, fits: true, test: "pod with missing PVC is counted towards the PV limit", }, { newPod: ebsPVCPod, - existingPods: []*api.Pod{oneVolPod, deletedPVPod}, + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, maxVols: 2, fits: false, test: "pod with missing PV is counted towards the PV limit", }, { newPod: ebsPVCPod, - existingPods: []*api.Pod{oneVolPod, deletedPVPod}, + existingPods: []*v1.Pod{oneVolPod, deletedPVPod}, maxVols: 3, fits: true, test: "pod with missing PV is counted towards the PV limit", @@ -1664,44 +1664,44 @@ func TestEBSVolumeCountConflicts(t *testing.T) { pvInfo := FakePersistentVolumeInfo{ { - ObjectMeta: api.ObjectMeta{Name: "someEBSVol"}, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeSource: api.PersistentVolumeSource{ - AWSElasticBlockStore: &api.AWSElasticBlockStoreVolumeSource{VolumeID: "ebsVol"}, + ObjectMeta: v1.ObjectMeta{Name: "someEBSVol"}, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{VolumeID: "ebsVol"}, }, }, }, { - ObjectMeta: api.ObjectMeta{Name: "someNonEBSVol"}, - Spec: api.PersistentVolumeSpec{ - PersistentVolumeSource: api.PersistentVolumeSource{}, + ObjectMeta: v1.ObjectMeta{Name: "someNonEBSVol"}, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{}, }, }, } pvcInfo := FakePersistentVolumeClaimInfo{ { - ObjectMeta: api.ObjectMeta{Name: "someEBSVol"}, - Spec: api.PersistentVolumeClaimSpec{VolumeName: "someEBSVol"}, + ObjectMeta: v1.ObjectMeta{Name: "someEBSVol"}, + Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someEBSVol"}, }, { - ObjectMeta: api.ObjectMeta{Name: "someNonEBSVol"}, - Spec: api.PersistentVolumeClaimSpec{VolumeName: "someNonEBSVol"}, + ObjectMeta: v1.ObjectMeta{Name: "someNonEBSVol"}, + Spec: v1.PersistentVolumeClaimSpec{VolumeName: "someNonEBSVol"}, }, { - ObjectMeta: api.ObjectMeta{Name: "pvcWithDeletedPV"}, - Spec: api.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"}, + ObjectMeta: v1.ObjectMeta{Name: "pvcWithDeletedPV"}, + Spec: 
v1.PersistentVolumeClaimSpec{VolumeName: "pvcWithDeletedPV"}, }, } filter := VolumeFilter{ - FilterVolume: func(vol *api.Volume) (string, bool) { + FilterVolume: func(vol *v1.Volume) (string, bool) { if vol.AWSElasticBlockStore != nil { return vol.AWSElasticBlockStore.VolumeID, true } return "", false }, - FilterPersistentVolume: func(pv *api.PersistentVolume) (string, bool) { + FilterPersistentVolume: func(pv *v1.PersistentVolume) (string, bool) { if pv.Spec.AWSElasticBlockStore != nil { return pv.Spec.AWSElasticBlockStore.VolumeID, true } @@ -1803,14 +1803,14 @@ func TestPredicatesRegistered(t *testing.T) { } } -func newPodWithPort(hostPorts ...int) *api.Pod { - networkPorts := []api.ContainerPort{} +func newPodWithPort(hostPorts ...int) *v1.Pod { + networkPorts := []v1.ContainerPort{} for _, port := range hostPorts { - networkPorts = append(networkPorts, api.ContainerPort{HostPort: int32(port)}) + networkPorts = append(networkPorts, v1.ContainerPort{HostPort: int32(port)}) } - return &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + return &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Ports: networkPorts, }, @@ -1821,21 +1821,21 @@ func newPodWithPort(hostPorts ...int) *api.Pod { func TestRunGeneralPredicates(t *testing.T) { resourceTests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo - node *api.Node + node *v1.Node fits bool test string wErr error reasons []algorithm.PredicateFailureReason }{ { - pod: &api.Pod{}, + pod: &v1.Pod{}, nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), - node: &api.Node{ - ObjectMeta: api.ObjectMeta{Name: "machine1"}, - Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, + node: &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "machine1"}, + Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, }, fits: true, wErr: nil, @@ -1845,23 +1845,23 @@ func TestRunGeneralPredicates(t *testing.T) { pod: newResourcePod(schedulercache.Resource{MilliCPU: 8, Memory: 10}), nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 19})), - node: &api.Node{ - ObjectMeta: api.ObjectMeta{Name: "machine1"}, - Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, + node: &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "machine1"}, + Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, }, fits: false, wErr: nil, reasons: []algorithm.PredicateFailureReason{ - NewInsufficientResourceError(api.ResourceCPU, 8, 5, 10), - NewInsufficientResourceError(api.ResourceMemory, 10, 19, 20), + NewInsufficientResourceError(v1.ResourceCPU, 8, 5, 10), + NewInsufficientResourceError(v1.ResourceMemory, 10, 19, 20), }, test: "not enough cpu and memory resource", }, { - pod: &api.Pod{}, + pod: &v1.Pod{}, nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 9, Memory: 19})), - node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0)}}, + node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0)}}, fits: true, wErr: nil, test: "no 
resources/port/host requested always fits on GPU machine", @@ -1870,31 +1870,31 @@ func TestRunGeneralPredicates(t *testing.T) { pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}), nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 1})), - node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0)}}, + node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0)}}, fits: false, wErr: nil, - reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(api.ResourceNvidiaGPU, 1, 1, 1)}, + reasons: []algorithm.PredicateFailureReason{NewInsufficientResourceError(v1.ResourceNvidiaGPU, 1, 1, 1)}, test: "not enough GPU resource", }, { pod: newResourcePod(schedulercache.Resource{MilliCPU: 3, Memory: 1, NvidiaGPU: 1}), nodeInfo: schedulercache.NewNodeInfo( newResourcePod(schedulercache.Resource{MilliCPU: 5, Memory: 10, NvidiaGPU: 0})), - node: &api.Node{Status: api.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0)}}, + node: &v1.Node{Status: v1.NodeStatus{Capacity: makeResources(10, 20, 1, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 1, 32, 0)}}, fits: true, wErr: nil, test: "enough GPU resource", }, { - pod: &api.Pod{ - Spec: api.PodSpec{ + pod: &v1.Pod{ + Spec: v1.PodSpec{ NodeName: "machine2", }, }, nodeInfo: schedulercache.NewNodeInfo(), - node: &api.Node{ - ObjectMeta: api.ObjectMeta{Name: "machine1"}, - Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, + node: &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "machine1"}, + Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, }, fits: false, wErr: nil, @@ -1904,9 +1904,9 @@ func TestRunGeneralPredicates(t *testing.T) { { pod: newPodWithPort(123), nodeInfo: schedulercache.NewNodeInfo(newPodWithPort(123)), - node: &api.Node{ - ObjectMeta: api.ObjectMeta{Name: "machine1"}, - Status: api.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, + node: &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: "machine1"}, + Status: v1.NodeStatus{Capacity: makeResources(10, 20, 0, 32, 0).Capacity, Allocatable: makeAllocatableResources(10, 20, 0, 32, 0)}, }, fits: false, wErr: nil, @@ -1936,26 +1936,26 @@ func TestInterPodAffinity(t *testing.T) { "zone": "z11", } podLabel2 := map[string]string{"security": "S1"} - node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}} + node1 := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labels1}} tests := []struct { - pod *api.Pod - pods []*api.Pod - node *api.Node + pod *v1.Pod + pods []*v1.Pod + node *v1.Node fits bool test string }{ { - pod: new(api.Pod), + pod: new(v1.Pod), node: &node1, fits: true, test: "A pod that has no required pod affinity scheduling rules can schedule onto a node with no existing pods", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ 
-1971,17 +1971,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: true, test: "satisfies with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using In operator that matches the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: `{"podAffinity": { + v1.AffinityAnnotationKey: `{"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { "matchExpressions": [{ @@ -1996,17 +1996,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: true, test: "satisfies the pod with requiredDuringSchedulingIgnoredDuringExecution in PodAffinity using not in operator in labelSelector that matches the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2022,17 +2022,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel, Namespace: "ns"}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel, Namespace: "ns"}}}, node: &node1, fits: false, test: "Does not satisfy the PodAffinity with labelSelector because of diff Namespace", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2047,17 +2047,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: false, test: "Doesn't satisfy the PodAffinity because of unmatching labelSelector with the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [ { @@ -2090,17 +2090,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: true, test: "satisfies the PodAffinity with different label Operators in multiple RequiredDuringSchedulingIgnoredDuringExecution ", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, 
Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [ { @@ -2133,17 +2133,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: false, test: "The labelSelector requirements(items of matchExpressions) are ANDed, the pod cannot schedule onto the node because one of the matchExpression item don't match.", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2171,18 +2171,18 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: true, test: "satisfies the PodAffinity and PodAntiAffinity with the existing pod", }, // TODO: Uncomment this block when implement RequiredDuringSchedulingRequiredDuringExecution. //{ - // pod: &api.Pod{ - // ObjectMeta: api.ObjectMeta{ + // pod: &v1.Pod{ + // ObjectMeta: v1.ObjectMeta{ // Labels: podLabel2, // Annotations: map[string]string{ - // api.AffinityAnnotationKey: ` + // v1.AffinityAnnotationKey: ` // {"podAffinity": { // "requiredDuringSchedulingRequiredDuringExecution": [ // { @@ -2215,17 +2215,17 @@ func TestInterPodAffinity(t *testing.T) { // }, // }, // }, - // pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podlabel}}}, + // pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podlabel}}}, // node: &node1, // fits: true, // test: "satisfies the PodAffinity with different Label Operators in multiple RequiredDuringSchedulingRequiredDuringExecution ", //}, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2253,10 +2253,10 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, - ObjectMeta: api.ObjectMeta{Labels: podLabel, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: v1.ObjectMeta{Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"PodAntiAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2276,11 +2276,11 @@ func TestInterPodAffinity(t *testing.T) { test: "satisfies the PodAffinity and PodAntiAffinity and PodAntiAffinity symmetry with the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel2, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2308,17 +2308,17 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: 
[]*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: false, test: "satisfies the PodAffinity but doesn't satisfies the PodAntiAffinity with the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2346,10 +2346,10 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, - ObjectMeta: api.ObjectMeta{Labels: podLabel, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: v1.ObjectMeta{Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"PodAntiAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2369,11 +2369,11 @@ func TestInterPodAffinity(t *testing.T) { test: "satisfies the PodAffinity and PodAntiAffinity but doesn't satisfies PodAntiAffinity symmetry with the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2389,21 +2389,21 @@ func TestInterPodAffinity(t *testing.T) { }, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel}}}, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel}}}, node: &node1, fits: false, test: "pod matches its own Label in PodAffinity and that matches the existing pod Labels", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, - ObjectMeta: api.ObjectMeta{Labels: podLabel, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: v1.ObjectMeta{Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"PodAntiAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2423,15 +2423,15 @@ func TestInterPodAffinity(t *testing.T) { test: "verify that PodAntiAffinity from existing pod is respected when pod has no AntiAffinity constraints. 
doesn't satisfy PodAntiAffinity symmetry with the existing pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: podLabel, }, }, - pods: []*api.Pod{{Spec: api.PodSpec{NodeName: "machine1"}, - ObjectMeta: api.ObjectMeta{Labels: podLabel, + pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "machine1"}, + ObjectMeta: v1.ObjectMeta{Labels: podLabel, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"PodAntiAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2455,7 +2455,7 @@ func TestInterPodAffinity(t *testing.T) { for _, test := range tests { node := test.node - var podsOnNode []*api.Pod + var podsOnNode []*v1.Pod for _, pod := range test.pods { if pod.Spec.NodeName == node.Name { podsOnNode = append(podsOnNode, pod) @@ -2465,7 +2465,7 @@ func TestInterPodAffinity(t *testing.T) { fit := PodAffinityChecker{ info: FakeNodeInfo(*node), podLister: algorithm.FakePodLister(test.pods), - failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")}, } nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) nodeInfo.SetNode(test.node) @@ -2498,17 +2498,17 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { "region": "India", } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []v1.Node fits map[string]bool test string }{ { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2524,13 +2524,13 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { }, }, }, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelA}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelA}}, }, - nodes: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + nodes: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, }, fits: map[string]bool{ "machine1": true, @@ -2540,10 +2540,10 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { test: "A pod can be scheduled onto all the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` { "nodeAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": { @@ -2572,13 +2572,13 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { }, }, }, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "nodeA"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, - {Spec: api.PodSpec{NodeName: "nodeB"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "def"}}}, + pods: []*v1.Pod{ + 
{Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + {Spec: v1.PodSpec{NodeName: "nodeB"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "def"}}}, }, - nodes: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}}, - {ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}}, + nodes: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}}, + {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}}, }, fits: map[string]bool{ "nodeA": false, @@ -2587,13 +2587,13 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { test: "NodeA and nodeB have same topologyKey and label value. NodeA does not satisfy node affinity rule, but has an existing pod that match the inter pod affinity rule. The pod can be scheduled onto nodeB.", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Labels: map[string]string{ "foo": "bar", }, Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ "labelSelector": { @@ -2609,10 +2609,10 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { }, }, }, - pods: []*api.Pod{}, - nodes: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}}, - {ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}}, + pods: []*v1.Pod{}, + nodes: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}}, + {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}}, }, fits: map[string]bool{ "nodeA": true, @@ -2622,10 +2622,10 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { "should not be blocked from being scheduled onto any node, even there's no existing pod that match the rule anywhere.", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` { "podAntiAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ @@ -2643,12 +2643,12 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { }, }, }, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "nodeA"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, }, - nodes: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}}, - {ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}}, + nodes: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}}, + {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}}, }, fits: map[string]bool{ "nodeA": false, @@ -2657,10 +2657,10 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { test: "NodeA and nodeB have same topologyKey and label value. NodeA has an existing pod that match the inter pod affinity rule. 
The pod can not be scheduled onto nodeA and nodeB.", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` { "podAntiAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [{ @@ -2678,13 +2678,13 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { }, }, }, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "nodeA"}, ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}}, }, - nodes: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "nodeA", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}}, + nodes: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}}, }, fits: map[string]bool{ "nodeA": false, @@ -2700,7 +2700,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { for _, test := range tests { nodeListInfo := FakeNodeListInfo(test.nodes) for _, node := range test.nodes { - var podsOnNode []*api.Pod + var podsOnNode []*v1.Pod for _, pod := range test.pods { if pod.Spec.NodeName == node.Name { podsOnNode = append(podsOnNode, pod) @@ -2710,7 +2710,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { testFit := PodAffinityChecker{ info: nodeListInfo, podLister: algorithm.FakePodLister(test.pods), - failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")}, } nodeInfo := schedulercache.NewNodeInfo(podsOnNode...) 
nodeInfo.SetNode(&node) @@ -2722,7 +2722,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { if !fits && !reflect.DeepEqual(reasons, affinityExpectedFailureReasons) { t.Errorf("%s: unexpected failure reasons: %v", test.test, reasons) } - affinity, err := api.GetAffinityFromPodAnnotations(test.pod.ObjectMeta.Annotations) + affinity, err := v1.GetAffinityFromPodAnnotations(test.pod.ObjectMeta.Annotations) if err != nil { t.Errorf("%s: unexpected error: %v", test.test, err) } @@ -2749,21 +2749,21 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) { func TestPodToleratesTaints(t *testing.T) { podTolerateTaintsTests := []struct { - pod *api.Pod - node api.Node + pod *v1.Pod + node v1.Node fits bool test string }{ { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod0", }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "dedicated", "value": "user1", @@ -2776,11 +2776,11 @@ func TestPodToleratesTaints(t *testing.T) { test: "a pod having no tolerations can't be scheduled onto a node with nonempty taints", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "dedicated", "value": "user1", @@ -2788,14 +2788,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod1:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod1:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "dedicated", "value": "user1", @@ -2808,11 +2808,11 @@ func TestPodToleratesTaints(t *testing.T) { test: "a pod which can be scheduled on a dedicated node assigned to user1 with effect NoSchedule", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "dedicated", "operator": "Equal", @@ -2821,14 +2821,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod2:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod2:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "dedicated", "value": "user1", @@ -2841,11 +2841,11 @@ func TestPodToleratesTaints(t *testing.T) { test: "a pod which can't be scheduled on a dedicated node assigned to user2 with effect NoSchedule", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "foo", "operator": "Exists", @@ -2853,14 +2853,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod2:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod2:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ 
Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "foo", "value": "bar", @@ -2873,11 +2873,11 @@ func TestPodToleratesTaints(t *testing.T) { test: "a pod can be scheduled onto the node, with a toleration uses operator Exists that tolerates the taints on the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "dedicated", "operator": "Equal", @@ -2890,14 +2890,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod2:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod2:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "dedicated", "value": "user2", @@ -2914,11 +2914,11 @@ func TestPodToleratesTaints(t *testing.T) { test: "a pod has multiple tolerations, node has multiple taints, all the taints are tolerated, pod can be scheduled onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "foo", "operator": "Equal", @@ -2927,14 +2927,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod2:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod2:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "foo", "value": "bar", @@ -2948,11 +2948,11 @@ func TestPodToleratesTaints(t *testing.T) { "can't be scheduled onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "foo", "operator": "Equal", @@ -2960,14 +2960,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod2:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod2:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "foo", "value": "bar", @@ -2981,11 +2981,11 @@ func TestPodToleratesTaints(t *testing.T) { "and the effect of taint is NoSchedule. 
Pod can be scheduled onto the node", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Annotations: map[string]string{ - api.TolerationsAnnotationKey: ` + v1.TolerationsAnnotationKey: ` [{ "key": "dedicated", "operator": "Equal", @@ -2994,14 +2994,14 @@ func TestPodToleratesTaints(t *testing.T) { }]`, }, }, - Spec: api.PodSpec{ - Containers: []api.Container{{Image: "pod2:V1"}}, + Spec: v1.PodSpec{ + Containers: []v1.Container{{Image: "pod2:V1"}}, }, }, - node: api.Node{ - ObjectMeta: api.ObjectMeta{ + node: v1.Node{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TaintsAnnotationKey: ` + v1.TaintsAnnotationKey: ` [{ "key": "dedicated", "value": "user1", @@ -3033,7 +3033,7 @@ func TestPodToleratesTaints(t *testing.T) { } } -func makeEmptyNodeInfo(node *api.Node) *schedulercache.NodeInfo { +func makeEmptyNodeInfo(node *v1.Node) *schedulercache.NodeInfo { nodeInfo := schedulercache.NewNodeInfo() nodeInfo.SetNode(node) return nodeInfo @@ -3041,30 +3041,30 @@ func makeEmptyNodeInfo(node *api.Node) *schedulercache.NodeInfo { func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) { // specify best-effort pod - bestEffortPod := &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + bestEffortPod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "container", Image: "image", ImagePullPolicy: "Always", // no requirements -> best effort pod - Resources: api.ResourceRequirements{}, + Resources: v1.ResourceRequirements{}, }, }, }, } // specify non-best-effort pod - nonBestEffortPod := &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + nonBestEffortPod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "container", Image: "image", ImagePullPolicy: "Always", // at least one requirement -> burstable pod - Resources: api.ResourceRequirements{ + Resources: v1.ResourceRequirements{ Requests: makeAllocatableResources(100, 100, 100, 100, 0), }, }, @@ -3073,9 +3073,9 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) { } // specify a node with no memory pressure condition on - noMemoryPressureNode := &api.Node{ - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + noMemoryPressureNode := &v1.Node{ + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { Type: "Ready", Status: "True", @@ -3085,9 +3085,9 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) { } // specify a node with memory pressure condition on - memoryPressureNode := &api.Node{ - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + memoryPressureNode := &v1.Node{ + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { Type: "MemoryPressure", Status: "True", @@ -3097,7 +3097,7 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) { } tests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo fits bool name string @@ -3144,9 +3144,9 @@ func TestPodSchedulesOnNodeWithMemoryPressureCondition(t *testing.T) { } func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) { - pod := &api.Pod{ - Spec: api.PodSpec{ - Containers: []api.Container{ + pod := &v1.Pod{ + Spec: v1.PodSpec{ + Containers: []v1.Container{ { Name: "container", Image: "image", @@ -3157,9 +3157,9 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) { } // specify a node with no disk pressure condition on - noPressureNode := &api.Node{ - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + 
noPressureNode := &v1.Node{ + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { Type: "Ready", Status: "True", @@ -3169,9 +3169,9 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) { } // specify a node with pressure condition on - pressureNode := &api.Node{ - Status: api.NodeStatus{ - Conditions: []api.NodeCondition{ + pressureNode := &v1.Node{ + Status: v1.NodeStatus{ + Conditions: []v1.NodeCondition{ { Type: "DiskPressure", Status: "True", @@ -3181,7 +3181,7 @@ func TestPodSchedulesOnNodeWithDiskPressureCondition(t *testing.T) { } tests := []struct { - pod *api.Pod + pod *v1.Pod nodeInfo *schedulercache.NodeInfo fits bool name string diff --git a/plugin/pkg/scheduler/algorithm/predicates/utils.go b/plugin/pkg/scheduler/algorithm/predicates/utils.go index e5a1faeec25..46e7597c392 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/utils.go +++ b/plugin/pkg/scheduler/algorithm/predicates/utils.go @@ -16,8 +16,10 @@ limitations under the License. package predicates -import "k8s.io/kubernetes/pkg/labels" -import "k8s.io/kubernetes/pkg/api" +import ( + "k8s.io/kubernetes/pkg/api/v1" + "k8s.io/kubernetes/pkg/labels" +) // FindLabelsInSet gets as many key/value pairs as possible out of a label set. func FindLabelsInSet(labelsToKeep []string, selector labels.Set) map[string]string { @@ -45,8 +47,8 @@ func AddUnsetLabelsToMap(aL map[string]string, labelsToAdd []string, labelSet la } // FilterPodsByNamespace filters pods outside a namespace from the given list. -func FilterPodsByNamespace(pods []*api.Pod, ns string) []*api.Pod { - filtered := []*api.Pod{} +func FilterPodsByNamespace(pods []*v1.Pod, ns string) []*v1.Pod { + filtered := []*v1.Pod{} for _, nsPod := range pods { if nsPod.Namespace == ns { filtered = append(filtered, nsPod) diff --git a/plugin/pkg/scheduler/algorithm/predicates/utils_test.go b/plugin/pkg/scheduler/algorithm/predicates/utils_test.go index 32729304cfd..1e1125a6fd3 100644 --- a/plugin/pkg/scheduler/algorithm/predicates/utils_test.go +++ b/plugin/pkg/scheduler/algorithm/predicates/utils_test.go @@ -19,7 +19,7 @@ package predicates import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" ) @@ -30,9 +30,9 @@ func ExampleFindLabelsInSet() { labelSubset["label2"] = "value2" // Lets make believe that these pods are on the cluster. // Utility functions will inspect their labels, filter them, and so on. 
- nsPods := []*api.Pod{ + nsPods := []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "pod1", Namespace: "ns1", Labels: map[string]string{ @@ -43,14 +43,14 @@ func ExampleFindLabelsInSet() { }, }, // first pod which will be used via the utilities { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "pod2", Namespace: "ns1", }, }, { - ObjectMeta: api.ObjectMeta{ + ObjectMeta: v1.ObjectMeta{ Name: "pod3ThatWeWontSee", }, }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go index fedc16463c6..002c184cadf 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go +++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation.go @@ -20,7 +20,7 @@ import ( "fmt" "math" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -37,7 +37,7 @@ const ( // Also used in most/least_requested nad metadata. // TODO: despaghettify it -func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource { +func getNonZeroRequests(pod *v1.Pod) *schedulercache.Resource { result := &schedulercache.Resource{} for i := range pod.Spec.Containers { container := &pod.Spec.Containers[i] @@ -48,7 +48,7 @@ func getNonZeroRequests(pod *api.Pod) *schedulercache.Resource { return result } -func calculateBalancedResourceAllocation(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func calculateBalancedResourceAllocation(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") @@ -104,7 +104,7 @@ func fractionOfCapacity(requested, capacity int64) float64 { // close the two metrics are to each other. // Detail: score = 10 - abs(cpuFraction-memoryFraction)*10. The algorithm is partly inspired by: // "Wei Huang et al. 
An Energy Efficient Virtual Machine Placement Algorithm with Balanced Resource Utilization" -func BalancedResourceAllocationMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func BalancedResourceAllocationMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { var nonZeroRequest *schedulercache.Resource if priorityMeta, ok := meta.(*priorityMetadata); ok { nonZeroRequest = priorityMeta.nonZeroRequest diff --git a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go index 4003ded9a8e..607668ae6b2 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/balanced_resource_allocation_test.go @@ -20,8 +20,8 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -35,29 +35,29 @@ func TestBalancedResourceAllocation(t *testing.T) { "bar": "foo", "baz": "blah", } - machine1Spec := api.PodSpec{ + machine1Spec := v1.PodSpec{ NodeName: "machine1", } - machine2Spec := api.PodSpec{ + machine2Spec := v1.PodSpec{ NodeName: "machine2", } - noResources := api.PodSpec{ - Containers: []api.Container{}, + noResources := v1.PodSpec{ + Containers: []v1.Container{}, } - cpuOnly := api.PodSpec{ + cpuOnly := v1.PodSpec{ NodeName: "machine1", - Containers: []api.Container{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("1000m"), "memory": resource.MustParse("0"), }, }, }, { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("2000m"), "memory": resource.MustParse("0"), }, @@ -67,20 +67,20 @@ func TestBalancedResourceAllocation(t *testing.T) { } cpuOnly2 := cpuOnly cpuOnly2.NodeName = "machine2" - cpuAndMemory := api.PodSpec{ + cpuAndMemory := v1.PodSpec{ NodeName: "machine2", - Containers: []api.Container{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("1000m"), "memory": resource.MustParse("2000"), }, }, }, { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("2000m"), "memory": resource.MustParse("3000"), }, @@ -89,9 +89,9 @@ func TestBalancedResourceAllocation(t *testing.T) { }, } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ @@ -107,8 +107,8 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 0 / 10000 = 0% Node2 Score: 10 - (0-0)*10 = 10 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: 
"machine2", Score: 10}}, test: "nothing scheduled, nothing requested", }, @@ -124,8 +124,8 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 5000/10000 = 50% Node2 Score: 10 - (0.5-0.5)*10 = 10 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 10}}, test: "nothing scheduled, resources requested, differently sized machines", }, @@ -141,15 +141,15 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 0 / 10000 = 0% Node2 Score: 10 - (0-0)*10 = 10 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, test: "no resources requested, pods scheduled", - pods: []*api.Pod{ - {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, }, { @@ -164,15 +164,15 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 5000 / 20000 = 25% Node2 Score: 10 - (0.6-0.25)*10 = 6 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 6}}, test: "no resources requested, pods scheduled with resources", - pods: []*api.Pod{ - {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, }, { @@ -187,11 +187,11 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 10000 / 20000 = 50% Node2 Score: 10 - (0.6-0.5)*10 = 9 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 9}}, test: "resources requested, pods scheduled with resources", - pods: []*api.Pod{ + pods: []*v1.Pod{ 
{Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -208,11 +208,11 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction: 10000 / 50000 = 20% Node2 Score: 10 - (0.6-0.2)*10 = 6 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 6}}, test: "resources requested, pods scheduled with resources, differently sized machines", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -229,21 +229,21 @@ func TestBalancedResourceAllocation(t *testing.T) { Memory Fraction 5000 / 10000 = 50% Node2 Score: 0 */ - pod: &api.Pod{Spec: cpuOnly}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: cpuOnly}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "requested resources exceed node capacity", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, }, { - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "zero node resources, pods scheduled with resources", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go b/plugin/pkg/scheduler/algorithm/priorities/image_locality.go index 0bdd3ba3980..c83373484af 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality.go +++ b/plugin/pkg/scheduler/algorithm/priorities/image_locality.go @@ -19,7 +19,7 @@ package priorities import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -29,7 +29,7 @@ import ( // based on the total size of those images. // - If none of the images are present, this node will be given the lowest priority. // - If some of the images are present on a node, the larger their sizes' sum, the higher the node's priority. -func ImageLocalityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func ImageLocalityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") @@ -66,7 +66,7 @@ func calculateScoreFromSize(sumSize int64) int { } // checkContainerImageOnNode checks if a container image is present on a node and returns its size. 
-func checkContainerImageOnNode(node *api.Node, container *api.Container) int64 { +func checkContainerImageOnNode(node *v1.Node, container *v1.Container) int64 { for _, image := range node.Status.Images { for _, name := range image.Names { if container.Image == name { diff --git a/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go b/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go index 0921e70ab92..0612a2e45f4 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/image_locality_test.go @@ -21,14 +21,14 @@ import ( "sort" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) func TestImageLocalityPriority(t *testing.T) { - test_40_250 := api.PodSpec{ - Containers: []api.Container{ + test_40_250 := v1.PodSpec{ + Containers: []v1.Container{ { Image: "gcr.io/40", }, @@ -38,8 +38,8 @@ func TestImageLocalityPriority(t *testing.T) { }, } - test_40_140 := api.PodSpec{ - Containers: []api.Container{ + test_40_140 := v1.PodSpec{ + Containers: []v1.Container{ { Image: "gcr.io/40", }, @@ -49,8 +49,8 @@ func TestImageLocalityPriority(t *testing.T) { }, } - test_min_max := api.PodSpec{ - Containers: []api.Container{ + test_min_max := v1.PodSpec{ + Containers: []v1.Container{ { Image: "gcr.io/10", }, @@ -60,8 +60,8 @@ func TestImageLocalityPriority(t *testing.T) { }, } - node_40_140_2000 := api.NodeStatus{ - Images: []api.ContainerImage{ + node_40_140_2000 := v1.NodeStatus{ + Images: []v1.ContainerImage{ { Names: []string{ "gcr.io/40", @@ -86,8 +86,8 @@ func TestImageLocalityPriority(t *testing.T) { }, } - node_250_10 := api.NodeStatus{ - Images: []api.ContainerImage{ + node_250_10 := v1.NodeStatus{ + Images: []v1.ContainerImage{ { Names: []string{ "gcr.io/250", @@ -105,9 +105,9 @@ func TestImageLocalityPriority(t *testing.T) { } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ @@ -121,8 +121,8 @@ func TestImageLocalityPriority(t *testing.T) { // Node2 // Image: gcr.io/250 250MB // Score: (250M-23M)/97.7M + 1 = 3 - pod: &api.Pod{Spec: test_40_250}, - nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, + pod: &v1.Pod{Spec: test_40_250}, + nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine2", Score: 3}}, test: "two images spread on two nodes, prefer the larger image one", }, @@ -136,8 +136,8 @@ func TestImageLocalityPriority(t *testing.T) { // Node2 // Image: not present // Score: 0 - pod: &api.Pod{Spec: test_40_140}, - nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, + pod: &v1.Pod{Spec: test_40_140}, + nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 0}}, test: "two images on one node, prefer this node", }, @@ -151,8 +151,8 @@ func TestImageLocalityPriority(t *testing.T) { // Node2 // Image: gcr.io/10 10MB // Score: 10 < min score = 0 - pod: &api.Pod{Spec: test_min_max}, - nodes: []*api.Node{makeImageNode("machine1", node_40_140_2000), 
makeImageNode("machine2", node_250_10)}, + pod: &v1.Pod{Spec: test_min_max}, + nodes: []*v1.Node{makeImageNode("machine1", node_40_140_2000), makeImageNode("machine2", node_250_10)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "if exceed limit, use limit", }, @@ -174,9 +174,9 @@ func TestImageLocalityPriority(t *testing.T) { } } -func makeImageNode(node string, status api.NodeStatus) *api.Node { - return &api.Node{ - ObjectMeta: api.ObjectMeta{Name: node}, +func makeImageNode(node string, status v1.NodeStatus) *v1.Node { + return &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: node}, Status: status, } } diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go index 394eb27239e..5f73183709d 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -20,7 +20,7 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/workqueue" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/predicates" @@ -57,7 +57,7 @@ type podAffinityPriorityMap struct { sync.Mutex // nodes contain all nodes that should be considered - nodes []*api.Node + nodes []*v1.Node // counts store the mapping from node name to so-far computed score of // the node. counts map[string]float64 @@ -67,7 +67,7 @@ type podAffinityPriorityMap struct { firstError error } -func newPodAffinityPriorityMap(nodes []*api.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap { +func newPodAffinityPriorityMap(nodes []*v1.Node, failureDomains priorityutil.Topologies) *podAffinityPriorityMap { return &podAffinityPriorityMap{ nodes: nodes, counts: make(map[string]float64, len(nodes)), @@ -83,7 +83,7 @@ func (p *podAffinityPriorityMap) setError(err error) { } } -func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, weight float64) { +func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, weight float64) { match, err := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, podDefiningAffinityTerm, term) if err != nil { p.setError(err) @@ -102,7 +102,7 @@ func (p *podAffinityPriorityMap) processTerm(term *api.PodAffinityTerm, podDefin } } -func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *api.Pod, fixedNode *api.Node, multiplier int) { +func (p *podAffinityPriorityMap) processTerms(terms []v1.WeightedPodAffinityTerm, podDefiningAffinityTerm, podToCheck *v1.Pod, fixedNode *v1.Node, multiplier int) { for i := range terms { term := &terms[i] p.processTerm(&term.PodAffinityTerm, podDefiningAffinityTerm, podToCheck, fixedNode, float64(term.Weight*int32(multiplier))) @@ -114,8 +114,8 @@ func (p *podAffinityPriorityMap) processTerms(terms []api.WeightedPodAffinityTer // that node; the node(s) with the highest sum are the most preferred. 
// Symmetry need to be considered for preferredDuringSchedulingIgnoredDuringExecution from podAffinity & podAntiAffinity, // symmetry need to be considered for hard requirements from podAffinity -func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { - affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) +func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { + affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations) if err != nil { return nil, err } @@ -134,12 +134,12 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *api.Pod, nod // the node. pm := newPodAffinityPriorityMap(nodes, ipa.failureDomains) - processPod := func(existingPod *api.Pod) error { + processPod := func(existingPod *v1.Pod) error { existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName) if err != nil { return err } - existingPodAffinity, err := api.GetAffinityFromPodAnnotations(existingPod.Annotations) + existingPodAffinity, err := v1.GetAffinityFromPodAnnotations(existingPod.Annotations) if err != nil { return err } diff --git a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index 98ba9ffd013..1e4dd8586e0 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -22,17 +22,17 @@ import ( "strings" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -type FakeNodeListInfo []*api.Node +type FakeNodeListInfo []*v1.Node -func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) { +func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) { for _, node := range nodes { if node.Name == nodeName { return node, nil @@ -66,7 +66,7 @@ func TestInterPodAffinityPriority(t *testing.T) { } // considered only preferredDuringSchedulingIgnoredDuringExecution in pod affinity stayWithS1InRegion := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, @@ -85,7 +85,7 @@ func TestInterPodAffinityPriority(t *testing.T) { }}`, } stayWithS2InRegion := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 6, @@ -104,7 +104,7 @@ func TestInterPodAffinityPriority(t *testing.T) { }}`, } affinity3 := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [ { @@ -144,7 +144,7 @@ func TestInterPodAffinityPriority(t *testing.T) { }}`, } hardAffinity := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [ { @@ -174,7 +174,7 @@ func TestInterPodAffinityPriority(t *testing.T) { }}`, } awayFromS1InAz := 
map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, @@ -194,7 +194,7 @@ func TestInterPodAffinityPriority(t *testing.T) { } // to stay away from security S2 in any az. awayFromS2InAz := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, @@ -214,7 +214,7 @@ func TestInterPodAffinityPriority(t *testing.T) { } // to stay with security S1 in same region, stay away from security S2 in any az. stayWithS1InRegionAwayFromS2InAz := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 8, @@ -250,18 +250,18 @@ func TestInterPodAffinityPriority(t *testing.T) { } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}}, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: map[string]string{}}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "all machines are same priority as Affinity is nil", @@ -270,16 +270,16 @@ func TestInterPodAffinityPriority(t *testing.T) { // the node(machine3) that don't have the label {"region": "whatever the value is"} (mismatch the topology key) but that have existing pods that match the labelSelector get low score // the node(machine2) that have the label {"region": "China"} (match the topology key) but that have existing pods that mismatch the labelSelector get low score { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: 
api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "Affinity: pod that matches topology key & pods in nodes will get high score comparing to others" + @@ -290,14 +290,14 @@ func TestInterPodAffinityPriority(t *testing.T) { // the node3(machine3) that have the label {"region": "India"}, match the topology key but have a different label value, don't have existing pods that match the labelSelector, // get a low score. { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegion}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegion}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "All the nodes that have the same topology key & label value with one of them has an existing pod that match the affinity rules, have the same score", @@ -307,37 +307,37 @@ func TestInterPodAffinityPriority(t *testing.T) { // Then, nodes in regionChina get higher score than nodes in regionIndia, and all the nodes in regionChina should get a same score(high score), // while all the nodes in regionIndia should get another same score(low score). 
{ - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS2InRegion}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 5}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 5}}, test: "Affinity: nodes in one region has more matching pods comparing to other reqion, so the region which has more macthes will get high score", }, // Test with the different operators and values for pod affinity scheduling preference, including some match failures. 
{ - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: affinity3}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 2}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Affinity: different Label operators and values for pod affinity scheduling preference, including some match failures ", @@ -345,29 +345,29 @@ func TestInterPodAffinityPriority(t *testing.T) { // Test the symmetry cases for affinity, the difference between affinity and symmetry is not the pod wants to run together with some existing pods, // but the existing pods have the inter pod affinity preference while the pod to schedule satisfy the preference. 
{ - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegion}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: stayWithS2InRegion}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Affinity symmetry: considred only the preferredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: hardAffinity}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: hardAffinity}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Affinity symmetry: considred RequiredDuringSchedulingIgnoredDuringExecution in pod affinity symmetry", @@ -380,69 +380,69 @@ func TestInterPodAffinityPriority(t *testing.T) { // there are 2 nodes, say node1 and node2, both nodes have pods that match the labelSelector and have topology-key in node.Labels. // But there are more pods on node1 that match the preference than node2. Then, node1 get a lower score than node2. 
{ - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity: pod that doesnot match existing pods in node will get high score ", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity: pod that does not matches topology key & matches the pods in nodes will get higher score comparing to others ", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS1InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: 
"machine2", Labels: labelRgIndia}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity: one node has more matching pods comparing to other node, so the node which has more unmacthes will get high score", }, // Test the symmetry cases for anti affinity { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: awayFromS2InAz}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2, Annotations: awayFromS1InAz}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Anti Affinity symmetry: the existing pods in node which has anti affinity match will get high score", }, // Test both affinity and anti-affinity { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "Affinity and Anti Affinity: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity", @@ -452,22 +452,22 @@ func TestInterPodAffinityPriority(t *testing.T) { // so that all the pods of a RC/service can stay in a same region but trying to separate with each other // machine-1,machine-3,machine-4 are in ChinaRegion others machin-2,machine-5 are in IndiaRegion { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: 
stayWithS1InRegionAwayFromS2InAz}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine5"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChinaAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: labelRgIndia}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 4}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 10}, {Host: "machine5", Score: 4}}, test: "Affinity and Anti Affinity: considering both affinity and anti-affinity, the pod to schedule and existing pods have the same labels", @@ -478,18 +478,18 @@ func TestInterPodAffinityPriority(t *testing.T) { // for Affinity symmetry, the weights are: 0, 0, 8, 0 // for Anti Affinity symmetry, the weights are: 0, 0, 0, -5 { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabelSecurityS2}}, - {Spec: api.PodSpec{NodeName: "machine3"}, ObjectMeta: api.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}}, - {Spec: api.PodSpec{NodeName: "machine4"}, ObjectMeta: api.ObjectMeta{Annotations: awayFromS1InAz}}, + pod: &v1.Pod{Spec: 
v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1, Annotations: stayWithS1InRegionAwayFromS2InAz}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabelSecurityS2}}, + {Spec: v1.PodSpec{NodeName: "machine3"}, ObjectMeta: v1.ObjectMeta{Annotations: stayWithS1InRegionAwayFromS2InAz}}, + {Spec: v1.PodSpec{NodeName: "machine4"}, ObjectMeta: v1.ObjectMeta{Annotations: awayFromS1InAz}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAz1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine4", Labels: labelAzAz2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 10}, {Host: "machine4", Score: 0}}, test: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", @@ -501,8 +501,8 @@ func TestInterPodAffinityPriority(t *testing.T) { info: FakeNodeListInfo(test.nodes), nodeLister: algorithm.FakeNodeLister(test.nodes), podLister: algorithm.FakePodLister(test.pods), - hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight, - failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")}, } list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) if err != nil { @@ -528,7 +528,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { "az": "az1", } hardPodAffinity := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAffinity": { "requiredDuringSchedulingIgnoredDuringExecution": [ { @@ -546,38 +546,38 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) { }}`, } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node hardPodAffinityWeight int expectedList schedulerapi.HostPriorityList test string }{ { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - 
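For readers tracing how the raw affinity/anti-affinity weights in the test comments above turn into the 0-10 values in expectedList: a minimal standalone sketch of the min/max normalization this priority is assumed to apply (illustrative only, hypothetical per-node totals chosen to be consistent with the 10/0/10/0 expectation):

package main

import "fmt"

// normalize maps raw per-node totals (sums of matched term weights, which can
// be negative for anti-affinity) onto the scheduler's 0-10 score range.
func normalize(totals map[string]float64) map[string]int {
	var min, max float64
	first := true
	for _, t := range totals {
		if first || t < min {
			min = t
		}
		if first || t > max {
			max = t
		}
		first = false
	}
	scores := make(map[string]int)
	for node, t := range totals {
		if max > min {
			scores[node] = int(10 * (t - min) / (max - min))
		} else {
			scores[node] = 0
		}
	}
	return scores
}

func main() {
	// Hypothetical totals: positive where affinity terms match, negative where
	// anti-affinity terms match, yielding machine1:10 machine2:0 machine3:10 machine4:0.
	fmt.Println(normalize(map[string]float64{
		"machine1": 8, "machine2": -5, "machine3": 8, "machine4": -5,
	}))
}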
{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, - hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight, + hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 0}}, test: "Hard Pod Affinity symmetry: hard pod affinity symmetry weights 1 by default, then nodes that match the hard pod affinity symmetry rules, get a high score", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabelServiceS1}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Annotations: hardPodAffinity}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabelServiceS1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Annotations: hardPodAffinity}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelRgIndia}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: labelAzAz1}}, }, hardPodAffinityWeight: 0, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, @@ -613,7 +613,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) { "security": "S1", } antiAffinity1 := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"podAntiAffinity": { "preferredDuringSchedulingIgnoredDuringExecution": [{ "weight": 5, @@ -632,36 +632,36 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) { }}`, } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node failureDomains priorityutil.Topologies expectedList schedulerapi.HostPriorityList test string }{ { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: 
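A hedged sketch of the symmetry rule the two hard-affinity cases above exercise: existing pods whose required (hard) pod-affinity term would select the incoming pod are assumed to contribute hardPodAffinityWeight to their node's count, so a weight of 0 zeroes the whole effect. The helper below is hypothetical, not the patched implementation:

package main

import "fmt"

type existingPod struct {
	node               string
	hardTermMatchesNew bool // would this pod's required affinity select the new pod?
}

// symmetricCounts accumulates the hard-affinity symmetry contribution per node.
func symmetricCounts(pods []existingPod, hardPodAffinityWeight int) map[string]int {
	counts := map[string]int{}
	for _, p := range pods {
		if p.hardTermMatchesNew {
			counts[p.node] += hardPodAffinityWeight
		}
	}
	return counts
}

func main() {
	pods := []existingPod{{"machine1", true}, {"machine2", true}}
	fmt.Println(symmetricCounts(pods, 1)) // non-zero counts: machine1/machine2 end up with high scores
	fmt.Println(symmetricCounts(pods, 0)) // weight 0: every node ends up scoring 0
}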
LabelZoneFailureDomainAZ1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, }, - failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(api.DefaultFailureDomains, ",")}, + failureDomains: priorityutil.Topologies{DefaultKeys: strings.Split(v1.DefaultFailureDomains, ",")}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}}, test: "Soft Pod Anti Affinity: when the topologyKey is emtpy, match among topologyKeys indicated by failure domains.", }, { - pod: &api.Pod{Spec: api.PodSpec{NodeName: ""}, ObjectMeta: api.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, - pods: []*api.Pod{ - {Spec: api.PodSpec{NodeName: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, - {Spec: api.PodSpec{NodeName: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: podLabel1}}, + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1, Annotations: antiAffinity1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, + {Spec: v1.PodSpec{NodeName: "machine2"}, ObjectMeta: v1.ObjectMeta{Labels: podLabel1}}, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: LabelZoneFailureDomainAZ1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: labelAzAZ1}}, }, failureDomains: priorityutil.Topologies{}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, @@ -674,7 +674,7 @@ func TestSoftPodAntiAffinityWithFailureDomains(t *testing.T) { info: FakeNodeListInfo(test.nodes), nodeLister: algorithm.FakeNodeLister(test.nodes), podLister: algorithm.FakePodLister(test.pods), - hardPodAffinityWeight: api.DefaultHardPodAffinitySymmetricWeight, + hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight, failureDomains: test.failureDomains, } list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes) diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go index 4e8b4289799..5e0f7bcaed1 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested.go @@ -19,7 +19,7 @@ package priorities import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -30,7 +30,7 @@ import ( // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes // based on the minimum of the average of the fraction of requested to capacity. 
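The empty-topologyKey case above falls back to splitting v1.DefaultFailureDomains on commas. A small sketch of that expansion; the exact constant value is an assumption recalled from that release and is spelled out locally rather than imported:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Assumed value of v1.DefaultFailureDomains at the time of this patch:
	// the hostname, zone and region label keys joined by commas.
	defaultFailureDomains := "kubernetes.io/hostname," +
		"failure-domain.beta.kubernetes.io/zone," +
		"failure-domain.beta.kubernetes.io/region"

	// Mirrors priorityutil.Topologies{DefaultKeys: strings.Split(...)} in the tests above.
	keys := strings.Split(defaultFailureDomains, ",")
	fmt.Println(keys) // each key is tried when an anti-affinity term leaves topologyKey empty
}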
// Details: cpu((capacity - sum(requested)) * 10 / capacity) + memory((capacity - sum(requested)) * 10 / capacity) / 2 -func LeastRequestedPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func LeastRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { var nonZeroRequest *schedulercache.Resource if priorityMeta, ok := meta.(*priorityMetadata); ok { nonZeroRequest = priorityMeta.nonZeroRequest @@ -59,7 +59,7 @@ func calculateUnusedScore(requested int64, capacity int64, node string) int64 { // Calculates host priority based on the amount of unused resources. // 'node' has information about the resources on the node. // 'pods' is a list of pods currently scheduled on the node. -func calculateUnusedPriority(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func calculateUnusedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go b/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go index 55ebdb8b94a..f47a76e65d0 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/least_requested_test.go @@ -20,8 +20,8 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -35,29 +35,29 @@ func TestLeastRequested(t *testing.T) { "bar": "foo", "baz": "blah", } - machine1Spec := api.PodSpec{ + machine1Spec := v1.PodSpec{ NodeName: "machine1", } - machine2Spec := api.PodSpec{ + machine2Spec := v1.PodSpec{ NodeName: "machine2", } - noResources := api.PodSpec{ - Containers: []api.Container{}, + noResources := v1.PodSpec{ + Containers: []v1.Container{}, } - cpuOnly := api.PodSpec{ + cpuOnly := v1.PodSpec{ NodeName: "machine1", - Containers: []api.Container{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("1000m"), "memory": resource.MustParse("0"), }, }, }, { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("2000m"), "memory": resource.MustParse("0"), }, @@ -67,20 +67,20 @@ func TestLeastRequested(t *testing.T) { } cpuOnly2 := cpuOnly cpuOnly2.NodeName = "machine2" - cpuAndMemory := api.PodSpec{ + cpuAndMemory := v1.PodSpec{ NodeName: "machine2", - Containers: []api.Container{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("1000m"), "memory": resource.MustParse("2000"), }, }, }, { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("2000m"), "memory": resource.MustParse("3000"), }, @@ -89,9 +89,9 @@ func TestLeastRequested(t *testing.T) { }, } tests := []struct { - 
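A minimal standalone sketch of the least-requested formula quoted in the comment above: the unused fraction of each resource is scaled to 0-10 and the two scores are averaged. Integer arithmetic is used here for brevity, so the fractional intermediates in the test comments are truncated earlier, but the worked case below lands on the same final score:

package main

import "fmt"

// unusedScore is (capacity - requested) * 10 / capacity, treated as 0 when
// capacity is zero or requests exceed capacity.
func unusedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return (capacity - requested) * 10 / capacity
}

func main() {
	// 3000m CPU requested of 4000m and 5000 memory of 10000, as in the
	// "differently sized machines" test case above: final score 3.
	cpu := unusedScore(3000, 4000)
	mem := unusedScore(5000, 10000)
	fmt.Println((cpu + mem) / 2)
}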
pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ @@ -107,8 +107,8 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 0) *10) / 10000 = 10 Node2 Score: (10 + 10) / 2 = 10 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, test: "nothing scheduled, nothing requested", }, @@ -124,8 +124,8 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 5000) *10) / 10000 = 5 Node2 Score: (5 + 5) / 2 = 5 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 5}}, test: "nothing scheduled, resources requested, differently sized machines", }, @@ -141,15 +141,15 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 0) *10) / 10000 = 10 Node2 Score: (10 + 10) / 2 = 10 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, test: "no resources requested, pods scheduled", - pods: []*api.Pod{ - {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: machine1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: machine2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: machine1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: machine2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, }, { @@ -164,15 +164,15 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((20000 - 5000) *10) / 20000 = 7.5 Node2 Score: (4 + 7.5) / 2 = 5 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 7}, {Host: "machine2", Score: 5}}, test: "no resources requested, pods scheduled with resources", - pods: []*api.Pod{ - {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: 
labels1}}, }, }, { @@ -187,11 +187,11 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((20000 - 10000) *10) / 20000 = 5 Node2 Score: (4 + 5) / 2 = 4 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 4}}, test: "resources requested, pods scheduled with resources", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -208,11 +208,11 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((50000 - 10000) *10) / 50000 = 8 Node2 Score: (4 + 8) / 2 = 6 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 50000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 6}}, test: "resources requested, pods scheduled with resources, differently sized machines", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, @@ -229,21 +229,21 @@ func TestLeastRequested(t *testing.T) { Memory Score: ((10000 - 5000) *10) / 10000 = 5 Node2 Score: (0 + 5) / 2 = 2 */ - pod: &api.Pod{Spec: cpuOnly}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: cpuOnly}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 2}}, test: "requested resources exceed node capacity", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, }, { - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 0, 0), makeNode("machine2", 0, 0)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "zero node resources, pods scheduled with resources", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/metadata.go b/plugin/pkg/scheduler/algorithm/priorities/metadata.go index d41b34bfde4..1a83826547d 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/metadata.go +++ b/plugin/pkg/scheduler/algorithm/priorities/metadata.go @@ -17,19 +17,19 @@ limitations under the License. package priorities import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) // priorityMetadata is a type that is passed as metadata for priority functions type priorityMetadata struct { nonZeroRequest *schedulercache.Resource - podTolerations []api.Toleration - affinity *api.Affinity + podTolerations []v1.Toleration + affinity *v1.Affinity } // PriorityMetadata is a MetadataProducer. Node info can be nil. 
-func PriorityMetadata(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +func PriorityMetadata(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { // If we cannot compute metadata, just return nil if pod == nil { return nil @@ -38,7 +38,7 @@ func PriorityMetadata(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.No if err != nil { return nil } - affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) + affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations) if err != nil { return nil } diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go index 426cb6ca449..1492dc57f8a 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested.go +++ b/plugin/pkg/scheduler/algorithm/priorities/most_requested.go @@ -19,7 +19,7 @@ package priorities import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -30,7 +30,7 @@ import ( // It calculates the percentage of memory and CPU requested by pods scheduled on the node, and prioritizes // based on the maximum of the average of the fraction of requested to capacity. // Details: (cpu(10 * sum(requested) / capacity) + memory(10 * sum(requested) / capacity)) / 2 -func MostRequestedPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func MostRequestedPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { var nonZeroRequest *schedulercache.Resource if priorityMeta, ok := meta.(*priorityMetadata); ok { nonZeroRequest = priorityMeta.nonZeroRequest @@ -62,7 +62,7 @@ func calculateUsedScore(requested int64, capacity int64, node string) int64 { // Calculate the resource used on a node. 'node' has information about the resources on the node. // 'pods' is a list of pods currently scheduled on the node. 
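As context for the meta parameter threaded through these map functions: priorityMetadata is produced once per pod and then recovered with a type assertion plus a recompute fallback. The pattern can be sketched independently of the Kubernetes types (all names below are hypothetical):

package main

import "fmt"

type podMeta struct {
	nonZeroCPU int64 // precomputed once per pod
}

// producer builds the per-pod metadata; returning nil is allowed and simply
// forces each scoring function to recompute what it needs.
func producer(cpu int64) interface{} {
	return &podMeta{nonZeroCPU: cpu}
}

func score(meta interface{}) int64 {
	if m, ok := meta.(*podMeta); ok {
		return m.nonZeroCPU // fast path: reuse the precomputed value
	}
	return recompute() // fallback when metadata is missing or of another type
}

func recompute() int64 { return 100 }

func main() {
	fmt.Println(score(producer(250))) // 250
	fmt.Println(score(nil))           // 100 (fallback path)
}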
-func calculateUsedPriority(pod *api.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func calculateUsedPriority(pod *v1.Pod, podRequests *schedulercache.Resource, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go b/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go index a11aaf5e3c5..fef9748a4a6 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/most_requested_test.go @@ -20,8 +20,8 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -35,23 +35,23 @@ func TestMostRequested(t *testing.T) { "bar": "foo", "baz": "blah", } - noResources := api.PodSpec{ - Containers: []api.Container{}, + noResources := v1.PodSpec{ + Containers: []v1.Container{}, } - cpuOnly := api.PodSpec{ + cpuOnly := v1.PodSpec{ NodeName: "machine1", - Containers: []api.Container{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("1000m"), "memory": resource.MustParse("0"), }, }, }, { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("2000m"), "memory": resource.MustParse("0"), }, @@ -61,20 +61,20 @@ func TestMostRequested(t *testing.T) { } cpuOnly2 := cpuOnly cpuOnly2.NodeName = "machine2" - cpuAndMemory := api.PodSpec{ + cpuAndMemory := v1.PodSpec{ NodeName: "machine2", - Containers: []api.Container{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("1000m"), "memory": resource.MustParse("2000"), }, }, }, { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse("2000m"), "memory": resource.MustParse("3000"), }, @@ -83,9 +83,9 @@ func TestMostRequested(t *testing.T) { }, } tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ @@ -101,8 +101,8 @@ func TestMostRequested(t *testing.T) { Memory Score: (0 * 10 / 10000 = 0 Node2 Score: (0 + 0) / 2 = 0 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 4000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "nothing scheduled, nothing requested", }, @@ -118,8 +118,8 @@ func TestMostRequested(t *testing.T) { Memory Score: (5000 * 10 / 10000 = 5 Node2 Score: (5 + 5) / 2 = 5 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: 
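And the mirror-image most-requested formula from the comment above, again as a hedged standalone sketch: 10 * requested / capacity per resource, averaged across CPU and memory, with integer arithmetic for brevity:

package main

import "fmt"

// usedScore is requested * 10 / capacity, treated as 0 when capacity is zero
// or requests exceed capacity.
func usedScore(requested, capacity int64) int64 {
	if capacity == 0 || requested > capacity {
		return 0
	}
	return requested * 10 / capacity
}

func main() {
	// 3000m CPU of 4000m and 5000 memory of 10000 gives (7 + 5) / 2 = 6,
	// matching machine1 in the "differently sized machines" case above.
	fmt.Println((usedScore(3000, 4000) + usedScore(5000, 10000)) / 2)
}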
[]*v1.Node{makeNode("machine1", 4000, 10000), makeNode("machine2", 6000, 10000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 6}, {Host: "machine2", Score: 5}}, test: "nothing scheduled, resources requested, differently sized machines", }, @@ -135,15 +135,15 @@ func TestMostRequested(t *testing.T) { Memory Score: (5000 * 10) / 20000 = 2.5 Node2 Score: (6 + 2.5) / 2 = 4 */ - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 3}, {Host: "machine2", Score: 4}}, test: "no resources requested, pods scheduled with resources", - pods: []*api.Pod{ - {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: cpuOnly, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: cpuOnly2, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: cpuAndMemory, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: cpuOnly, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: cpuOnly2, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: cpuAndMemory, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, }, { @@ -158,11 +158,11 @@ func TestMostRequested(t *testing.T) { Memory Score: (10000 * 10) / 20000 = 5 Node2 Score: (6 + 5) / 2 = 5 */ - pod: &api.Pod{Spec: cpuAndMemory}, - nodes: []*api.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, + pod: &v1.Pod{Spec: cpuAndMemory}, + nodes: []*v1.Node{makeNode("machine1", 10000, 20000), makeNode("machine2", 10000, 20000)}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 4}, {Host: "machine2", Score: 5}}, test: "resources requested, pods scheduled with resources", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: cpuOnly}, {Spec: cpuAndMemory}, }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go index 861f9e7ca0b..4c9c151d752 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity.go @@ -20,7 +20,7 @@ import ( "fmt" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" @@ -31,19 +31,19 @@ import ( // it will a get an add of preferredSchedulingTerm.Weight. Thus, the more preferredSchedulingTerms // the node satisfies and the more the preferredSchedulingTerm that is satisfied weights, the higher // score the node gets. -func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func CalculateNodeAffinityPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } - var affinity *api.Affinity + var affinity *v1.Affinity if priorityMeta, ok := meta.(*priorityMetadata); ok { affinity = priorityMeta.affinity } else { // We couldn't parse metadata - fallback to computing it. 
var err error - affinity, err = api.GetAffinityFromPodAnnotations(pod.Annotations) + affinity, err = v1.GetAffinityFromPodAnnotations(pod.Annotations) if err != nil { return schedulerapi.HostPriority{}, err } @@ -62,7 +62,7 @@ func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo * } // TODO: Avoid computing it for all nodes if this becomes a performance problem. - nodeSelector, err := api.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions) + nodeSelector, err := v1.NodeSelectorRequirementsAsSelector(preferredSchedulingTerm.Preference.MatchExpressions) if err != nil { return schedulerapi.HostPriority{}, err } @@ -78,7 +78,7 @@ func CalculateNodeAffinityPriorityMap(pod *api.Pod, meta interface{}, nodeInfo * }, nil } -func CalculateNodeAffinityPriorityReduce(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { +func CalculateNodeAffinityPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { var maxCount int for i := range result { if result[i].Score > maxCount { diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go index 85c390c903e..64335f37b57 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_affinity_test.go @@ -20,7 +20,7 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -33,7 +33,7 @@ func TestNodeAffinityPriority(t *testing.T) { label5 := map[string]string{"foo": "bar", "key": "value", "az": "az1"} affinity1 := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [ { "weight": 2, @@ -50,7 +50,7 @@ func TestNodeAffinityPriority(t *testing.T) { } affinity2 := map[string]string{ - api.AffinityAnnotationKey: ` + v1.AffinityAnnotationKey: ` {"nodeAffinity": {"preferredDuringSchedulingIgnoredDuringExecution": [ { "weight": 2, @@ -91,63 +91,63 @@ func TestNodeAffinityPriority(t *testing.T) { } tests := []struct { - pod *api.Pod - nodes []*api.Node + pod *v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{}, }, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "all machines are same priority as NodeAffinity is nil", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: affinity1, }, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label4}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", 
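A hedged sketch of the preferred-node-affinity scoring described above: each satisfied preferredSchedulingTerm adds its Weight to the node's count, and the reduce pass is assumed to rescale counts to 0-10 against the best node. Helper names and the matcher closure are illustrative:

package main

import "fmt"

type preferredTerm struct {
	weight  int
	matches func(nodeLabels map[string]string) bool
}

// rawCount sums the weights of the terms a node satisfies.
func rawCount(terms []preferredTerm, nodeLabels map[string]string) int {
	total := 0
	for _, t := range terms {
		if t.matches(nodeLabels) {
			total += t.weight
		}
	}
	return total
}

// reduceToTen rescales counts so the best node gets 10.
func reduceToTen(counts map[string]int) map[string]int {
	max := 0
	for _, c := range counts {
		if c > max {
			max = c
		}
	}
	out := map[string]int{}
	for n, c := range counts {
		if max > 0 {
			out[n] = c * 10 / max
		} else {
			out[n] = 0
		}
	}
	return out
}

func main() {
	hasFoo := preferredTerm{2, func(l map[string]string) bool { return l["foo"] == "bar" }}
	counts := map[string]int{
		"machine1": rawCount([]preferredTerm{hasFoo}, map[string]string{"foo": "bar"}),
		"machine2": rawCount([]preferredTerm{hasFoo}, map[string]string{"key": "value"}),
	}
	fmt.Println(reduceToTen(counts)) // machine1:10 machine2:0
}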
Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label4}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "no machine macthes preferred scheduling requirements in NodeAffinity of pod so all machines' priority is zero", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: affinity1, }, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, test: "only machine1 matches the preferred scheduling requirements of pod", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: affinity2, }, }, - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: label5}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine5", Labels: label5}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 1}, {Host: "machine5", Score: 10}, {Host: "machine2", Score: 3}}, test: "all machines matches the preferred scheduling requirements of pod but with different priorities ", diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_label.go b/plugin/pkg/scheduler/algorithm/priorities/node_label.go index ed177e02e4b..d22c29b505c 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_label.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_label.go @@ -19,7 +19,7 @@ package priorities import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" @@ -42,7 +42,7 @@ func NewNodeLabelPriority(label string, presence bool) (algorithm.PriorityMapFun // CalculateNodeLabelPriority checks whether a particular label exists on a node or not, regardless of its value. // If presence is true, prioritizes nodes that have the specified label, regardless of value. // If presence is false, prioritizes nodes that do not have the specified label. 
-func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func (n *NodeLabelPrioritizer) CalculateNodeLabelPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go index 826ceb3f818..58913ae4e0a 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_label_test.go @@ -21,7 +21,7 @@ import ( "sort" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -31,17 +31,17 @@ func TestNewNodeLabelPriority(t *testing.T) { label2 := map[string]string{"bar": "foo"} label3 := map[string]string{"bar": "baz"} tests := []struct { - nodes []*api.Node + nodes []*v1.Node label string presence bool expectedList schedulerapi.HostPriorityList test string }{ { - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "baz", @@ -49,10 +49,10 @@ func TestNewNodeLabelPriority(t *testing.T) { test: "no match found, presence true", }, { - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, label: "baz", @@ -60,10 +60,10 @@ func TestNewNodeLabelPriority(t *testing.T) { test: "no match found, presence false", }, { - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "foo", @@ -71,10 +71,10 @@ func TestNewNodeLabelPriority(t *testing.T) { test: "one match found, presence true", }, { - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: 
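The label-presence priority described above reduces to a single comparison; a tiny sketch under the same assumptions as the tests that follow:

package main

import "fmt"

// labelPresenceScore gives 10 when the node's possession of the label agrees
// with the desired presence flag, else 0; the label's value is ignored.
func labelPresenceScore(nodeLabels map[string]string, label string, presence bool) int {
	_, exists := nodeLabels[label]
	if exists == presence {
		return 10
	}
	return 0
}

func main() {
	machine1 := map[string]string{"foo": "bar"}
	fmt.Println(labelPresenceScore(machine1, "foo", true))  // 10: label present, presence wanted
	fmt.Println(labelPresenceScore(machine1, "baz", false)) // 10: label absent, absence wanted
}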
"machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, label: "foo", @@ -82,10 +82,10 @@ func TestNewNodeLabelPriority(t *testing.T) { test: "one match found, presence false", }, { - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 10}, {Host: "machine3", Score: 10}}, label: "bar", @@ -93,10 +93,10 @@ func TestNewNodeLabelPriority(t *testing.T) { test: "two matches found, presence true", }, { - nodes: []*api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}}, - {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}}, - {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}}, + nodes: []*v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "machine1", Labels: label1}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine2", Labels: label2}}, + {ObjectMeta: v1.ObjectMeta{Name: "machine3", Labels: label3}}, }, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: 0}}, label: "bar", diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go index d58890e75dd..e65c00d7552 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods.go @@ -19,13 +19,13 @@ package priorities import ( "fmt" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -func CalculateNodePreferAvoidPodsPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func CalculateNodePreferAvoidPodsPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") @@ -43,7 +43,7 @@ func CalculateNodePreferAvoidPodsPriorityMap(pod *api.Pod, meta interface{}, nod return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil } - avoids, err := api.GetAvoidPodsFromNodeAnnotations(node.Annotations) + avoids, err := v1.GetAvoidPodsFromNodeAnnotations(node.Annotations) if err != nil { // If we cannot get annotation, assume it's schedulable there. 
return schedulerapi.HostPriority{Host: node.Name, Score: 10}, nil diff --git a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go index 66d00249c76..cdac523984e 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/node_prefer_avoid_pods_test.go @@ -21,14 +21,14 @@ import ( "sort" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) func TestNodePreferAvoidPriority(t *testing.T) { annotations1 := map[string]string{ - api.PreferAvoidPodsAnnotationKey: ` + v1.PreferAvoidPodsAnnotationKey: ` { "preferAvoidPods": [ { @@ -48,7 +48,7 @@ func TestNodePreferAvoidPriority(t *testing.T) { }`, } annotations2 := map[string]string{ - api.PreferAvoidPodsAnnotationKey: ` + v1.PreferAvoidPodsAnnotationKey: ` { "preferAvoidPods": [ { @@ -67,29 +67,29 @@ func TestNodePreferAvoidPriority(t *testing.T) { ] }`, } - testNodes := []*api.Node{ + testNodes := []*v1.Node{ { - ObjectMeta: api.ObjectMeta{Name: "machine1", Annotations: annotations1}, + ObjectMeta: v1.ObjectMeta{Name: "machine1", Annotations: annotations1}, }, { - ObjectMeta: api.ObjectMeta{Name: "machine2", Annotations: annotations2}, + ObjectMeta: v1.ObjectMeta{Name: "machine2", Annotations: annotations2}, }, { - ObjectMeta: api.ObjectMeta{Name: "machine3"}, + ObjectMeta: v1.ObjectMeta{Name: "machine3"}, }, } trueVar := true tests := []struct { - pod *api.Pod - nodes []*api.Node + pod *v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: "default", - OwnerReferences: []api.OwnerReference{ + OwnerReferences: []v1.OwnerReference{ {Kind: "ReplicationController", Name: "foo", UID: "abcdef123456", Controller: &trueVar}, }, }, @@ -99,10 +99,10 @@ func TestNodePreferAvoidPriority(t *testing.T) { test: "pod managed by ReplicationController should avoid a node, this node get lowest priority score", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: "default", - OwnerReferences: []api.OwnerReference{ + OwnerReferences: []v1.OwnerReference{ {Kind: "RandomController", Name: "foo", UID: "abcdef123456", Controller: &trueVar}, }, }, @@ -112,10 +112,10 @@ func TestNodePreferAvoidPriority(t *testing.T) { test: "ownership by random controller should be ignored", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: "default", - OwnerReferences: []api.OwnerReference{ + OwnerReferences: []v1.OwnerReference{ {Kind: "ReplicationController", Name: "foo", UID: "abcdef123456"}, }, }, @@ -125,10 +125,10 @@ func TestNodePreferAvoidPriority(t *testing.T) { test: "owner without Controller field set should be ignored", }, { - pod: &api.Pod{ - ObjectMeta: api.ObjectMeta{ + pod: &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: "default", - OwnerReferences: []api.OwnerReference{ + OwnerReferences: []v1.OwnerReference{ {Kind: "ReplicaSet", Name: "foo", UID: "qwert12345", Controller: &trueVar}, }, }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go index da31aa6a541..20d5f4a9ee4 100644 --- 
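A hedged sketch of the avoid-pods check the cases below exercise: only an owner reference with Controller set and a controller kind listed in the node's preferAvoidPods annotation pulls the score down; anything else, including an unparsable annotation, leaves the node at full score. The avoidKinds map is a hypothetical simplification of the parsed annotation:

package main

import "fmt"

type ownerRef struct {
	kind       string
	controller bool
}

// preferAvoidScore returns 0 when the node's avoid list names the pod's
// controlling owner, 10 otherwise.
func preferAvoidScore(owners []ownerRef, avoidKinds map[string]bool) int {
	for _, o := range owners {
		if o.controller && avoidKinds[o.kind] {
			return 0
		}
	}
	return 10
}

func main() {
	avoid := map[string]bool{"ReplicationController": true}
	fmt.Println(preferAvoidScore([]ownerRef{{"ReplicationController", true}}, avoid))  // 0: node avoided
	fmt.Println(preferAvoidScore([]ownerRef{{"RandomController", true}}, avoid))       // 10: unknown controller ignored
	fmt.Println(preferAvoidScore([]ownerRef{{"ReplicationController", false}}, avoid)) // 10: Controller field not set
}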
a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading.go @@ -20,8 +20,8 @@ import ( "sync" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" utilnode "k8s.io/kubernetes/pkg/util/node" "k8s.io/kubernetes/pkg/util/workqueue" @@ -57,7 +57,7 @@ func NewSelectorSpreadPriority( } // Returns selectors of services, RCs and RSs matching the given pod. -func getSelectors(pod *api.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector { +func getSelectors(pod *v1.Pod, sl algorithm.ServiceLister, cl algorithm.ControllerLister, rsl algorithm.ReplicaSetLister) []labels.Selector { selectors := make([]labels.Selector, 0, 3) if services, err := sl.GetPodServices(pod); err == nil { for _, service := range services { @@ -79,7 +79,7 @@ func getSelectors(pod *api.Pod, sl algorithm.ServiceLister, cl algorithm.Control return selectors } -func (s *SelectorSpread) getSelectors(pod *api.Pod) []labels.Selector { +func (s *SelectorSpread) getSelectors(pod *v1.Pod) []labels.Selector { return getSelectors(pod, s.serviceLister, s.controllerLister, s.replicaSetLister) } @@ -89,7 +89,7 @@ func (s *SelectorSpread) getSelectors(pod *api.Pod) []labels.Selector { // i.e. it pushes the scheduler towards a node where there's the smallest number of // pods which match the same service, RC or RS selectors as the pod being scheduled. // Where zone information is included on the nodes, it favors nodes in zones with fewer existing matching pods. -func (s *SelectorSpread) CalculateSpreadPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { +func (s *SelectorSpread) CalculateSpreadPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { selectors := s.getSelectors(pod) // Count similar pods by node @@ -199,8 +199,8 @@ func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister // CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service // on machines with the same value for a particular label. // The label to be considered is provided to the struct (ServiceAntiAffinity). 
-func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { - var nsServicePods []*api.Pod +func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { + var nsServicePods []*v1.Pod if services, err := s.serviceLister.GetPodServices(pod); err == nil && len(services) > 0 { // just use the first service and get the other pods within the service // TODO: a separate predicate can be created that tries to handle all services for the pod diff --git a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go index 908398a5750..7d6ebe870ab 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/selector_spreading_test.go @@ -21,19 +21,19 @@ import ( "sort" "testing" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/api/v1" + extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -func controllerRef(kind, name, uid string) []api.OwnerReference { +func controllerRef(kind, name, uid string) []v1.OwnerReference { // TODO: When ControllerRef will be implemented uncomment code below. return nil //trueVar := true - //return []api.OwnerReference{ + //return []v1.OwnerReference{ // {Kind: kind, Name: name, UID: types.UID(uid), Controller: &trueVar}, //} } @@ -47,208 +47,208 @@ func TestSelectorSpreadPriority(t *testing.T) { "bar": "foo", "baz": "blah", } - zone1Spec := api.PodSpec{ + zone1Spec := v1.PodSpec{ NodeName: "machine1", } - zone2Spec := api.PodSpec{ + zone2Spec := v1.PodSpec{ NodeName: "machine2", } tests := []struct { - pod *api.Pod - pods []*api.Pod + pod *v1.Pod + pods []*v1.Pod nodes []string - rcs []*api.ReplicationController + rcs []*v1.ReplicationController rss []*extensions.ReplicaSet - services []*api.Service + services []*v1.Service expectedList schedulerapi.HostPriorityList test string }{ { - pod: new(api.Pod), + pod: new(v1.Pod), nodes: []string{"machine1", "machine2"}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, test: "nothing scheduled", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{{Spec: zone1Spec}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{{Spec: zone1Spec}}, nodes: []string{"machine1", "machine2"}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 10}}, test: "no services", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}}, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{{Host: 
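Finally, a hedged sketch of the spreading score the test cases below check: count the existing pods per node that match any of the new pod's service/RC/RS selectors, then score each node by how far below the busiest node it sits. Where zone labels are present the real priority also folds in a zone term, omitted here:

package main

import "fmt"

// spreadScores maps per-node counts of matching pods to 0-10, where the node
// with the most matches gets 0 and an unloaded node gets 10.
func spreadScores(matching map[string]int) map[string]int {
	max := 0
	for _, c := range matching {
		if c > max {
			max = c
		}
	}
	out := map[string]int{}
	for node, c := range matching {
		if max == 0 {
			out[node] = 10 // nothing to spread against
		} else {
			out[node] = 10 * (max - c) / max
		}
	}
	return out
}

func main() {
	// "four pods, three service pods": one match on machine1, two on machine2.
	fmt.Println(spreadScores(map[string]int{"machine1": 1, "machine2": 2})) // machine1:5 machine2:0
}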
"machine1", Score: 10}, {Host: "machine2", Score: 10}}, test: "different services", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "two pods, one service pod", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "five pods, one service pod in no namespace", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "four pods, one service pod in default namespace", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, - pods: []*api.Pod{ - {Spec: zone1Spec, 
ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns2"}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns2"}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: "ns1"}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: "ns1"}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 10}, {Host: "machine2", Score: 0}}, test: "five pods, one service pod in specific namespace", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "three pods, two service pods on different machines", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 5}, {Host: "machine2", Score: 0}}, test: "four pods, three service pods", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: 
v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, test: "service with partial pod label matches", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, - rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, // "baz=blah" matches both labels1 and labels2, and "foo=bar" matches only labels 1. This means that we assume that we want to // do spreading between all pods. The result should be exactly as above. 
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, test: "service with partial pod label matches with service and replication controller", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, test: "service with partial pod label matches with service and replica set", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, - rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, // Taken together Service and Replication Controller should match all Pods, hence result should be equal to one above. 
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, test: "disjoined service and replication controller should be treated equally", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: map[string]string{"foo": "bar", "bar": "foo"}, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"bar": "foo"}}}}, rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, // We use ReplicaSet, instead of ReplicationController. The result should be exactly as above. expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, test: "disjoined service and replica set should be treated equally", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, - rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"foo": "bar"}}}}, // Both Nodes have one pod from the given RC, hence both get 0 score. 
expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 0}}, test: "Replication controller with partial pod label matches", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"foo": "bar"}}}}}, @@ -257,23 +257,23 @@ func TestSelectorSpreadPriority(t *testing.T) { test: "Replica set with partial pod label matches", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicationController", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, - rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: 0}, {Host: "machine2", Score: 5}}, test: "Another replication controller with partial pod label matches", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, OwnerReferences: 
controllerRef("ReplicaSet", "name", "abc123")}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, OwnerReferences: controllerRef("ReplicaSet", "name", "abc123")}}, }, nodes: []string{"machine1", "machine2"}, rss: []*extensions.ReplicaSet{{Spec: extensions.ReplicaSetSpec{Selector: &unversioned.LabelSelector{MatchLabels: map[string]string{"baz": "blah"}}}}}, @@ -300,10 +300,10 @@ func TestSelectorSpreadPriority(t *testing.T) { } } -func buildPod(nodeName string, labels map[string]string, ownerRefs []api.OwnerReference) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs}, - Spec: api.PodSpec{NodeName: nodeName}, +func buildPod(nodeName string, labels map[string]string, ownerRefs []v1.OwnerReference) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{Labels: labels, OwnerReferences: ownerRefs}, + Spec: v1.PodSpec{NodeName: nodeName}, } } @@ -340,17 +340,17 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { } tests := []struct { - pod *api.Pod - pods []*api.Pod + pod *v1.Pod + pods []*v1.Pod nodes []string - rcs []*api.ReplicationController + rcs []*v1.ReplicationController rss []*extensions.ReplicaSet - services []*api.Service + services []*v1.Service expectedList schedulerapi.HostPriorityList test string }{ { - pod: new(api.Pod), + pod: new(v1.Pod), expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 10}, {Host: nodeMachine1Zone2, Score: 10}, @@ -363,7 +363,7 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, nil), - pods: []*api.Pod{buildPod(nodeMachine1Zone1, nil, nil)}, + pods: []*v1.Pod{buildPod(nodeMachine1Zone1, nil, nil)}, expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 10}, {Host: nodeMachine1Zone2, Score: 10}, @@ -376,8 +376,8 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, nil), - pods: []*api.Pod{buildPod(nodeMachine1Zone1, labels2, nil)}, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, + pods: []*v1.Pod{buildPod(nodeMachine1Zone1, labels2, nil)}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 10}, {Host: nodeMachine1Zone2, Score: 10}, @@ -390,11 +390,11 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, nil), - pods: []*api.Pod{ + pods: []*v1.Pod{ buildPod(nodeMachine1Zone1, labels2, nil), buildPod(nodeMachine1Zone2, labels1, nil), }, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 10}, {Host: nodeMachine1Zone2, Score: 0}, // Already have pod on machine @@ -407,14 +407,14 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, nil), - pods: []*api.Pod{ + pods: []*v1.Pod{ buildPod(nodeMachine1Zone1, labels2, nil), buildPod(nodeMachine1Zone2, labels1, nil), buildPod(nodeMachine2Zone2, 
labels1, nil), buildPod(nodeMachine1Zone3, labels2, nil), buildPod(nodeMachine2Zone3, labels1, nil), }, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 10}, {Host: nodeMachine1Zone2, Score: 0}, // Pod on node @@ -427,13 +427,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, nil), - pods: []*api.Pod{ + pods: []*v1.Pod{ buildPod(nodeMachine1Zone1, labels1, nil), buildPod(nodeMachine1Zone2, labels1, nil), buildPod(nodeMachine2Zone2, labels2, nil), buildPod(nodeMachine1Zone3, labels1, nil), }, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 0}, // Pod on node {Host: nodeMachine1Zone2, Score: 0}, // Pod on node @@ -446,13 +446,13 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, nil), - pods: []*api.Pod{ + pods: []*v1.Pod{ buildPod(nodeMachine1Zone1, labels1, nil), buildPod(nodeMachine1Zone2, labels1, nil), buildPod(nodeMachine1Zone3, labels1, nil), buildPod(nodeMachine2Zone2, labels2, nil), }, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ {Host: nodeMachine1Zone1, Score: 0}, // Pod on node {Host: nodeMachine1Zone2, Score: 0}, // Pod on node @@ -465,12 +465,12 @@ func TestZoneSelectorSpreadPriority(t *testing.T) { }, { pod: buildPod("", labels1, controllerRef("ReplicationController", "name", "abc123")), - pods: []*api.Pod{ + pods: []*v1.Pod{ buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")), buildPod(nodeMachine1Zone2, labels1, controllerRef("ReplicationController", "name", "abc123")), buildPod(nodeMachine1Zone3, labels1, controllerRef("ReplicationController", "name", "abc123")), }, - rcs: []*api.ReplicationController{{Spec: api.ReplicationControllerSpec{Selector: labels1}}}, + rcs: []*v1.ReplicationController{{Spec: v1.ReplicationControllerSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{ // Note that because we put two pods on the same node (nodeMachine1Zone3), // the values here are questionable for zone2, in particular for nodeMachine1Zone2. 
@@ -528,13 +528,13 @@ func TestZoneSpreadPriority(t *testing.T) { nozone := map[string]string{ "name": "value", } - zone0Spec := api.PodSpec{ + zone0Spec := v1.PodSpec{ NodeName: "machine01", } - zone1Spec := api.PodSpec{ + zone1Spec := v1.PodSpec{ NodeName: "machine11", } - zone2Spec := api.PodSpec{ + zone2Spec := v1.PodSpec{ NodeName: "machine21", } labeledNodes := map[string]map[string]string{ @@ -543,15 +543,15 @@ func TestZoneSpreadPriority(t *testing.T) { "machine21": zone2, "machine22": zone2, } tests := []struct { - pod *api.Pod - pods []*api.Pod + pod *v1.Pod + pods []*v1.Pod nodes map[string]map[string]string - services []*api.Service + services []*v1.Service expectedList schedulerapi.HostPriorityList test string }{ { - pod: new(api.Pod), + pod: new(v1.Pod), nodes: labeledNodes, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, @@ -559,8 +559,8 @@ func TestZoneSpreadPriority(t *testing.T) { test: "nothing scheduled", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{{Spec: zone1Spec}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{{Spec: zone1Spec}}, nodes: labeledNodes, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, @@ -568,97 +568,97 @@ func TestZoneSpreadPriority(t *testing.T) { test: "no services", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{{Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{{Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}}, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"key": "value"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "different services", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone0Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 10}, {Host: "machine12", Score: 10}, {Host: "machine21", Score: 0}, {Host: "machine22", Score: 0}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "three pods, one service pod", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, 
ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 5}, {Host: "machine12", Score: 5}, {Host: "machine21", Score: 5}, {Host: "machine22", Score: 5}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "three pods, two service pods on different machines", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: api.NamespaceDefault}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: v1.NamespaceDefault}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1, Namespace: "ns1"}}, }, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}, ObjectMeta: api.ObjectMeta{Namespace: api.NamespaceDefault}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}, ObjectMeta: v1.ObjectMeta{Namespace: v1.NamespaceDefault}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 0}, {Host: "machine12", Score: 0}, {Host: "machine21", Score: 10}, {Host: "machine22", Score: 10}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "three service label match pods in different namespaces", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 6}, {Host: "machine12", Score: 6}, {Host: "machine21", Score: 3}, {Host: "machine22", Score: 3}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "four pods, three service pods", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels2}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, 
- {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels2}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 3}, {Host: "machine12", Score: 3}, {Host: "machine21", Score: 6}, {Host: "machine22", Score: 6}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, test: "service with partial pod label matches", }, { - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}}, - pods: []*api.Pod{ - {Spec: zone0Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone1Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, - {Spec: zone2Spec, ObjectMeta: api.ObjectMeta{Labels: labels1}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + pods: []*v1.Pod{ + {Spec: zone0Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone1Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, + {Spec: zone2Spec, ObjectMeta: v1.ObjectMeta{Labels: labels1}}, }, nodes: labeledNodes, - services: []*api.Service{{Spec: api.ServiceSpec{Selector: labels1}}}, + services: []*v1.Service{{Spec: v1.ServiceSpec{Selector: labels1}}}, expectedList: []schedulerapi.HostPriority{{Host: "machine11", Score: 7}, {Host: "machine12", Score: 7}, {Host: "machine21", Score: 5}, {Host: "machine22", Score: 5}, {Host: "machine01", Score: 0}, {Host: "machine02", Score: 0}}, @@ -682,18 +682,18 @@ func TestZoneSpreadPriority(t *testing.T) { } } -func makeLabeledNodeList(nodeMap map[string]map[string]string) []*api.Node { - nodes := make([]*api.Node, 0, len(nodeMap)) +func makeLabeledNodeList(nodeMap map[string]map[string]string) []*v1.Node { + nodes := make([]*v1.Node, 0, len(nodeMap)) for nodeName, labels := range nodeMap { - nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}}) + nodes = append(nodes, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName, Labels: labels}}) } return nodes } -func makeNodeList(nodeNames []string) []*api.Node { - nodes := make([]*api.Node, 0, len(nodeNames)) +func makeNodeList(nodeNames []string) []*v1.Node { + nodes := make([]*v1.Node, 0, len(nodeNames)) for _, nodeName := range nodeNames { - nodes = append(nodes, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}}) + nodes = append(nodes, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName}}) } return nodes } diff --git a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go index 791a521ce29..d08c12b00f9 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go +++ b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration.go @@ -20,21 +20,21 @@ import ( "fmt" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) // CountIntolerableTaintsPreferNoSchedule gives the count of intolerable taints of a pod with effect 
PreferNoSchedule -func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []api.Toleration) (intolerableTaints int) { +func countIntolerableTaintsPreferNoSchedule(taints []v1.Taint, tolerations []v1.Toleration) (intolerableTaints int) { for i := range taints { taint := &taints[i] // check only on taints that have effect PreferNoSchedule - if taint.Effect != api.TaintEffectPreferNoSchedule { + if taint.Effect != v1.TaintEffectPreferNoSchedule { continue } - if !api.TaintToleratedByTolerations(taint, tolerations) { + if !v1.TaintToleratedByTolerations(taint, tolerations) { intolerableTaints++ } } @@ -42,18 +42,18 @@ func countIntolerableTaintsPreferNoSchedule(taints []api.Taint, tolerations []ap } // getAllTolerationEffectPreferNoSchedule gets the list of all Toleration with Effect PreferNoSchedule -func getAllTolerationPreferNoSchedule(tolerations []api.Toleration) (tolerationList []api.Toleration) { +func getAllTolerationPreferNoSchedule(tolerations []v1.Toleration) (tolerationList []v1.Toleration) { for i := range tolerations { toleration := &tolerations[i] - if len(toleration.Effect) == 0 || toleration.Effect == api.TaintEffectPreferNoSchedule { + if len(toleration.Effect) == 0 || toleration.Effect == v1.TaintEffectPreferNoSchedule { tolerationList = append(tolerationList, *toleration) } } return } -func getTolerationListFromPod(pod *api.Pod) ([]api.Toleration, error) { - tolerations, err := api.GetTolerationsFromPodAnnotations(pod.Annotations) +func getTolerationListFromPod(pod *v1.Pod) ([]v1.Toleration, error) { + tolerations, err := v1.GetTolerationsFromPodAnnotations(pod.Annotations) if err != nil { return nil, err } @@ -61,13 +61,13 @@ func getTolerationListFromPod(pod *api.Pod) ([]api.Toleration, error) { } // ComputeTaintTolerationPriority prepares the priority list for all the nodes based on the number of intolerable taints on the node -func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func ComputeTaintTolerationPriorityMap(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") } - var tolerationList []api.Toleration + var tolerationList []v1.Toleration if priorityMeta, ok := meta.(*priorityMetadata); ok { tolerationList = priorityMeta.podTolerations } else { @@ -78,7 +78,7 @@ func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo } } - taints, err := api.GetTaintsFromNodeAnnotations(node.Annotations) + taints, err := v1.GetTaintsFromNodeAnnotations(node.Annotations) if err != nil { return schedulerapi.HostPriority{}, err } @@ -88,7 +88,7 @@ func ComputeTaintTolerationPriorityMap(pod *api.Pod, meta interface{}, nodeInfo }, nil } -func ComputeTaintTolerationPriorityReduce(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { +func ComputeTaintTolerationPriorityReduce(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error { var maxCount int for i := range result { if result[i].Score > maxCount { diff --git a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go index 93277ea59b3..c71664a816c 100644 --- 
a/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go +++ b/plugin/pkg/scheduler/algorithm/priorities/taint_toleration_test.go @@ -21,29 +21,29 @@ import ( "reflect" "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -func nodeWithTaints(nodeName string, taints []api.Taint) *api.Node { +func nodeWithTaints(nodeName string, taints []v1.Taint) *v1.Node { taintsData, _ := json.Marshal(taints) - return &api.Node{ - ObjectMeta: api.ObjectMeta{ + return &v1.Node{ + ObjectMeta: v1.ObjectMeta{ Name: nodeName, Annotations: map[string]string{ - api.TaintsAnnotationKey: string(taintsData), + v1.TaintsAnnotationKey: string(taintsData), }, }, } } -func podWithTolerations(tolerations []api.Toleration) *api.Pod { +func podWithTolerations(tolerations []v1.Toleration) *v1.Pod { tolerationData, _ := json.Marshal(tolerations) - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Annotations: map[string]string{ - api.TolerationsAnnotationKey: string(tolerationData), + v1.TolerationsAnnotationKey: string(tolerationData), }, }, } @@ -55,30 +55,30 @@ func podWithTolerations(tolerations []api.Toleration) *api.Pod { func TestTaintAndToleration(t *testing.T) { tests := []struct { - pod *api.Pod - nodes []*api.Node + pod *v1.Pod + nodes []*v1.Node expectedList schedulerapi.HostPriorityList test string }{ // basic test case { test: "node with taints tolerated by the pod, gets a higher score than those node with intolerable taints", - pod: podWithTolerations([]api.Toleration{{ + pod: podWithTolerations([]v1.Toleration{{ Key: "foo", - Operator: api.TolerationOpEqual, + Operator: v1.TolerationOpEqual, Value: "bar", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }}), - nodes: []*api.Node{ - nodeWithTaints("nodeA", []api.Taint{{ + nodes: []*v1.Node{ + nodeWithTaints("nodeA", []v1.Taint{{ Key: "foo", Value: "bar", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }}), - nodeWithTaints("nodeB", []api.Taint{{ + nodeWithTaints("nodeB", []v1.Taint{{ Key: "foo", Value: "blah", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }}), }, expectedList: []schedulerapi.HostPriority{ @@ -89,37 +89,37 @@ func TestTaintAndToleration(t *testing.T) { // the count of taints that are tolerated by pod, does not matter. 
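The taint_toleration.go hunk above works in two steps: the map phase counts, per node, the PreferNoSchedule taints the pod does not tolerate, and the reduce phase normalizes those counts into 0-10 scores, pinning the most-tainted node to 0. The sketch below is a minimal, self-contained illustration of that count-then-normalize shape, using simplified stand-ins for the v1 taint and toleration types; it is not the patched code itself.

package main

import "fmt"

// Simplified stand-ins for v1.Taint and v1.Toleration, for illustration only.
type taint struct{ key, value, effect string }
type toleration struct{ key, value, effect string }

// tolerated reports whether any toleration matches the taint. The real
// matcher also handles operators such as Exists; this sketch only checks
// exact key/value equality plus an optional effect.
func tolerated(t taint, tols []toleration) bool {
	for _, tol := range tols {
		if tol.key == t.key && tol.value == t.value &&
			(tol.effect == "" || tol.effect == t.effect) {
			return true
		}
	}
	return false
}

// countIntolerable mirrors the map step: only PreferNoSchedule taints count,
// and only those the pod does not tolerate.
func countIntolerable(taints []taint, tols []toleration) int {
	n := 0
	for _, t := range taints {
		if t.effect != "PreferNoSchedule" {
			continue
		}
		if !tolerated(t, tols) {
			n++
		}
	}
	return n
}

// toScores mirrors the reduce step's shape: the node with the most
// intolerable taints scores 0, a node with none scores 10.
func toScores(counts map[string]int) map[string]float64 {
	max := 0
	for _, c := range counts {
		if c > max {
			max = c
		}
	}
	scores := make(map[string]float64, len(counts))
	for node, c := range counts {
		if max == 0 {
			scores[node] = 10
			continue
		}
		scores[node] = 10 * float64(max-c) / float64(max)
	}
	return scores
}

func main() {
	tols := []toleration{{key: "foo", value: "bar", effect: "PreferNoSchedule"}}
	counts := map[string]int{
		"nodeA": countIntolerable(nil, tols),
		"nodeB": countIntolerable([]taint{{"foo", "bar", "PreferNoSchedule"}}, tols),
		"nodeC": countIntolerable([]taint{{"foo", "blah", "PreferNoSchedule"}}, tols),
	}
	fmt.Println(toScores(counts)) // nodeA:10 nodeB:10 nodeC:0
}

This is also what the surrounding test cases encode: nodes whose PreferNoSchedule taints are all tolerated share the top score, and each additional intolerable taint pushes a node toward 0.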
{ test: "the nodes that all of their taints are tolerated by the pod, get the same score, no matter how many tolerable taints a node has", - pod: podWithTolerations([]api.Toleration{ + pod: podWithTolerations([]v1.Toleration{ { Key: "cpu-type", - Operator: api.TolerationOpEqual, + Operator: v1.TolerationOpEqual, Value: "arm64", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, { Key: "disk-type", - Operator: api.TolerationOpEqual, + Operator: v1.TolerationOpEqual, Value: "ssd", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, }), - nodes: []*api.Node{ - nodeWithTaints("nodeA", []api.Taint{}), - nodeWithTaints("nodeB", []api.Taint{ + nodes: []*v1.Node{ + nodeWithTaints("nodeA", []v1.Taint{}), + nodeWithTaints("nodeB", []v1.Taint{ { Key: "cpu-type", Value: "arm64", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, }), - nodeWithTaints("nodeC", []api.Taint{ + nodeWithTaints("nodeC", []v1.Taint{ { Key: "cpu-type", Value: "arm64", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, { Key: "disk-type", Value: "ssd", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, }), }, @@ -132,30 +132,30 @@ func TestTaintAndToleration(t *testing.T) { // the count of taints on a node that are not tolerated by pod, matters. { test: "the more intolerable taints a node has, the lower score it gets.", - pod: podWithTolerations([]api.Toleration{{ + pod: podWithTolerations([]v1.Toleration{{ Key: "foo", - Operator: api.TolerationOpEqual, + Operator: v1.TolerationOpEqual, Value: "bar", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }}), - nodes: []*api.Node{ - nodeWithTaints("nodeA", []api.Taint{}), - nodeWithTaints("nodeB", []api.Taint{ + nodes: []*v1.Node{ + nodeWithTaints("nodeA", []v1.Taint{}), + nodeWithTaints("nodeB", []v1.Taint{ { Key: "cpu-type", Value: "arm64", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, }), - nodeWithTaints("nodeC", []api.Taint{ + nodeWithTaints("nodeC", []v1.Taint{ { Key: "cpu-type", Value: "arm64", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, { Key: "disk-type", Value: "ssd", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, }), }, @@ -168,37 +168,37 @@ func TestTaintAndToleration(t *testing.T) { // taints-tolerations priority only takes care about the taints and tolerations that have effect PreferNoSchedule { test: "only taints and tolerations that have effect PreferNoSchedule are checked by taints-tolerations priority function", - pod: podWithTolerations([]api.Toleration{ + pod: podWithTolerations([]v1.Toleration{ { Key: "cpu-type", - Operator: api.TolerationOpEqual, + Operator: v1.TolerationOpEqual, Value: "arm64", - Effect: api.TaintEffectNoSchedule, + Effect: v1.TaintEffectNoSchedule, }, { Key: "disk-type", - Operator: api.TolerationOpEqual, + Operator: v1.TolerationOpEqual, Value: "ssd", - Effect: api.TaintEffectNoSchedule, + Effect: v1.TaintEffectNoSchedule, }, }), - nodes: []*api.Node{ - nodeWithTaints("nodeA", []api.Taint{}), - nodeWithTaints("nodeB", []api.Taint{ + nodes: []*v1.Node{ + nodeWithTaints("nodeA", []v1.Taint{}), + nodeWithTaints("nodeB", []v1.Taint{ { Key: "cpu-type", Value: "arm64", - Effect: api.TaintEffectNoSchedule, + Effect: v1.TaintEffectNoSchedule, }, }), - nodeWithTaints("nodeC", []api.Taint{ + 
nodeWithTaints("nodeC", []v1.Taint{ { Key: "cpu-type", Value: "arm64", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, { Key: "disk-type", Value: "ssd", - Effect: api.TaintEffectPreferNoSchedule, + Effect: v1.TaintEffectPreferNoSchedule, }, }), }, diff --git a/plugin/pkg/scheduler/algorithm/priorities/test_util.go b/plugin/pkg/scheduler/algorithm/priorities/test_util.go index 76fd847aa49..a09a5943485 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/test_util.go +++ b/plugin/pkg/scheduler/algorithm/priorities/test_util.go @@ -17,22 +17,22 @@ limitations under the License. package priorities import ( - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -func makeNode(node string, milliCPU, memory int64) *api.Node { - return &api.Node{ - ObjectMeta: api.ObjectMeta{Name: node}, - Status: api.NodeStatus{ - Capacity: api.ResourceList{ +func makeNode(node string, milliCPU, memory int64) *v1.Node { + return &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: node}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ "cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), "memory": *resource.NewQuantity(memory, resource.BinarySI), }, - Allocatable: api.ResourceList{ + Allocatable: v1.ResourceList{ "cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), "memory": *resource.NewQuantity(memory, resource.BinarySI), }, @@ -41,7 +41,7 @@ func makeNode(node string, milliCPU, memory int64) *api.Node { } func priorityFunction(mapFn algorithm.PriorityMapFunction, reduceFn algorithm.PriorityReduceFunction) algorithm.PriorityFunction { - return func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { + return func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := make(schedulerapi.HostPriorityList, 0, len(nodes)) for i := range nodes { hostResult, err := mapFn(pod, nil, nodeNameToInfo[nodes[i].Name]) diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go b/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go index 8588471d479..b928fefa517 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go +++ b/plugin/pkg/scheduler/algorithm/priorities/util/non_zero.go @@ -16,9 +16,7 @@ limitations under the License. 
package util -import ( - "k8s.io/kubernetes/pkg/api" -) +import "k8s.io/kubernetes/pkg/api/v1" // For each of these resources, a pod that doesn't request the resource explicitly // will be treated as having requested the amount indicated below, for the purpose @@ -32,18 +30,18 @@ const DefaultMilliCpuRequest int64 = 100 // 0.1 core const DefaultMemoryRequest int64 = 200 * 1024 * 1024 // 200 MB // GetNonzeroRequests returns the default resource request if none is found or what is provided on the request -// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity api.ResourceList" +// TODO: Consider setting default as a fixed fraction of machine capacity (take "capacity v1.ResourceList" // as an additional argument here) rather than using constants -func GetNonzeroRequests(requests *api.ResourceList) (int64, int64) { +func GetNonzeroRequests(requests *v1.ResourceList) (int64, int64) { var outMilliCPU, outMemory int64 // Override if un-set, but not if explicitly set to zero - if _, found := (*requests)[api.ResourceCPU]; !found { + if _, found := (*requests)[v1.ResourceCPU]; !found { outMilliCPU = DefaultMilliCpuRequest } else { outMilliCPU = requests.Cpu().MilliValue() } // Override if un-set, but not if explicitly set to zero - if _, found := (*requests)[api.ResourceMemory]; !found { + if _, found := (*requests)[v1.ResourceMemory]; !found { outMemory = DefaultMemoryRequest } else { outMemory = requests.Memory().Value() diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/topologies.go b/plugin/pkg/scheduler/algorithm/priorities/util/topologies.go index 36890cb0e9d..e5d091c27c1 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/util/topologies.go +++ b/plugin/pkg/scheduler/algorithm/priorities/util/topologies.go @@ -17,8 +17,8 @@ limitations under the License. package util import ( - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/sets" ) @@ -27,7 +27,7 @@ import ( // according to the namespaces indicated in podAffinityTerm. // 1. If the namespaces is nil considers the given pod's namespace // 2. If the namespaces is empty list then considers all the namespaces -func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffinityTerm) sets.String { +func getNamespacesFromPodAffinityTerm(pod *v1.Pod, podAffinityTerm v1.PodAffinityTerm) sets.String { names := sets.String{} if podAffinityTerm.Namespaces == nil { names.Insert(pod.Namespace) @@ -39,7 +39,7 @@ func getNamespacesFromPodAffinityTerm(pod *api.Pod, podAffinityTerm api.PodAffin // PodMatchesTermsNamespaceAndSelector returns true if the given // matches the namespace and selector defined by `s . -func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, term *api.PodAffinityTerm) (bool, error) { +func PodMatchesTermsNamespaceAndSelector(pod *v1.Pod, affinityPod *v1.Pod, term *v1.PodAffinityTerm) (bool, error) { namespaces := getNamespacesFromPodAffinityTerm(affinityPod, *term) if len(namespaces) != 0 && !namespaces.Has(pod.Namespace) { return false, nil @@ -53,7 +53,7 @@ func PodMatchesTermsNamespaceAndSelector(pod *api.Pod, affinityPod *api.Pod, ter } // nodesHaveSameTopologyKeyInternal checks if nodeA and nodeB have same label value with given topologyKey as label key. 
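The non_zero.go helper above backfills a default request (100 milli-CPU, 200 MB of memory) for containers that do not set one, so the request-based priorities never divide by or score against zero. Below is a minimal sketch of that defaulting rule, using plain integers and explicit "was this set?" flags instead of a v1.ResourceList; it only illustrates the rule, it is not the patched helper.

package main

import "fmt"

// Defaults copied from the non_zero.go hunk: applied only when a pod does not
// request the resource explicitly.
const (
	defaultMilliCPURequest int64 = 100               // 0.1 core
	defaultMemoryRequest   int64 = 200 * 1024 * 1024 // 200 MB
)

// nonzeroRequests is a simplified stand-in for GetNonzeroRequests: it takes
// explicit set flags instead of checking key presence in a v1.ResourceList.
func nonzeroRequests(milliCPU, memory int64, cpuSet, memSet bool) (int64, int64) {
	if !cpuSet {
		milliCPU = defaultMilliCPURequest
	}
	if !memSet {
		memory = defaultMemoryRequest
	}
	return milliCPU, memory
}

func main() {
	// A pod with no requests at all is still charged 100m CPU / 200 MB when
	// the request-based priorities compute per-node utilization.
	cpu, mem := nonzeroRequests(0, 0, false, false)
	fmt.Println(cpu, mem) // 100 209715200

	// An explicit zero request is respected, not overridden.
	cpu, mem = nonzeroRequests(0, 0, true, true)
	fmt.Println(cpu, mem) // 0 0
}

The real helper distinguishes "unset" from "explicitly zero" by checking for the resource key's presence in the ResourceList, which is what the set flags stand in for here.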
-func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *api.Node, topologyKey string) bool { +func nodesHaveSameTopologyKeyInternal(nodeA, nodeB *v1.Node, topologyKey string) bool { return nodeA.Labels != nil && nodeB.Labels != nil && len(nodeA.Labels[topologyKey]) > 0 && nodeA.Labels[topologyKey] == nodeB.Labels[topologyKey] } @@ -63,7 +63,7 @@ type Topologies struct { // NodesHaveSameTopologyKey checks if nodeA and nodeB have same label value with given topologyKey as label key. // If the topologyKey is nil/empty, check if the two nodes have any of the default topologyKeys, and have same corresponding label value. -func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *api.Node, topologyKey string) bool { +func (tps *Topologies) NodesHaveSameTopologyKey(nodeA, nodeB *v1.Node, topologyKey string) bool { if len(topologyKey) == 0 { // assumes this is allowed only for PreferredDuringScheduling pod anti-affinity (ensured by api/validation) for _, defaultKey := range tps.DefaultKeys { diff --git a/plugin/pkg/scheduler/algorithm/priorities/util/util.go b/plugin/pkg/scheduler/algorithm/priorities/util/util.go index 0547e0548c4..6f086b59a0d 100644 --- a/plugin/pkg/scheduler/algorithm/priorities/util/util.go +++ b/plugin/pkg/scheduler/algorithm/priorities/util/util.go @@ -16,11 +16,9 @@ limitations under the License. package util -import ( - "k8s.io/kubernetes/pkg/api" -) +import "k8s.io/kubernetes/pkg/api/v1" -func GetControllerRef(pod *api.Pod) *api.OwnerReference { +func GetControllerRef(pod *v1.Pod) *v1.OwnerReference { if len(pod.OwnerReferences) == 0 { return nil } diff --git a/plugin/pkg/scheduler/algorithm/scheduler_interface.go b/plugin/pkg/scheduler/algorithm/scheduler_interface.go index 9b4611e397e..761bce0985a 100644 --- a/plugin/pkg/scheduler/algorithm/scheduler_interface.go +++ b/plugin/pkg/scheduler/algorithm/scheduler_interface.go @@ -17,7 +17,7 @@ limitations under the License. package algorithm import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" ) @@ -28,16 +28,16 @@ type SchedulerExtender interface { // Filter based on extender-implemented predicate functions. The filtered list is // expected to be a subset of the supplied list. failedNodesMap optionally contains // the list of failed nodes and failure reasons. - Filter(pod *api.Pod, nodes []*api.Node) (filteredNodes []*api.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) + Filter(pod *v1.Pod, nodes []*v1.Node) (filteredNodes []*v1.Node, failedNodesMap schedulerapi.FailedNodesMap, err error) // Prioritize based on extender-implemented priority functions. The returned scores & weight // are used to compute the weighted score for an extender. The weighted scores are added to // the scores computed by Kubernetes scheduler. The total scores are used to do the host selection. - Prioritize(pod *api.Pod, nodes []*api.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) + Prioritize(pod *v1.Pod, nodes []*v1.Node) (hostPriorities *schedulerapi.HostPriorityList, weight int, err error) } // ScheduleAlgorithm is an interface implemented by things that know how to schedule pods // onto machines. 
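The ScheduleAlgorithm contract declared just below is small: given a pod and a node lister, return the name of the machine to bind to. A minimal, self-contained sketch of an implementation of that shape follows, using simplified stand-ins for the pod and lister types rather than the real v1/algorithm ones.

package main

import (
	"errors"
	"fmt"
)

// Simplified stand-ins: the real interface takes a *v1.Pod and an
// algorithm.NodeLister; here node names stand in for nodes.
type pod struct{ name string }

type nodeLister interface {
	List() ([]string, error)
}

type fakeNodeLister []string

func (f fakeNodeLister) List() ([]string, error) { return f, nil }

// scheduleAlgorithm mirrors the shape of the ScheduleAlgorithm contract:
// given a pod and a lister, return the selected machine's name.
type scheduleAlgorithm interface {
	Schedule(p *pod, nl nodeLister) (selectedMachine string, err error)
}

// firstFit is a deliberately trivial algorithm: it picks the first listed
// node. The real generic scheduler runs predicates and priorities here.
type firstFit struct{}

func (firstFit) Schedule(p *pod, nl nodeLister) (string, error) {
	nodes, err := nl.List()
	if err != nil {
		return "", err
	}
	if len(nodes) == 0 {
		return "", errors.New("no nodes available to schedule pod " + p.name)
	}
	return nodes[0], nil
}

func main() {
	var alg scheduleAlgorithm = firstFit{}
	machine, err := alg.Schedule(&pod{name: "nginx"}, fakeNodeLister{"machine1", "machine2"})
	fmt.Println(machine, err) // machine1 <nil>
}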
type ScheduleAlgorithm interface { - Schedule(*api.Pod, NodeLister) (selectedMachine string, err error) + Schedule(*v1.Pod, NodeLister) (selectedMachine string, err error) } diff --git a/plugin/pkg/scheduler/algorithm/scheduler_interface_test.go b/plugin/pkg/scheduler/algorithm/scheduler_interface_test.go index fe9641c2913..d3940bee0d4 100755 --- a/plugin/pkg/scheduler/algorithm/scheduler_interface_test.go +++ b/plugin/pkg/scheduler/algorithm/scheduler_interface_test.go @@ -19,7 +19,7 @@ package algorithm import ( "testing" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" ) // Some functions used by multiple scheduler tests. @@ -31,7 +31,7 @@ type schedulerTester struct { } // Call if you know exactly where pod should get scheduled. -func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { +func (st *schedulerTester) expectSchedule(pod *v1.Pod, expected string) { actual, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod) @@ -43,7 +43,7 @@ func (st *schedulerTester) expectSchedule(pod *api.Pod, expected string) { } // Call if you can't predict where pod will be scheduled. -func (st *schedulerTester) expectSuccess(pod *api.Pod) { +func (st *schedulerTester) expectSuccess(pod *v1.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err != nil { st.t.Errorf("Unexpected error %v\nTried to schedule: %#v", err, pod) @@ -52,7 +52,7 @@ func (st *schedulerTester) expectSuccess(pod *api.Pod) { } // Call if pod should *not* schedule. -func (st *schedulerTester) expectFailure(pod *api.Pod) { +func (st *schedulerTester) expectFailure(pod *v1.Pod) { _, err := st.scheduler.Schedule(pod, st.nodeLister) if err == nil { st.t.Error("Unexpected non-error") diff --git a/plugin/pkg/scheduler/algorithm/types.go b/plugin/pkg/scheduler/algorithm/types.go index 99c29a5cad6..b889187f8b2 100644 --- a/plugin/pkg/scheduler/algorithm/types.go +++ b/plugin/pkg/scheduler/algorithm/types.go @@ -17,7 +17,7 @@ limitations under the License. package algorithm import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -25,25 +25,25 @@ import ( // FitPredicate is a function that indicates if a pod fits into an existing node. // The failure information is given by the error. // TODO: Change interface{} to a specific type. -type FitPredicate func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error) +type FitPredicate func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []PredicateFailureReason, error) // PriorityMapFunction is a function that computes per-node results for a given node. // TODO: Figure out the exact API of this method. // TODO: Change interface{} to a specific type. -type PriorityMapFunction func(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) +type PriorityMapFunction func(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) // PriorityReduceFunction is a function that aggregated per-node results and computes // final scores for all nodes. // TODO: Figure out the exact API of this method. // TODO: Change interface{} to a specific type. 
-type PriorityReduceFunction func(pod *api.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error +type PriorityReduceFunction func(pod *v1.Pod, meta interface{}, nodeNameToInfo map[string]*schedulercache.NodeInfo, result schedulerapi.HostPriorityList) error // MetdataProducer is a function that computes metadata for a given pod. -type MetadataProducer func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} +type MetadataProducer func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} // DEPRECATED // Use Map-Reduce pattern for priority functions. -type PriorityFunction func(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) +type PriorityFunction func(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) type PriorityConfig struct { Map PriorityMapFunction @@ -55,7 +55,7 @@ type PriorityConfig struct { } // EmptyMetadataProducer returns a no-op MetadataProducer type. -func EmptyMetadataProducer(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { +func EmptyMetadataProducer(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo) interface{} { return nil } @@ -63,4 +63,4 @@ type PredicateFailureReason interface { GetReason() string } -type GetEquivalencePodFunc func(pod *api.Pod) interface{} +type GetEquivalencePodFunc func(pod *v1.Pod) interface{} diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go b/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go index 7e3a4d64633..2a88f436c0e 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/compatibility_test.go @@ -23,9 +23,9 @@ import ( "net/http/httptest" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/util/sets" @@ -338,9 +338,9 @@ func TestCompatibility_v1_Scheduler(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) - if _, err := factory.NewConfigFactory(client, "some-scheduler-name", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains).CreateFromConfig(policy); err != nil { + if _, err := factory.NewConfigFactory(client, "some-scheduler-name", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains).CreateFromConfig(policy); err != nil { t.Errorf("%s: Error constructing: %v", v, err) continue } diff --git a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go index 53fc9e6b4b2..f7ac0cf4021 100644 --- a/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go +++ b/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go @@ 
-21,7 +21,7 @@ import ( "os" "strconv" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/cloudprovider/providers/aws" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/plugin/pkg/scheduler" @@ -228,7 +228,7 @@ func copyAndReplace(set sets.String, replaceWhat, replaceWith string) sets.Strin } // GetEquivalencePod returns a EquivalencePod which contains a group of pod attributes which can be reused. -func GetEquivalencePod(pod *api.Pod) interface{} { +func GetEquivalencePod(pod *v1.Pod) interface{} { equivalencePod := EquivalencePod{} // For now we only consider pods: // 1. OwnerReferences is Controller @@ -260,5 +260,5 @@ func isValidControllerKind(kind string) bool { // EquivalencePod is a group of pod attributes which can be reused as equivalence to schedule other pods. type EquivalencePod struct { - ControllerRef api.OwnerReference + ControllerRef v1.OwnerReference } diff --git a/plugin/pkg/scheduler/api/types.go b/plugin/pkg/scheduler/api/types.go index 9e83c7802fc..7c506fa7b14 100644 --- a/plugin/pkg/scheduler/api/types.go +++ b/plugin/pkg/scheduler/api/types.go @@ -19,8 +19,8 @@ package api import ( "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/unversioned" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/restclient" ) @@ -134,9 +134,9 @@ type ExtenderConfig struct { // nodes for a pod. type ExtenderArgs struct { // Pod being scheduled - Pod api.Pod `json:"pod"` + Pod v1.Pod `json:"pod"` // List of candidate nodes where the pod can be scheduled - Nodes api.NodeList `json:"nodes"` + Nodes v1.NodeList `json:"nodes"` } // FailedNodesMap represents the filtered out nodes, with node names and failure messages @@ -145,7 +145,7 @@ type FailedNodesMap map[string]string // ExtenderFilterResult represents the results of a filter call to an extender type ExtenderFilterResult struct { // Filtered set of nodes where the pod can be scheduled - Nodes api.NodeList `json:"nodes,omitempty"` + Nodes v1.NodeList `json:"nodes,omitempty"` // Filtered out nodes where the pod can't be scheduled and the failure messages FailedNodes FailedNodesMap `json:"failedNodes,omitempty"` // Error message indicating failure diff --git a/plugin/pkg/scheduler/equivalence_cache.go b/plugin/pkg/scheduler/equivalence_cache.go index cd13b250aea..1770fb91834 100644 --- a/plugin/pkg/scheduler/equivalence_cache.go +++ b/plugin/pkg/scheduler/equivalence_cache.go @@ -17,13 +17,15 @@ limitations under the License. 
package scheduler import ( - "github.com/golang/groupcache/lru" "hash/adler32" - "k8s.io/kubernetes/pkg/api" + "github.com/golang/groupcache/lru" + + "sync" + + "k8s.io/kubernetes/pkg/api/v1" hashutil "k8s.io/kubernetes/pkg/util/hash" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" - "sync" ) // TODO(harryz) figure out the right number for this, 4096 may be too big @@ -68,7 +70,7 @@ func (ec *EquivalenceCache) addPodPredicate(podKey uint64, nodeName string, fit } // AddPodPredicatesCache cache pod predicate for equivalence class -func (ec *EquivalenceCache) AddPodPredicatesCache(pod *api.Pod, fitNodeList []*api.Node, failedPredicates *FailedPredicateMap) { +func (ec *EquivalenceCache) AddPodPredicatesCache(pod *v1.Pod, fitNodeList []*v1.Node, failedPredicates *FailedPredicateMap) { equivalenceHash := ec.hashEquivalencePod(pod) for _, fitNode := range fitNodeList { @@ -80,10 +82,10 @@ func (ec *EquivalenceCache) AddPodPredicatesCache(pod *api.Pod, fitNodeList []*a } // GetCachedPredicates gets cached predicates for equivalence class -func (ec *EquivalenceCache) GetCachedPredicates(pod *api.Pod, nodes []*api.Node) ([]*api.Node, FailedPredicateMap, []*api.Node) { - fitNodeList := []*api.Node{} +func (ec *EquivalenceCache) GetCachedPredicates(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, FailedPredicateMap, []*v1.Node) { + fitNodeList := []*v1.Node{} failedPredicates := FailedPredicateMap{} - noCacheNodeList := []*api.Node{} + noCacheNodeList := []*v1.Node{} equivalenceHash := ec.hashEquivalencePod(pod) for _, node := range nodes { findCache := false @@ -124,7 +126,7 @@ func (ec *EquivalenceCache) SendClearAllCacheReq() { } // hashEquivalencePod returns the hash of equivalence pod. -func (ec *EquivalenceCache) hashEquivalencePod(pod *api.Pod) uint64 { +func (ec *EquivalenceCache) hashEquivalencePod(pod *v1.Pod) uint64 { equivalencePod := ec.getEquivalencePod(pod) hash := adler32.New() hashutil.DeepHashObject(hash, equivalencePod) diff --git a/plugin/pkg/scheduler/extender.go b/plugin/pkg/scheduler/extender.go index 846e6681bc3..fd01b7328da 100644 --- a/plugin/pkg/scheduler/extender.go +++ b/plugin/pkg/scheduler/extender.go @@ -24,7 +24,7 @@ import ( "net/http" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/restclient" utilnet "k8s.io/kubernetes/pkg/util/net" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" @@ -94,20 +94,20 @@ func NewHTTPExtender(config *schedulerapi.ExtenderConfig, apiVersion string) (al // Filter based on extender implemented predicate functions. The filtered list is // expected to be a subset of the supplied list. failedNodesMap optionally contains // the list of failed nodes and failure reasons. 
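On the extender's side, the filter verb receives an ExtenderArgs payload and answers with an ExtenderFilterResult, both now built from v1 types. A minimal HTTP handler sketch (hypothetical, not part of this patch; assumes "encoding/json" and "net/http" plus the schedulerapi import) that accepts every node it is offered:

// exampleFilterHandler decodes the scheduler's ExtenderArgs and returns an
// ExtenderFilterResult that keeps all candidate nodes.
func exampleFilterHandler(w http.ResponseWriter, r *http.Request) {
	var args schedulerapi.ExtenderArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}
	result := schedulerapi.ExtenderFilterResult{
		Nodes:       args.Nodes,                    // keep every candidate node
		FailedNodes: schedulerapi.FailedNodesMap{}, // nothing filtered out
	}
	json.NewEncoder(w).Encode(&result)
}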
-func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, schedulerapi.FailedNodesMap, error) { +func (h *HTTPExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, schedulerapi.FailedNodesMap, error) { var result schedulerapi.ExtenderFilterResult if h.filterVerb == "" { return nodes, schedulerapi.FailedNodesMap{}, nil } - nodeItems := make([]api.Node, 0, len(nodes)) + nodeItems := make([]v1.Node, 0, len(nodes)) for _, node := range nodes { nodeItems = append(nodeItems, *node) } args := schedulerapi.ExtenderArgs{ Pod: *pod, - Nodes: api.NodeList{Items: nodeItems}, + Nodes: v1.NodeList{Items: nodeItems}, } if err := h.send(h.filterVerb, &args, &result); err != nil { @@ -117,7 +117,7 @@ func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, sch return nil, nil, fmt.Errorf(result.Error) } - nodeResult := make([]*api.Node, 0, len(result.Nodes.Items)) + nodeResult := make([]*v1.Node, 0, len(result.Nodes.Items)) for i := range result.Nodes.Items { nodeResult = append(nodeResult, &result.Nodes.Items[i]) } @@ -127,7 +127,7 @@ func (h *HTTPExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, sch // Prioritize based on extender implemented priority functions. Weight*priority is added // up for each such priority function. The returned score is added to the score computed // by Kubernetes scheduler. The total score is used to do the host selection. -func (h *HTTPExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) { +func (h *HTTPExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) { var result schedulerapi.HostPriorityList if h.prioritizeVerb == "" { @@ -138,13 +138,13 @@ func (h *HTTPExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerap return &result, 0, nil } - nodeItems := make([]api.Node, 0, len(nodes)) + nodeItems := make([]v1.Node, 0, len(nodes)) for _, node := range nodes { nodeItems = append(nodeItems, *node) } args := schedulerapi.ExtenderArgs{ Pod: *pod, - Nodes: api.NodeList{Items: nodeItems}, + Nodes: v1.NodeList{Items: nodeItems}, } if err := h.send(h.prioritizeVerb, &args, &result); err != nil { diff --git a/plugin/pkg/scheduler/extender_test.go b/plugin/pkg/scheduler/extender_test.go index db82927c9f2..515d7635c9d 100644 --- a/plugin/pkg/scheduler/extender_test.go +++ b/plugin/pkg/scheduler/extender_test.go @@ -21,52 +21,52 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" schedulerapi "k8s.io/kubernetes/plugin/pkg/scheduler/api" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -type fitPredicate func(pod *api.Pod, node *api.Node) (bool, error) -type priorityFunc func(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) +type fitPredicate func(pod *v1.Pod, node *v1.Node) (bool, error) +type priorityFunc func(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) type priorityConfig struct { function priorityFunc weight int } -func errorPredicateExtender(pod *api.Pod, node *api.Node) (bool, error) { +func errorPredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) { return false, fmt.Errorf("Some error") } -func falsePredicateExtender(pod *api.Pod, node *api.Node) (bool, error) { +func falsePredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) { return false, nil } -func truePredicateExtender(pod *api.Pod, node *api.Node) (bool, error) { +func 
truePredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) { return true, nil } -func machine1PredicateExtender(pod *api.Pod, node *api.Node) (bool, error) { +func machine1PredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) { if node.Name == "machine1" { return true, nil } return false, nil } -func machine2PredicateExtender(pod *api.Pod, node *api.Node) (bool, error) { +func machine2PredicateExtender(pod *v1.Pod, node *v1.Node) (bool, error) { if node.Name == "machine2" { return true, nil } return false, nil } -func errorPrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) { +func errorPrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) { return &schedulerapi.HostPriorityList{}, fmt.Errorf("Some error") } -func machine1PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) { +func machine1PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) { result := schedulerapi.HostPriorityList{} for _, node := range nodes { score := 1 @@ -78,7 +78,7 @@ func machine1PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi return &result, nil } -func machine2PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, error) { +func machine2PrioritizerExtender(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, error) { result := schedulerapi.HostPriorityList{} for _, node := range nodes { score := 1 @@ -90,7 +90,7 @@ func machine2PrioritizerExtender(pod *api.Pod, nodes []*api.Node) (*schedulerapi return &result, nil } -func machine2Prioritizer(_ *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { +func machine2Prioritizer(_ *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := []schedulerapi.HostPriority{} for _, node := range nodes { score := 1 @@ -108,15 +108,15 @@ type FakeExtender struct { weight int } -func (f *FakeExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, schedulerapi.FailedNodesMap, error) { - filtered := []*api.Node{} +func (f *FakeExtender) Filter(pod *v1.Pod, nodes []*v1.Node) ([]*v1.Node, schedulerapi.FailedNodesMap, error) { + filtered := []*v1.Node{} failedNodesMap := schedulerapi.FailedNodesMap{} for _, node := range nodes { fits := true for _, predicate := range f.predicates { fit, err := predicate(pod, node) if err != nil { - return []*api.Node{}, schedulerapi.FailedNodesMap{}, err + return []*v1.Node{}, schedulerapi.FailedNodesMap{}, err } if !fit { fits = false @@ -132,7 +132,7 @@ func (f *FakeExtender) Filter(pod *api.Pod, nodes []*api.Node) ([]*api.Node, sch return filtered, failedNodesMap, nil } -func (f *FakeExtender) Prioritize(pod *api.Pod, nodes []*api.Node) (*schedulerapi.HostPriorityList, int, error) { +func (f *FakeExtender) Prioritize(pod *v1.Pod, nodes []*v1.Node) (*schedulerapi.HostPriorityList, int, error) { result := schedulerapi.HostPriorityList{} combinedScores := map[string]int{} for _, prioritizer := range f.prioritizers { @@ -164,8 +164,8 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { extenderPredicates []fitPredicate extenderPrioritizers []priorityConfig nodes []string - pod *api.Pod - pods []*api.Pod + pod *v1.Pod + pods []*v1.Pod expectedHost string expectsErr bool }{ @@ -288,7 +288,7 @@ func TestGenericSchedulerWithExtenders(t *testing.T) { cache.AddPod(pod) } for _, name 
:= range test.nodes { - cache.AddNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}}) + cache.AddNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}}) } scheduler := NewGenericScheduler( cache, test.predicates, algorithm.EmptyMetadataProducer, test.prioritizers, algorithm.EmptyMetadataProducer, extenders) diff --git a/plugin/pkg/scheduler/factory/factory.go b/plugin/pkg/scheduler/factory/factory.go index 23fd57f274f..d47434442ed 100644 --- a/plugin/pkg/scheduler/factory/factory.go +++ b/plugin/pkg/scheduler/factory/factory.go @@ -27,8 +27,9 @@ import ( "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/errors" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/controller/informers" "k8s.io/kubernetes/pkg/fields" "k8s.io/kubernetes/pkg/types" @@ -42,7 +43,7 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/api/validation" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/apis/extensions" + extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) @@ -110,7 +111,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA schedulerCache := schedulercache.New(30*time.Second, stopEverything) // TODO: pass this in as an argument... - informerFactory := informers.NewSharedInformerFactory(client, 0) + informerFactory := informers.NewSharedInformerFactory(client, nil, 0) pvcInformer := informerFactory.PersistentVolumeClaims() c := &ConfigFactory{ @@ -141,7 +142,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA // they may need to call. 
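Callers now hand the factory a versioned (release_1_5) clientset; a sketch of that wiring, with a placeholder API server address and assuming the restclient and registered imports used by this patch's factory tests:

// exampleNewFactory is illustrative only: build the release_1_5 clientset and
// pass it to NewConfigFactory with the v1 defaults adopted in this patch.
func exampleNewFactory() *ConfigFactory {
	cfg := &restclient.Config{
		Host:          "http://localhost:8080", // placeholder address
		ContentConfig: restclient.ContentConfig{GroupVersion: &registered.GroupOrDie(v1.GroupName).GroupVersion},
	}
	client := clientset.NewForConfigOrDie(cfg)
	return NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains)
}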
c.ScheduledPodLister.Indexer, c.scheduledPodPopulator = cache.NewIndexerInformer( c.createAssignedNonTerminatedPodLW(), - &api.Pod{}, + &v1.Pod{}, 0, cache.ResourceEventHandlerFuncs{ AddFunc: c.addPodToCache, @@ -153,7 +154,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA c.NodeLister.Store, c.nodePopulator = cache.NewInformer( c.createNodeLW(), - &api.Node{}, + &v1.Node{}, 0, cache.ResourceEventHandlerFuncs{ AddFunc: c.addNodeToCache, @@ -165,14 +166,14 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA // TODO(harryz) need to fill all the handlers here and below for equivalence cache c.PVLister.Store, c.pvPopulator = cache.NewInformer( c.createPersistentVolumeLW(), - &api.PersistentVolume{}, + &v1.PersistentVolume{}, 0, cache.ResourceEventHandlerFuncs{}, ) c.ServiceLister.Indexer, c.servicePopulator = cache.NewIndexerInformer( c.createServiceLW(), - &api.Service{}, + &v1.Service{}, 0, cache.ResourceEventHandlerFuncs{}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, @@ -180,7 +181,7 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA c.ControllerLister.Indexer, c.controllerPopulator = cache.NewIndexerInformer( c.createControllerLW(), - &api.ReplicationController{}, + &v1.ReplicationController{}, 0, cache.ResourceEventHandlerFuncs{}, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, @@ -191,9 +192,9 @@ func NewConfigFactory(client clientset.Interface, schedulerName string, hardPodA // TODO(harryz) need to update all the handlers here and below for equivalence cache func (c *ConfigFactory) addPodToCache(obj interface{}) { - pod, ok := obj.(*api.Pod) + pod, ok := obj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert to *api.Pod: %v", obj) + glog.Errorf("cannot convert to *v1.Pod: %v", obj) return } @@ -203,14 +204,14 @@ func (c *ConfigFactory) addPodToCache(obj interface{}) { } func (c *ConfigFactory) updatePodInCache(oldObj, newObj interface{}) { - oldPod, ok := oldObj.(*api.Pod) + oldPod, ok := oldObj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert oldObj to *api.Pod: %v", oldObj) + glog.Errorf("cannot convert oldObj to *v1.Pod: %v", oldObj) return } - newPod, ok := newObj.(*api.Pod) + newPod, ok := newObj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert newObj to *api.Pod: %v", newObj) + glog.Errorf("cannot convert newObj to *v1.Pod: %v", newObj) return } @@ -220,19 +221,19 @@ func (c *ConfigFactory) updatePodInCache(oldObj, newObj interface{}) { } func (c *ConfigFactory) deletePodFromCache(obj interface{}) { - var pod *api.Pod + var pod *v1.Pod switch t := obj.(type) { - case *api.Pod: + case *v1.Pod: pod = t case cache.DeletedFinalStateUnknown: var ok bool - pod, ok = t.Obj.(*api.Pod) + pod, ok = t.Obj.(*v1.Pod) if !ok { - glog.Errorf("cannot convert to *api.Pod: %v", t.Obj) + glog.Errorf("cannot convert to *v1.Pod: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *api.Pod: %v", t) + glog.Errorf("cannot convert to *v1.Pod: %v", t) return } if err := c.schedulerCache.RemovePod(pod); err != nil { @@ -241,9 +242,9 @@ func (c *ConfigFactory) deletePodFromCache(obj interface{}) { } func (c *ConfigFactory) addNodeToCache(obj interface{}) { - node, ok := obj.(*api.Node) + node, ok := obj.(*v1.Node) if !ok { - glog.Errorf("cannot convert to *api.Node: %v", obj) + glog.Errorf("cannot convert to *v1.Node: %v", obj) return } @@ -253,14 +254,14 @@ func (c *ConfigFactory) addNodeToCache(obj interface{}) { } func (c *ConfigFactory) 
updateNodeInCache(oldObj, newObj interface{}) { - oldNode, ok := oldObj.(*api.Node) + oldNode, ok := oldObj.(*v1.Node) if !ok { - glog.Errorf("cannot convert oldObj to *api.Node: %v", oldObj) + glog.Errorf("cannot convert oldObj to *v1.Node: %v", oldObj) return } - newNode, ok := newObj.(*api.Node) + newNode, ok := newObj.(*v1.Node) if !ok { - glog.Errorf("cannot convert newObj to *api.Node: %v", newObj) + glog.Errorf("cannot convert newObj to *v1.Node: %v", newObj) return } @@ -270,19 +271,19 @@ func (c *ConfigFactory) updateNodeInCache(oldObj, newObj interface{}) { } func (c *ConfigFactory) deleteNodeFromCache(obj interface{}) { - var node *api.Node + var node *v1.Node switch t := obj.(type) { - case *api.Node: + case *v1.Node: node = t case cache.DeletedFinalStateUnknown: var ok bool - node, ok = t.Obj.(*api.Node) + node, ok = t.Obj.(*v1.Node) if !ok { - glog.Errorf("cannot convert to *api.Node: %v", t.Obj) + glog.Errorf("cannot convert to *v1.Node: %v", t.Obj) return } default: - glog.Errorf("cannot convert to *api.Node: %v", t) + glog.Errorf("cannot convert to *v1.Node: %v", t) return } if err := c.schedulerCache.RemoveNode(node); err != nil { @@ -386,7 +387,7 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys sets.String, Algorithm: algo, Binder: &binder{f.Client}, PodConditionUpdater: &podConditionUpdater{f.Client}, - NextPod: func() *api.Pod { + NextPod: func() *v1.Pod { return f.getNextPod() }, Error: f.makeDefaultErrorFunc(&podBackoff, f.PodQueue), @@ -454,7 +455,7 @@ func (f *ConfigFactory) getPluginArgs() (*PluginFactoryArgs, error) { func (f *ConfigFactory) Run() { // Watch and queue pods that need scheduling. - cache.NewReflector(f.createUnassignedNonTerminatedPodLW(), &api.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything) + cache.NewReflector(f.createUnassignedNonTerminatedPodLW(), &v1.Pod{}, f.PodQueue, 0).RunUntil(f.StopEverything) // Begin populating scheduled pods. go f.scheduledPodPopulator.Run(f.StopEverything) @@ -481,9 +482,9 @@ func (f *ConfigFactory) Run() { cache.NewReflector(f.createReplicaSetLW(), &extensions.ReplicaSet{}, f.ReplicaSetLister.Indexer, 0).RunUntil(f.StopEverything) } -func (f *ConfigFactory) getNextPod() *api.Pod { +func (f *ConfigFactory) getNextPod() *v1.Pod { for { - pod := cache.Pop(f.PodQueue).(*api.Pod) + pod := cache.Pop(f.PodQueue).(*v1.Pod) if f.responsibleForPod(pod) { glog.V(4).Infof("About to try and schedule pod %v", pod.Name) return pod @@ -491,8 +492,8 @@ func (f *ConfigFactory) getNextPod() *api.Pod { } } -func (f *ConfigFactory) responsibleForPod(pod *api.Pod) bool { - if f.SchedulerName == api.DefaultSchedulerName { +func (f *ConfigFactory) responsibleForPod(pod *v1.Pod) bool { + if f.SchedulerName == v1.DefaultSchedulerName { return pod.Annotations[SchedulerAnnotationKey] == f.SchedulerName || pod.Annotations[SchedulerAnnotationKey] == "" } else { return pod.Annotations[SchedulerAnnotationKey] == f.SchedulerName @@ -500,20 +501,20 @@ func (f *ConfigFactory) responsibleForPod(pod *api.Pod) bool { } func getNodeConditionPredicate() cache.NodeConditionPredicate { - return func(node *api.Node) bool { + return func(node *v1.Node) bool { for i := range node.Status.Conditions { cond := &node.Status.Conditions[i] // We consider the node for scheduling only when its: // - NodeReady condition status is ConditionTrue, // - NodeOutOfDisk condition status is ConditionFalse, // - NodeNetworkUnavailable condition status is ConditionFalse. 
- if cond.Type == api.NodeReady && cond.Status != api.ConditionTrue { + if cond.Type == v1.NodeReady && cond.Status != v1.ConditionTrue { glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status) return false - } else if cond.Type == api.NodeOutOfDisk && cond.Status != api.ConditionFalse { + } else if cond.Type == v1.NodeOutOfDisk && cond.Status != v1.ConditionFalse { glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status) return false - } else if cond.Type == api.NodeNetworkUnavailable && cond.Status != api.ConditionFalse { + } else if cond.Type == v1.NodeNetworkUnavailable && cond.Status != v1.ConditionFalse { glog.V(4).Infof("Ignoring node %v with %v condition status %v", node.Name, cond.Type, cond.Status) return false } @@ -530,16 +531,16 @@ func getNodeConditionPredicate() cache.NodeConditionPredicate { // Returns a cache.ListWatch that finds all pods that need to be // scheduled. func (factory *ConfigFactory) createUnassignedNonTerminatedPodLW() *cache.ListWatch { - selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", api.NamespaceAll, selector) + selector := fields.ParseSelectorOrDie("spec.nodeName==" + "" + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", v1.NamespaceAll, selector) } // Returns a cache.ListWatch that finds all pods that are // already scheduled. // TODO: return a ListerWatcher interface instead? func (factory *ConfigFactory) createAssignedNonTerminatedPodLW() *cache.ListWatch { - selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" + string(api.PodSucceeded) + ",status.phase!=" + string(api.PodFailed)) - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", api.NamespaceAll, selector) + selector := fields.ParseSelectorOrDie("spec.nodeName!=" + "" + ",status.phase!=" + string(v1.PodSucceeded) + ",status.phase!=" + string(v1.PodFailed)) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "pods", v1.NamespaceAll, selector) } // createNodeLW returns a cache.ListWatch that gets all changes to nodes. @@ -547,36 +548,36 @@ func (factory *ConfigFactory) createNodeLW() *cache.ListWatch { // all nodes are considered to ensure that the scheduler cache has access to all nodes for lookups // the NodeCondition is used to filter out the nodes that are not ready or unschedulable // the filtered list is used as the super set of nodes to consider for scheduling - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "nodes", api.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "nodes", v1.NamespaceAll, fields.ParseSelectorOrDie("")) } // createPersistentVolumeLW returns a cache.ListWatch that gets all changes to persistentVolumes. 
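The unscheduled-pod ListWatch above reduces to a field selector over spec.nodeName and the v1 pod phases; pulled out into a standalone sketch (the function name is hypothetical, the client is a placeholder parameter):

// exampleUnscheduledPodLW lists and watches pods with no assigned node that
// are not yet Succeeded or Failed, i.e. pods the scheduler still has to place.
func exampleUnscheduledPodLW(client clientset.Interface) *cache.ListWatch {
	selector := fields.ParseSelectorOrDie(
		"spec.nodeName==" + "" +
			",status.phase!=" + string(v1.PodSucceeded) +
			",status.phase!=" + string(v1.PodFailed))
	return cache.NewListWatchFromClient(client.Core().RESTClient(), "pods", v1.NamespaceAll, selector)
}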
func (factory *ConfigFactory) createPersistentVolumeLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumes", api.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumes", v1.NamespaceAll, fields.ParseSelectorOrDie("")) } // createPersistentVolumeClaimLW returns a cache.ListWatch that gets all changes to persistentVolumeClaims. func (factory *ConfigFactory) createPersistentVolumeClaimLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumeClaims", api.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "persistentVolumeClaims", v1.NamespaceAll, fields.ParseSelectorOrDie("")) } // Returns a cache.ListWatch that gets all changes to services. func (factory *ConfigFactory) createServiceLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "services", api.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "services", v1.NamespaceAll, fields.ParseSelectorOrDie("")) } // Returns a cache.ListWatch that gets all changes to controllers. func (factory *ConfigFactory) createControllerLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "replicationControllers", api.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.Client.Core().RESTClient(), "replicationControllers", v1.NamespaceAll, fields.ParseSelectorOrDie("")) } // Returns a cache.ListWatch that gets all changes to replicasets. func (factory *ConfigFactory) createReplicaSetLW() *cache.ListWatch { - return cache.NewListWatchFromClient(factory.Client.Extensions().RESTClient(), "replicasets", api.NamespaceAll, fields.ParseSelectorOrDie("")) + return cache.NewListWatchFromClient(factory.Client.Extensions().RESTClient(), "replicasets", v1.NamespaceAll, fields.ParseSelectorOrDie("")) } -func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) { - return func(pod *api.Pod, err error) { +func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *v1.Pod, err error) { + return func(pod *v1.Pod, err error) { if err == scheduler.ErrNoNodesAvailable { glog.V(4).Infof("Unable to schedule %v %v: no nodes are registered to the cluster; waiting", pod.Namespace, pod.Name) } else { @@ -621,9 +622,9 @@ func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue } } -// nodeEnumerator allows a cache.Poller to enumerate items in an api.NodeList +// nodeEnumerator allows a cache.Poller to enumerate items in an v1.NodeList type nodeEnumerator struct { - *api.NodeList + *v1.NodeList } // Len returns the number of items in the node list. @@ -644,7 +645,7 @@ type binder struct { } // Bind just does a POST binding RPC. 
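The object the binder POSTs is now a v1.Binding; a minimal instance, mirroring the values exercised in TestBind later in this patch (names are placeholders):

// exampleBinding binds pod "foo" in the default namespace to node "foohost";
// only the fields used by this patch's TestBind are set.
func exampleBinding() *v1.Binding {
	return &v1.Binding{
		ObjectMeta: v1.ObjectMeta{
			Namespace: v1.NamespaceDefault,
			Name:      "foo",
		},
		Target: v1.ObjectReference{
			Name: "foohost",
		},
	}
}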
-func (b *binder) Bind(binding *api.Binding) error { +func (b *binder) Bind(binding *v1.Binding) error { glog.V(3).Infof("Attempting to bind %v to %v", binding.Name, binding.Target.Name) ctx := api.WithNamespace(api.NewContext(), binding.Namespace) return b.Client.Core().RESTClient().Post().Namespace(api.NamespaceValue(ctx)).Resource("bindings").Body(binding).Do().Error() @@ -656,9 +657,9 @@ type podConditionUpdater struct { Client clientset.Interface } -func (p *podConditionUpdater) Update(pod *api.Pod, condition *api.PodCondition) error { +func (p *podConditionUpdater) Update(pod *v1.Pod, condition *v1.PodCondition) error { glog.V(2).Infof("Updating pod condition for %s/%s to (%s==%s)", pod.Namespace, pod.Name, condition.Type, condition.Status) - if api.UpdatePodCondition(&pod.Status, condition) { + if v1.UpdatePodCondition(&pod.Status, condition) { _, err := p.Client.Core().Pods(pod.Namespace).UpdateStatus(pod) return err } diff --git a/plugin/pkg/scheduler/factory/factory_test.go b/plugin/pkg/scheduler/factory/factory_test.go index 02c35db7371..2a7271919fc 100644 --- a/plugin/pkg/scheduler/factory/factory_test.go +++ b/plugin/pkg/scheduler/factory/factory_test.go @@ -23,12 +23,12 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/testapi" apitesting "k8s.io/kubernetes/pkg/api/testing" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/apimachinery/registered" "k8s.io/kubernetes/pkg/client/cache" - clientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" + clientset "k8s.io/kubernetes/pkg/client/clientset_generated/release_1_5" "k8s.io/kubernetes/pkg/client/restclient" "k8s.io/kubernetes/pkg/runtime" "k8s.io/kubernetes/pkg/types" @@ -47,8 +47,8 @@ func TestCreate(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) + factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) factory.Create() } @@ -65,8 +65,8 @@ func TestCreateFromConfig(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) + factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) // Pre-register some predicate and priority functions RegisterFitPredicate("PredicateOne", PredicateOne) @@ -106,8 +106,8 @@ func TestCreateFromEmptyConfig(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, 
ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) - factory := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) + factory := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) configData = []byte(`{}`) if err := runtime.DecodeInto(latestschedulerapi.Codec, configData, &policy); err != nil { @@ -117,26 +117,26 @@ func TestCreateFromEmptyConfig(t *testing.T) { factory.CreateFromConfig(policy) } -func PredicateOne(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateOne(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func PredicateTwo(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func PredicateTwo(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func PriorityOne(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { +func PriorityOne(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { return []schedulerapi.HostPriority{}, nil } -func PriorityTwo(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { +func PriorityTwo(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { return []schedulerapi.HostPriority{}, nil } func TestDefaultErrorFunc(t *testing.T) { - testPod := &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}, - Spec: apitesting.DeepEqualSafePodSpec(), + testPod := &v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar"}, + Spec: apitesting.V1DeepEqualSafePodSpec(), } handler := utiltesting.FakeHandler{ StatusCode: 200, @@ -149,7 +149,7 @@ func TestDefaultErrorFunc(t *testing.T) { mux.Handle(testapi.Default.ResourcePath("pods", "bar", "foo"), &handler) server := httptest.NewServer(mux) defer server.Close() - factory := NewConfigFactory(clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}), api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) + factory := NewConfigFactory(clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}), v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) queue := cache.NewFIFO(cache.MetaNamespaceKeyFunc) podBackoff := podBackoff{ perPodBackoff: map[types.NamespacedName]*backoffEntry{}, @@ -178,11 +178,11 @@ func TestDefaultErrorFunc(t *testing.T) { } func TestNodeEnumerator(t *testing.T) { - testList := &api.NodeList{ - Items: []api.Node{ - {ObjectMeta: api.ObjectMeta{Name: "foo"}}, - {ObjectMeta: api.ObjectMeta{Name: "bar"}}, - {ObjectMeta: 
api.ObjectMeta{Name: "baz"}}, + testList := &v1.NodeList{ + Items: []v1.Node{ + {ObjectMeta: v1.ObjectMeta{Name: "foo"}}, + {ObjectMeta: v1.ObjectMeta{Name: "bar"}}, + {ObjectMeta: v1.ObjectMeta{Name: "baz"}}, }, } me := nodeEnumerator{testList} @@ -192,7 +192,7 @@ func TestNodeEnumerator(t *testing.T) { } for i := range testList.Items { gotObj := me.Get(i) - if e, a := testList.Items[i].Name, gotObj.(*api.Node).Name; e != a { + if e, a := testList.Items[i].Name, gotObj.(*v1.Node).Name; e != a { t.Errorf("Expected %v, got %v", e, a) } if e, a := &testList.Items[i], gotObj; !reflect.DeepEqual(e, a) { @@ -211,14 +211,14 @@ func (f *fakeClock) Now() time.Time { func TestBind(t *testing.T) { table := []struct { - binding *api.Binding + binding *v1.Binding }{ - {binding: &api.Binding{ - ObjectMeta: api.ObjectMeta{ - Namespace: api.NamespaceDefault, + {binding: &v1.Binding{ + ObjectMeta: v1.ObjectMeta{ + Namespace: v1.NamespaceDefault, Name: "foo", }, - Target: api.ObjectReference{ + Target: v1.ObjectReference{ Name: "foohost.kubernetes.mydomain.com", }, }}, @@ -232,7 +232,7 @@ func TestBind(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) b := binder{client} if err := b.Bind(item.binding); err != nil { @@ -240,7 +240,7 @@ func TestBind(t *testing.T) { continue } expectedBody := runtime.EncodeOrDie(testapi.Default.Codec(), item.binding) - handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", api.NamespaceDefault, ""), "POST", &expectedBody) + handler.ValidateRequest(t, testapi.Default.ResourcePath("bindings", v1.NamespaceDefault, ""), "POST", &expectedBody) } } @@ -317,45 +317,45 @@ func TestResponsibleForPod(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) // factory of "default-scheduler" - factoryDefaultScheduler := NewConfigFactory(client, api.DefaultSchedulerName, api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) + factoryDefaultScheduler := NewConfigFactory(client, v1.DefaultSchedulerName, v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) // factory of "foo-scheduler" - factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", api.DefaultHardPodAffinitySymmetricWeight, api.DefaultFailureDomains) + factoryFooScheduler := NewConfigFactory(client, "foo-scheduler", v1.DefaultHardPodAffinitySymmetricWeight, v1.DefaultFailureDomains) // scheduler annotations to be tested schedulerAnnotationFitsDefault := map[string]string{"scheduler.alpha.kubernetes.io/name": "default-scheduler"} schedulerAnnotationFitsFoo := map[string]string{"scheduler.alpha.kubernetes.io/name": "foo-scheduler"} schedulerAnnotationFitsNone := map[string]string{"scheduler.alpha.kubernetes.io/name": "bar-scheduler"} tests := []struct { - pod *api.Pod + pod *v1.Pod pickedByDefault bool pickedByFoo bool 
}{ { // pod with no annotation "scheduler.alpha.kubernetes.io/name=" should be // picked by the default scheduler, NOT by the one of name "foo-scheduler" - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar"}}, pickedByDefault: true, pickedByFoo: false, }, { // pod with annotation "scheduler.alpha.kubernetes.io/name=default-scheduler" should be picked // by the scheduler of name "default-scheduler", NOT by the one of name "foo-scheduler" - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsDefault}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsDefault}}, pickedByDefault: true, pickedByFoo: false, }, { // pod with annotataion "scheduler.alpha.kubernetes.io/name=foo-scheduler" should be NOT // be picked by the scheduler of name "default-scheduler", but by the one of name "foo-scheduler" - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsFoo}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsFoo}}, pickedByDefault: false, pickedByFoo: true, }, { // pod with annotataion "scheduler.alpha.kubernetes.io/name=foo-scheduler" should be NOT // be picked by niether the scheduler of name "default-scheduler" nor the one of name "foo-scheduler" - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsNone}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "foo", Namespace: "bar", Annotations: schedulerAnnotationFitsNone}}, pickedByDefault: false, pickedByFoo: false, }, @@ -381,9 +381,9 @@ func TestInvalidHardPodAffinitySymmetricWeight(t *testing.T) { server := httptest.NewServer(&handler) // TODO: Uncomment when fix #19254 // defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) // factory of "default-scheduler" - factory := NewConfigFactory(client, api.DefaultSchedulerName, -1, api.DefaultFailureDomains) + factory := NewConfigFactory(client, v1.DefaultSchedulerName, -1, v1.DefaultFailureDomains) _, err := factory.Create() if err == nil { t.Errorf("expected err: invalid hardPodAffinitySymmetricWeight, got nothing") @@ -398,7 +398,7 @@ func TestInvalidFactoryArgs(t *testing.T) { } server := httptest.NewServer(&handler) defer server.Close() - client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(api.GroupName).GroupVersion}}) + client := clientset.NewForConfigOrDie(&restclient.Config{Host: server.URL, ContentConfig: restclient.ContentConfig{GroupVersion: ®istered.GroupOrDie(v1.GroupName).GroupVersion}}) testCases := []struct { hardPodAffinitySymmetricWeight int @@ -407,12 +407,12 @@ func TestInvalidFactoryArgs(t *testing.T) { }{ { hardPodAffinitySymmetricWeight: -1, - failureDomains: api.DefaultFailureDomains, + failureDomains: v1.DefaultFailureDomains, expectErr: "invalid hardPodAffinitySymmetricWeight: -1, must be in the range 0-100", }, { hardPodAffinitySymmetricWeight: 101, - failureDomains: 
api.DefaultFailureDomains, + failureDomains: v1.DefaultFailureDomains, expectErr: "invalid hardPodAffinitySymmetricWeight: 101, must be in the range 0-100", }, { @@ -423,7 +423,7 @@ func TestInvalidFactoryArgs(t *testing.T) { } for _, test := range testCases { - factory := NewConfigFactory(client, api.DefaultSchedulerName, test.hardPodAffinitySymmetricWeight, test.failureDomains) + factory := NewConfigFactory(client, v1.DefaultSchedulerName, test.hardPodAffinitySymmetricWeight, test.failureDomains) _, err := factory.Create() if err == nil { t.Errorf("expected err: %s, got nothing", test.expectErr) @@ -434,32 +434,32 @@ func TestInvalidFactoryArgs(t *testing.T) { func TestNodeConditionPredicate(t *testing.T) { nodeFunc := getNodeConditionPredicate() - nodeList := &api.NodeList{ - Items: []api.Node{ + nodeList := &v1.NodeList{ + Items: []v1.Node{ // node1 considered - {ObjectMeta: api.ObjectMeta{Name: "node1"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node1"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}}}}, // node2 ignored - node not Ready - {ObjectMeta: api.ObjectMeta{Name: "node2"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node2"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}}}}, // node3 ignored - node out of disk - {ObjectMeta: api.ObjectMeta{Name: "node3"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node3"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}}}, // node4 considered - {ObjectMeta: api.ObjectMeta{Name: "node4"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeOutOfDisk, Status: api.ConditionFalse}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node4"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse}}}}, // node5 ignored - node out of disk - {ObjectMeta: api.ObjectMeta{Name: "node5"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node5"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}}}, // node6 considered - {ObjectMeta: api.ObjectMeta{Name: "node6"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionTrue}, {Type: api.NodeOutOfDisk, Status: api.ConditionFalse}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node6"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionTrue}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse}}}}, // node7 ignored - node out of disk, node not Ready - {ObjectMeta: api.ObjectMeta{Name: "node7"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}, {Type: api.NodeOutOfDisk, Status: api.ConditionTrue}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node7"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionTrue}}}}, // node8 ignored 
- node not Ready - {ObjectMeta: api.ObjectMeta{Name: "node8"}, Status: api.NodeStatus{Conditions: []api.NodeCondition{{Type: api.NodeReady, Status: api.ConditionFalse}, {Type: api.NodeOutOfDisk, Status: api.ConditionFalse}}}}, + {ObjectMeta: v1.ObjectMeta{Name: "node8"}, Status: v1.NodeStatus{Conditions: []v1.NodeCondition{{Type: v1.NodeReady, Status: v1.ConditionFalse}, {Type: v1.NodeOutOfDisk, Status: v1.ConditionFalse}}}}, // node9 ignored - node unschedulable - {ObjectMeta: api.ObjectMeta{Name: "node9"}, Spec: api.NodeSpec{Unschedulable: true}}, + {ObjectMeta: v1.ObjectMeta{Name: "node9"}, Spec: v1.NodeSpec{Unschedulable: true}}, // node10 considered - {ObjectMeta: api.ObjectMeta{Name: "node10"}, Spec: api.NodeSpec{Unschedulable: false}}, + {ObjectMeta: v1.ObjectMeta{Name: "node10"}, Spec: v1.NodeSpec{Unschedulable: false}}, // node11 considered - {ObjectMeta: api.ObjectMeta{Name: "node11"}}, + {ObjectMeta: v1.ObjectMeta{Name: "node11"}}, }, } diff --git a/plugin/pkg/scheduler/generic_scheduler.go b/plugin/pkg/scheduler/generic_scheduler.go index 3cd648a8cde..dcfc467077f 100644 --- a/plugin/pkg/scheduler/generic_scheduler.go +++ b/plugin/pkg/scheduler/generic_scheduler.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/util" "k8s.io/kubernetes/pkg/util/errors" "k8s.io/kubernetes/pkg/util/workqueue" @@ -39,7 +39,7 @@ import ( type FailedPredicateMap map[string][]algorithm.PredicateFailureReason type FitError struct { - Pod *api.Pod + Pod *v1.Pod FailedPredicates FailedPredicateMap } @@ -89,7 +89,7 @@ type genericScheduler struct { // Schedule tries to schedule the given pod to one of node in the node list. // If it succeeds, it will return the name of the node. // If it fails, it will return a Fiterror error with reasons. -func (g *genericScheduler) Schedule(pod *api.Pod, nodeLister algorithm.NodeLister) (string, error) { +func (g *genericScheduler) Schedule(pod *v1.Pod, nodeLister algorithm.NodeLister) (string, error) { var trace *util.Trace if pod != nil { trace = util.NewTrace(fmt.Sprintf("Scheduling %s/%s", pod.Namespace, pod.Name)) @@ -160,14 +160,14 @@ func (g *genericScheduler) selectHost(priorityList schedulerapi.HostPriorityList // Filters the nodes to find the ones that fit based on the given predicate functions // Each node is passed through the predicate functions to determine if it is a fit func findNodesThatFit( - pod *api.Pod, + pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, - nodes []*api.Node, + nodes []*v1.Node, predicateFuncs map[string]algorithm.FitPredicate, extenders []algorithm.SchedulerExtender, metadataProducer algorithm.MetadataProducer, -) ([]*api.Node, FailedPredicateMap, error) { - var filtered []*api.Node +) ([]*v1.Node, FailedPredicateMap, error) { + var filtered []*v1.Node failedPredicateMap := FailedPredicateMap{} if len(predicateFuncs) == 0 { @@ -175,7 +175,7 @@ func findNodesThatFit( } else { // Create filtered list with enough space to avoid growing it // and allow assigning. 
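The core of the parallel filtering loop that follows, reduced to a standalone sketch: each worker checks one node and, on a fit, claims a slot in the pre-sized result slice through an atomic counter. The wrapper function is hypothetical, error and failure-reason bookkeeping is elided, and "sync/atomic" is assumed to be imported.

// exampleParallelFilter filters nodes for a pod using podFitsOnNode and the
// 16-way workqueue.Parallelize pattern used by findNodesThatFit.
func exampleParallelFilter(pod *v1.Pod, meta interface{}, nodes []*v1.Node,
	nodeNameToInfo map[string]*schedulercache.NodeInfo,
	predicateFuncs map[string]algorithm.FitPredicate) []*v1.Node {
	var filteredLen int32
	filtered := make([]*v1.Node, len(nodes))
	checkNode := func(i int) {
		fits, _, err := podFitsOnNode(pod, meta, nodeNameToInfo[nodes[i].Name], predicateFuncs)
		if err != nil || !fits {
			return // the real code also records errors and failure reasons under a mutex
		}
		filtered[atomic.AddInt32(&filteredLen, 1)-1] = nodes[i]
	}
	workqueue.Parallelize(16, len(nodes), checkNode)
	return filtered[:filteredLen]
}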
- filtered = make([]*api.Node, len(nodes)) + filtered = make([]*v1.Node, len(nodes)) errs := []error{} var predicateResultLock sync.Mutex var filteredLen int32 @@ -202,7 +202,7 @@ func findNodesThatFit( workqueue.Parallelize(16, len(nodes), checkNode) filtered = filtered[:filteredLen] if len(errs) > 0 { - return []*api.Node{}, FailedPredicateMap{}, errors.NewAggregate(errs) + return []*v1.Node{}, FailedPredicateMap{}, errors.NewAggregate(errs) } } @@ -210,7 +210,7 @@ func findNodesThatFit( for _, extender := range extenders { filteredList, failedMap, err := extender.Filter(pod, filtered) if err != nil { - return []*api.Node{}, FailedPredicateMap{}, err + return []*v1.Node{}, FailedPredicateMap{}, err } for failedNodeName, failedMsg := range failedMap { @@ -229,7 +229,7 @@ func findNodesThatFit( } // Checks whether node with a given name and NodeInfo satisfies all predicateFuncs. -func podFitsOnNode(pod *api.Pod, meta interface{}, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, []algorithm.PredicateFailureReason, error) { +func podFitsOnNode(pod *v1.Pod, meta interface{}, info *schedulercache.NodeInfo, predicateFuncs map[string]algorithm.FitPredicate) (bool, []algorithm.PredicateFailureReason, error) { var failedPredicates []algorithm.PredicateFailureReason for _, predicate := range predicateFuncs { fit, reasons, err := predicate(pod, meta, info) @@ -251,11 +251,11 @@ func podFitsOnNode(pod *api.Pod, meta interface{}, info *schedulercache.NodeInfo // The node scores returned by the priority function are multiplied by the weights to get weighted scores // All scores are finally combined (added) to get the total weighted scores of all nodes func PrioritizeNodes( - pod *api.Pod, + pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, meta interface{}, priorityConfigs []algorithm.PriorityConfig, - nodes []*api.Node, + nodes []*v1.Node, extenders []algorithm.SchedulerExtender, ) (schedulerapi.HostPriorityList, error) { // If no priority configs are provided, then the EqualPriority function is applied @@ -381,7 +381,7 @@ func PrioritizeNodes( } // EqualPriority is a prioritizer function that gives an equal weight of one to all nodes -func EqualPriorityMap(_ *api.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { +func EqualPriorityMap(_ *v1.Pod, _ interface{}, nodeInfo *schedulercache.NodeInfo) (schedulerapi.HostPriority, error) { node := nodeInfo.Node() if node == nil { return schedulerapi.HostPriority{}, fmt.Errorf("node not found") diff --git a/plugin/pkg/scheduler/generic_scheduler_test.go b/plugin/pkg/scheduler/generic_scheduler_test.go index 52b4aa95b1c..c7972c359ac 100644 --- a/plugin/pkg/scheduler/generic_scheduler_test.go +++ b/plugin/pkg/scheduler/generic_scheduler_test.go @@ -24,9 +24,9 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" - "k8s.io/kubernetes/pkg/apis/extensions" + "k8s.io/kubernetes/pkg/api/v1" + extensions "k8s.io/kubernetes/pkg/apis/extensions/v1beta1" "k8s.io/kubernetes/pkg/util/sets" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" @@ -37,15 +37,15 @@ import ( "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) -func falsePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func falsePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { 
return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } -func truePredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func truePredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { return true, nil, nil } -func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func matchesPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { node := nodeInfo.Node() if node == nil { return false, nil, fmt.Errorf("node not found") @@ -56,14 +56,14 @@ func matchesPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.N return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } -func hasNoPodsPredicate(pod *api.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { +func hasNoPodsPredicate(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.NodeInfo) (bool, []algorithm.PredicateFailureReason, error) { if len(nodeInfo.Pods()) == 0 { return true, nil, nil } return false, []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, nil } -func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { +func numericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { result := []schedulerapi.HostPriority{} for _, node := range nodes { score, err := strconv.Atoi(node.Name) @@ -78,7 +78,7 @@ func numericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.Nod return result, nil } -func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*api.Node) (schedulerapi.HostPriorityList, error) { +func reverseNumericPriority(pod *v1.Pod, nodeNameToInfo map[string]*schedulercache.NodeInfo, nodes []*v1.Node) (schedulerapi.HostPriorityList, error) { var maxScore float64 minScore := math.MaxFloat64 reverseResult := []schedulerapi.HostPriority{} @@ -101,10 +101,10 @@ func reverseNumericPriority(pod *api.Pod, nodeNameToInfo map[string]*schedulerca return reverseResult, nil } -func makeNodeList(nodeNames []string) []*api.Node { - result := make([]*api.Node, 0, len(nodeNames)) +func makeNodeList(nodeNames []string) []*v1.Node { + result := make([]*v1.Node, 0, len(nodeNames)) for _, nodeName := range nodeNames { - result = append(result, &api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName}}) + result = append(result, &v1.Node{ObjectMeta: v1.ObjectMeta{Name: nodeName}}) } return result } @@ -181,8 +181,8 @@ func TestGenericScheduler(t *testing.T) { predicates map[string]algorithm.FitPredicate prioritizers []algorithm.PriorityConfig nodes []string - pod *api.Pod - pods []*api.Pod + pod *v1.Pod + pods []*v1.Pod expectedHosts sets.String expectsErr bool wErr error @@ -192,10 +192,10 @@ func TestGenericScheduler(t *testing.T) { prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, nodes: []string{"machine1", "machine2"}, expectsErr: true, - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, name: "test 1", wErr: &FitError{ - Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + 
Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, FailedPredicates: FailedPredicateMap{ "machine1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "machine2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, @@ -214,7 +214,7 @@ func TestGenericScheduler(t *testing.T) { predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, prioritizers: []algorithm.PriorityConfig{{Map: EqualPriorityMap, Weight: 1}}, nodes: []string{"machine1", "machine2"}, - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "machine2"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "machine2"}}, expectedHosts: sets.NewString("machine2"), name: "test 3", wErr: nil, @@ -231,7 +231,7 @@ func TestGenericScheduler(t *testing.T) { predicates: map[string]algorithm.FitPredicate{"matches": matchesPredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"3", "2", "1"}, - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, expectedHosts: sets.NewString("2"), name: "test 5", wErr: nil, @@ -240,7 +240,7 @@ func TestGenericScheduler(t *testing.T) { predicates: map[string]algorithm.FitPredicate{"true": truePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}, {Function: reverseNumericPriority, Weight: 2}}, nodes: []string{"3", "2", "1"}, - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, expectedHosts: sets.NewString("1"), name: "test 6", wErr: nil, @@ -249,11 +249,11 @@ func TestGenericScheduler(t *testing.T) { predicates: map[string]algorithm.FitPredicate{"true": truePredicate, "false": falsePredicate}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"3", "2", "1"}, - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, expectsErr: true, name: "test 7", wErr: &FitError{ - Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, FailedPredicates: FailedPredicateMap{ "3": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, @@ -266,24 +266,24 @@ func TestGenericScheduler(t *testing.T) { "nopods": hasNoPodsPredicate, "matches": matchesPredicate, }, - pods: []*api.Pod{ + pods: []*v1.Pod{ { - ObjectMeta: api.ObjectMeta{Name: "2"}, - Spec: api.PodSpec{ + ObjectMeta: v1.ObjectMeta{Name: "2"}, + Spec: v1.PodSpec{ NodeName: "2", }, - Status: api.PodStatus{ - Phase: api.PodRunning, + Status: v1.PodStatus{ + Phase: v1.PodRunning, }, }, }, - pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, prioritizers: []algorithm.PriorityConfig{{Function: numericPriority, Weight: 1}}, nodes: []string{"1", "2"}, expectsErr: true, name: "test 8", wErr: &FitError{ - Pod: &api.Pod{ObjectMeta: api.ObjectMeta{Name: "2"}}, + Pod: &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "2"}}, FailedPredicates: FailedPredicateMap{ "1": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, "2": []algorithm.PredicateFailureReason{algorithmpredicates.ErrFakePredicate}, @@ -297,7 +297,7 @@ func TestGenericScheduler(t *testing.T) { cache.AddPod(pod) } for _, name := range test.nodes { - cache.AddNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}}) + cache.AddNode(&v1.Node{ObjectMeta: 
v1.ObjectMeta{Name: name}}) } scheduler := NewGenericScheduler( @@ -322,7 +322,7 @@ func TestFindFitAllError(t *testing.T) { "2": schedulercache.NewNodeInfo(), "1": schedulercache.NewNodeInfo(), } - _, predicateMap, err := findNodesThatFit(&api.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer) + _, predicateMap, err := findNodesThatFit(&v1.Pod{}, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer) if err != nil { t.Errorf("unexpected error: %v", err) @@ -346,14 +346,14 @@ func TestFindFitAllError(t *testing.T) { func TestFindFitSomeError(t *testing.T) { nodes := []string{"3", "2", "1"} predicates := map[string]algorithm.FitPredicate{"true": truePredicate, "match": matchesPredicate} - pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "1"}} + pod := &v1.Pod{ObjectMeta: v1.ObjectMeta{Name: "1"}} nodeNameToInfo := map[string]*schedulercache.NodeInfo{ "3": schedulercache.NewNodeInfo(), "2": schedulercache.NewNodeInfo(), "1": schedulercache.NewNodeInfo(pod), } for name := range nodeNameToInfo { - nodeNameToInfo[name].SetNode(&api.Node{ObjectMeta: api.ObjectMeta{Name: name}}) + nodeNameToInfo[name].SetNode(&v1.Node{ObjectMeta: v1.ObjectMeta{Name: name}}) } _, predicateMap, err := findNodesThatFit(pod, nodeNameToInfo, makeNodeList(nodes), predicates, nil, algorithm.EmptyMetadataProducer) @@ -379,15 +379,15 @@ func TestFindFitSomeError(t *testing.T) { } } -func makeNode(node string, milliCPU, memory int64) *api.Node { - return &api.Node{ - ObjectMeta: api.ObjectMeta{Name: node}, - Status: api.NodeStatus{ - Capacity: api.ResourceList{ +func makeNode(node string, milliCPU, memory int64) *v1.Node { + return &v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: node}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ "cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), "memory": *resource.NewQuantity(memory, resource.BinarySI), }, - Allocatable: api.ResourceList{ + Allocatable: v1.ResourceList{ "cpu": *resource.NewMilliQuantity(milliCPU, resource.DecimalSI), "memory": *resource.NewQuantity(memory, resource.BinarySI), }, @@ -402,19 +402,19 @@ func makeNode(node string, milliCPU, memory int64) *api.Node { // - don't get the same score no matter what we schedule. func TestZeroRequest(t *testing.T) { // A pod with no resources. We expect spreading to count it as having the default resources. - noResources := api.PodSpec{ - Containers: []api.Container{ + noResources := v1.PodSpec{ + Containers: []v1.Container{ {}, }, } noResources1 := noResources noResources1.NodeName = "machine1" // A pod with the same resources as a 0-request pod gets by default as its resources (for spreading). - small := api.PodSpec{ - Containers: []api.Container{ + small := v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse( strconv.FormatInt(priorityutil.DefaultMilliCpuRequest, 10) + "m"), "memory": resource.MustParse( @@ -427,11 +427,11 @@ func TestZeroRequest(t *testing.T) { small2 := small small2.NodeName = "machine2" // A larger pod. 
- large := api.PodSpec{ - Containers: []api.Container{ + large := v1.PodSpec{ + Containers: []v1.Container{ { - Resources: api.ResourceRequirements{ - Requests: api.ResourceList{ + Resources: v1.ResourceRequirements{ + Requests: v1.ResourceList{ "cpu": resource.MustParse( strconv.FormatInt(priorityutil.DefaultMilliCpuRequest*3, 10) + "m"), "memory": resource.MustParse( @@ -446,38 +446,38 @@ func TestZeroRequest(t *testing.T) { large2 := large large2.NodeName = "machine2" tests := []struct { - pod *api.Pod - pods []*api.Pod - nodes []*api.Node + pod *v1.Pod + pods []*v1.Pod + nodes []*v1.Node test string }{ // The point of these next two tests is to show you get the same priority for a zero-request pod // as for a pod with the defaults requests, both when the zero-request pod is already on the machine // and when the zero-request pod is the one being scheduled. { - pod: &api.Pod{Spec: noResources}, - nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, + pod: &v1.Pod{Spec: noResources}, + nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, test: "test priority of zero-request pod with machine with zero-request pod", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: large1}, {Spec: noResources1}, {Spec: large2}, {Spec: small2}, }, }, { - pod: &api.Pod{Spec: small}, - nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, + pod: &v1.Pod{Spec: small}, + nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, test: "test priority of nonzero-request pod with machine with zero-request pod", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: large1}, {Spec: noResources1}, {Spec: large2}, {Spec: small2}, }, }, // The point of this test is to verify that we're not just getting the same score no matter what we schedule. 
{ - pod: &api.Pod{Spec: large}, - nodes: []*api.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, + pod: &v1.Pod{Spec: large}, + nodes: []*v1.Node{makeNode("machine1", 1000, priorityutil.DefaultMemoryRequest*10), makeNode("machine2", 1000, priorityutil.DefaultMemoryRequest*10)}, test: "test priority of larger pod with machine with zero-request pod", - pods: []*api.Pod{ + pods: []*v1.Pod{ {Spec: large1}, {Spec: noResources1}, {Spec: large2}, {Spec: small2}, }, @@ -494,8 +494,8 @@ func TestZeroRequest(t *testing.T) { {Map: algorithmpriorities.BalancedResourceAllocationMap, Weight: 1}, { Function: algorithmpriorities.NewSelectorSpreadPriority( - algorithm.FakeServiceLister([]*api.Service{}), - algorithm.FakeControllerLister([]*api.ReplicationController{}), + algorithm.FakeServiceLister([]*v1.Service{}), + algorithm.FakeControllerLister([]*v1.ReplicationController{}), algorithm.FakeReplicaSetLister([]*extensions.ReplicaSet{})), Weight: 1, }, diff --git a/plugin/pkg/scheduler/scheduler.go b/plugin/pkg/scheduler/scheduler.go index 690064f0eae..6f52ba00287 100644 --- a/plugin/pkg/scheduler/scheduler.go +++ b/plugin/pkg/scheduler/scheduler.go @@ -19,7 +19,7 @@ package scheduler import ( "time" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/util/wait" "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm" @@ -31,11 +31,11 @@ import ( // Binder knows how to write a binding. type Binder interface { - Bind(binding *api.Binding) error + Bind(binding *v1.Binding) error } type PodConditionUpdater interface { - Update(pod *api.Pod, podCondition *api.PodCondition) error + Update(pod *v1.Pod, podCondition *v1.PodCondition) error } // Scheduler watches for new unscheduled pods. It attempts to find @@ -60,11 +60,11 @@ type Config struct { // is available. We don't use a channel for this, because scheduling // a pod may take some amount of time and we don't want pods to get // stale while they sit in a channel. - NextPod func() *api.Pod + NextPod func() *v1.Pod // Error is called if there is an error. 
It is passed the pod in // question, and the error - Error func(*api.Pod, error) + Error func(*v1.Pod, error) // Recorder is the EventRecorder to use Recorder record.EventRecorder @@ -96,11 +96,11 @@ func (s *Scheduler) scheduleOne() { if err != nil { glog.V(1).Infof("Failed to schedule pod: %v/%v", pod.Namespace, pod.Name) s.config.Error(pod, err) - s.config.Recorder.Eventf(pod, api.EventTypeWarning, "FailedScheduling", "%v", err) - s.config.PodConditionUpdater.Update(pod, &api.PodCondition{ - Type: api.PodScheduled, - Status: api.ConditionFalse, - Reason: api.PodReasonUnschedulable, + s.config.Recorder.Eventf(pod, v1.EventTypeWarning, "FailedScheduling", "%v", err) + s.config.PodConditionUpdater.Update(pod, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, + Reason: v1.PodReasonUnschedulable, }) return } @@ -126,9 +126,9 @@ func (s *Scheduler) scheduleOne() { go func() { defer metrics.E2eSchedulingLatency.Observe(metrics.SinceInMicroseconds(start)) - b := &api.Binding{ - ObjectMeta: api.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name}, - Target: api.ObjectReference{ + b := &v1.Binding{ + ObjectMeta: v1.ObjectMeta{Namespace: pod.Namespace, Name: pod.Name}, + Target: v1.ObjectReference{ Kind: "Node", Name: dest, }, @@ -144,15 +144,15 @@ func (s *Scheduler) scheduleOne() { glog.Errorf("scheduler cache ForgetPod failed: %v", err) } s.config.Error(pod, err) - s.config.Recorder.Eventf(pod, api.EventTypeNormal, "FailedScheduling", "Binding rejected: %v", err) - s.config.PodConditionUpdater.Update(pod, &api.PodCondition{ - Type: api.PodScheduled, - Status: api.ConditionFalse, + s.config.Recorder.Eventf(pod, v1.EventTypeNormal, "FailedScheduling", "Binding rejected: %v", err) + s.config.PodConditionUpdater.Update(pod, &v1.PodCondition{ + Type: v1.PodScheduled, + Status: v1.ConditionFalse, Reason: "BindingRejected", }) return } metrics.BindingLatency.Observe(metrics.SinceInMicroseconds(bindingStart)) - s.config.Recorder.Eventf(pod, api.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest) + s.config.Recorder.Eventf(pod, v1.EventTypeNormal, "Scheduled", "Successfully assigned %v to %v", pod.Name, dest) }() } diff --git a/plugin/pkg/scheduler/scheduler_test.go b/plugin/pkg/scheduler/scheduler_test.go index 1cb118ee8c4..5f4adf01994 100644 --- a/plugin/pkg/scheduler/scheduler_test.go +++ b/plugin/pkg/scheduler/scheduler_test.go @@ -23,9 +23,9 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" "k8s.io/kubernetes/pkg/api/testapi" + "k8s.io/kubernetes/pkg/api/v1" clientcache "k8s.io/kubernetes/pkg/client/cache" "k8s.io/kubernetes/pkg/client/record" "k8s.io/kubernetes/pkg/labels" @@ -38,38 +38,38 @@ import ( ) type fakeBinder struct { - b func(binding *api.Binding) error + b func(binding *v1.Binding) error } -func (fb fakeBinder) Bind(binding *api.Binding) error { return fb.b(binding) } +func (fb fakeBinder) Bind(binding *v1.Binding) error { return fb.b(binding) } type fakePodConditionUpdater struct{} -func (fc fakePodConditionUpdater) Update(pod *api.Pod, podCondition *api.PodCondition) error { +func (fc fakePodConditionUpdater) Update(pod *v1.Pod, podCondition *v1.PodCondition) error { return nil } -func podWithID(id, desiredHost string) *api.Pod { - return &api.Pod{ - ObjectMeta: api.ObjectMeta{Name: id, SelfLink: testapi.Default.SelfLink("pods", id)}, - Spec: api.PodSpec{ +func podWithID(id, desiredHost string) *v1.Pod { + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{Name: id, SelfLink: 
testapi.Default.SelfLink("pods", id)}, + Spec: v1.PodSpec{ NodeName: desiredHost, }, } } -func podWithPort(id, desiredHost string, port int) *api.Pod { +func podWithPort(id, desiredHost string, port int) *v1.Pod { pod := podWithID(id, desiredHost) - pod.Spec.Containers = []api.Container{ - {Name: "ctr", Ports: []api.ContainerPort{{HostPort: int32(port)}}}, + pod.Spec.Containers = []v1.Container{ + {Name: "ctr", Ports: []v1.ContainerPort{{HostPort: int32(port)}}}, } return pod } -func podWithResources(id, desiredHost string, limits api.ResourceList, requests api.ResourceList) *api.Pod { +func podWithResources(id, desiredHost string, limits v1.ResourceList, requests v1.ResourceList) *v1.Pod { pod := podWithID(id, desiredHost) - pod.Spec.Containers = []api.Container{ - {Name: "ctr", Resources: api.ResourceRequirements{Limits: limits, Requests: requests}}, + pod.Spec.Containers = []v1.Container{ + {Name: "ctr", Resources: v1.ResourceRequirements{Limits: limits, Requests: requests}}, } return pod } @@ -79,7 +79,7 @@ type mockScheduler struct { err error } -func (es mockScheduler) Schedule(pod *api.Pod, ml algorithm.NodeLister) (string, error) { +func (es mockScheduler) Schedule(pod *v1.Pod, ml algorithm.NodeLister) (string, error) { return es.machine, es.err } @@ -88,22 +88,22 @@ func TestScheduler(t *testing.T) { eventBroadcaster.StartLogging(t.Logf).Stop() errS := errors.New("scheduler") errB := errors.New("binder") - testNode := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}} + testNode := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}} table := []struct { injectBindError error - sendPod *api.Pod + sendPod *v1.Pod algo algorithm.ScheduleAlgorithm - expectErrorPod *api.Pod - expectAssumedPod *api.Pod + expectErrorPod *v1.Pod + expectAssumedPod *v1.Pod expectError error - expectBind *api.Binding + expectBind *v1.Binding eventReason string }{ { sendPod: podWithID("foo", ""), algo: mockScheduler{testNode.Name, nil}, - expectBind: &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: testNode.Name}}, + expectBind: &v1.Binding{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}}, expectAssumedPod: podWithID("foo", testNode.Name), eventReason: "Scheduled", }, { @@ -115,7 +115,7 @@ func TestScheduler(t *testing.T) { }, { sendPod: podWithID("foo", ""), algo: mockScheduler{testNode.Name, nil}, - expectBind: &api.Binding{ObjectMeta: api.ObjectMeta{Name: "foo"}, Target: api.ObjectReference{Kind: "Node", Name: testNode.Name}}, + expectBind: &v1.Binding{ObjectMeta: v1.ObjectMeta{Name: "foo"}, Target: v1.ObjectReference{Kind: "Node", Name: testNode.Name}}, expectAssumedPod: podWithID("foo", testNode.Name), injectBindError: errB, expectError: errB, @@ -126,36 +126,36 @@ func TestScheduler(t *testing.T) { for i, item := range table { var gotError error - var gotPod *api.Pod - var gotAssumedPod *api.Pod - var gotBinding *api.Binding + var gotPod *v1.Pod + var gotAssumedPod *v1.Pod + var gotBinding *v1.Binding c := &Config{ SchedulerCache: &schedulertesting.FakeCache{ - AssumeFunc: func(pod *api.Pod) { + AssumeFunc: func(pod *v1.Pod) { gotAssumedPod = pod }, }, NodeLister: algorithm.FakeNodeLister( - []*api.Node{&testNode}, + []*v1.Node{&testNode}, ), Algorithm: item.algo, - Binder: fakeBinder{func(b *api.Binding) error { + Binder: fakeBinder{func(b *v1.Binding) error { gotBinding = b return item.injectBindError }}, PodConditionUpdater: fakePodConditionUpdater{}, - Error: func(p *api.Pod, err error) { 
+ Error: func(p *v1.Pod, err error) { gotPod = p gotError = err }, - NextPod: func() *api.Pod { + NextPod: func() *v1.Pod { return item.sendPod }, - Recorder: eventBroadcaster.NewRecorder(api.EventSource{Component: "scheduler"}), + Recorder: eventBroadcaster.NewRecorder(v1.EventSource{Component: "scheduler"}), } s := New(c) called := make(chan struct{}) - events := eventBroadcaster.StartEventWatcher(func(e *api.Event) { + events := eventBroadcaster.StartEventWatcher(func(e *v1.Event) { if e, a := item.eventReason, e.Reason; e != a { t.Errorf("%v: expected %v, got %v", i, e, a) } @@ -185,9 +185,9 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) { queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc) scache := schedulercache.New(100*time.Millisecond, stop) pod := podWithPort("pod.Name", "", 8080) - node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}} + node := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}} scache.AddNode(&node) - nodeLister := algorithm.FakeNodeLister([]*api.Node{&node}) + nodeLister := algorithm.FakeNodeLister([]*v1.Node{&node}) predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts} scheduler, bindingChan, _ := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, pod, &node) @@ -225,9 +225,9 @@ func TestSchedulerNoPhantomPodAfterExpire(t *testing.T) { scheduler.scheduleOne() select { case b := <-bindingChan: - expectBinding := &api.Binding{ - ObjectMeta: api.ObjectMeta{Name: "bar"}, - Target: api.ObjectReference{Kind: "Node", Name: node.Name}, + expectBinding := &v1.Binding{ + ObjectMeta: v1.ObjectMeta{Name: "bar"}, + Target: v1.ObjectReference{Kind: "Node", Name: node.Name}, } if !reflect.DeepEqual(expectBinding, b) { t.Errorf("binding want=%v, get=%v", expectBinding, b) @@ -243,9 +243,9 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) { queuedPodStore := clientcache.NewFIFO(clientcache.MetaNamespaceKeyFunc) scache := schedulercache.New(10*time.Minute, stop) firstPod := podWithPort("pod.Name", "", 8080) - node := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1"}} + node := v1.Node{ObjectMeta: v1.ObjectMeta{Name: "machine1"}} scache.AddNode(&node) - nodeLister := algorithm.FakeNodeLister([]*api.Node{&node}) + nodeLister := algorithm.FakeNodeLister([]*v1.Node{&node}) predicateMap := map[string]algorithm.FitPredicate{"PodFitsHostPorts": predicates.PodFitsHostPorts} scheduler, bindingChan, errChan := setupTestSchedulerWithOnePodOnNode(t, queuedPodStore, scache, nodeLister, predicateMap, firstPod, &node) @@ -285,9 +285,9 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) { scheduler.scheduleOne() select { case b := <-bindingChan: - expectBinding := &api.Binding{ - ObjectMeta: api.ObjectMeta{Name: "bar"}, - Target: api.ObjectReference{Kind: "Node", Name: node.Name}, + expectBinding := &v1.Binding{ + ObjectMeta: v1.ObjectMeta{Name: "bar"}, + Target: v1.ObjectReference{Kind: "Node", Name: node.Name}, } if !reflect.DeepEqual(expectBinding, b) { t.Errorf("binding want=%v, get=%v", expectBinding, b) @@ -300,7 +300,7 @@ func TestSchedulerNoPhantomPodAfterDelete(t *testing.T) { // queuedPodStore: pods queued before processing. // cache: scheduler cache that might contain assumed pods. 
func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, - nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, pod *api.Pod, node *api.Node) (*Scheduler, chan *api.Binding, chan error) { + nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate, pod *v1.Pod, node *v1.Node) (*Scheduler, chan *v1.Binding, chan error) { scheduler, bindingChan, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap) @@ -314,9 +314,9 @@ func setupTestSchedulerWithOnePodOnNode(t *testing.T, queuedPodStore *clientcach select { case b := <-bindingChan: - expectBinding := &api.Binding{ - ObjectMeta: api.ObjectMeta{Name: pod.Name}, - Target: api.ObjectReference{Kind: "Node", Name: node.Name}, + expectBinding := &v1.Binding{ + ObjectMeta: v1.ObjectMeta{Name: pod.Name}, + Target: v1.ObjectReference{Kind: "Node", Name: node.Name}, } if !reflect.DeepEqual(expectBinding, b) { t.Errorf("binding want=%v, get=%v", expectBinding, b) @@ -336,29 +336,29 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) { // Design the baseline for the pods, and we will make nodes that dont fit it later. var cpu = int64(4) var mem = int64(500) - podWithTooBigResourceRequests := podWithResources("bar", "", api.ResourceList{ - api.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)), - api.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)), - }, api.ResourceList{ - api.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)), - api.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)), + podWithTooBigResourceRequests := podWithResources("bar", "", v1.ResourceList{ + v1.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)), + v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)), + }, v1.ResourceList{ + v1.ResourceCPU: *(resource.NewQuantity(cpu, resource.DecimalSI)), + v1.ResourceMemory: *(resource.NewQuantity(mem, resource.DecimalSI)), }) // create several nodes which cannot schedule the above pod - nodes := []*api.Node{} + nodes := []*v1.Node{} for i := 0; i < 100; i++ { - node := api.Node{ - ObjectMeta: api.ObjectMeta{Name: fmt.Sprintf("machine%v", i)}, - Status: api.NodeStatus{ - Capacity: api.ResourceList{ - api.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)), - api.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)), - api.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)), + node := v1.Node{ + ObjectMeta: v1.ObjectMeta{Name: fmt.Sprintf("machine%v", i)}, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)), + v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)), + v1.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)), }, - Allocatable: api.ResourceList{ - api.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)), - api.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)), - api.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)), + Allocatable: v1.ResourceList{ + v1.ResourceCPU: *(resource.NewQuantity(cpu/2, resource.DecimalSI)), + v1.ResourceMemory: *(resource.NewQuantity(mem/5, resource.DecimalSI)), + v1.ResourcePods: *(resource.NewQuantity(10, resource.DecimalSI)), }}, } scache.AddNode(&node) @@ -373,8 +373,8 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) { failedPredicatesMap := FailedPredicateMap{} for _, node := range nodes { 
failedPredicatesMap[node.Name] = []algorithm.PredicateFailureReason{ - predicates.NewInsufficientResourceError(api.ResourceCPU, 4000, 0, 2000), - predicates.NewInsufficientResourceError(api.ResourceMemory, 500, 0, 100), + predicates.NewInsufficientResourceError(v1.ResourceCPU, 4000, 0, 2000), + predicates.NewInsufficientResourceError(v1.ResourceMemory, 500, 0, 100), } } scheduler, _, errChan := setupTestScheduler(queuedPodStore, scache, nodeLister, predicateMap) @@ -400,7 +400,7 @@ func TestSchedulerFailedSchedulingReasons(t *testing.T) { // queuedPodStore: pods queued before processing. // scache: scheduler cache that might contain assumed pods. -func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate) (*Scheduler, chan *api.Binding, chan error) { +func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache.Cache, nodeLister algorithm.FakeNodeLister, predicateMap map[string]algorithm.FitPredicate) (*Scheduler, chan *v1.Binding, chan error) { algo := NewGenericScheduler( scache, predicateMap, @@ -408,20 +408,20 @@ func setupTestScheduler(queuedPodStore *clientcache.FIFO, scache schedulercache. []algorithm.PriorityConfig{}, algorithm.EmptyMetadataProducer, []algorithm.SchedulerExtender{}) - bindingChan := make(chan *api.Binding, 1) + bindingChan := make(chan *v1.Binding, 1) errChan := make(chan error, 1) cfg := &Config{ SchedulerCache: scache, NodeLister: nodeLister, Algorithm: algo, - Binder: fakeBinder{func(b *api.Binding) error { + Binder: fakeBinder{func(b *v1.Binding) error { bindingChan <- b return nil }}, - NextPod: func() *api.Pod { - return clientcache.Pop(queuedPodStore).(*api.Pod) + NextPod: func() *v1.Pod { + return clientcache.Pop(queuedPodStore).(*v1.Pod) }, - Error: func(p *api.Pod, err error) { + Error: func(p *v1.Pod, err error) { errChan <- err }, Recorder: &record.FakeRecorder{}, diff --git a/plugin/pkg/scheduler/schedulercache/cache.go b/plugin/pkg/scheduler/schedulercache/cache.go index 985f9ed0f73..697b3789073 100644 --- a/plugin/pkg/scheduler/schedulercache/cache.go +++ b/plugin/pkg/scheduler/schedulercache/cache.go @@ -22,7 +22,7 @@ import ( "time" "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/pkg/util/wait" ) @@ -57,7 +57,7 @@ type schedulerCache struct { } type podState struct { - pod *api.Pod + pod *v1.Pod // Used by assumedPod to determinate expiration. deadline *time.Time } @@ -90,10 +90,10 @@ func (cache *schedulerCache) UpdateNodeNameToInfoMap(nodeNameToInfo map[string]* return nil } -func (cache *schedulerCache) List(selector labels.Selector) ([]*api.Pod, error) { +func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) { cache.mu.Lock() defer cache.mu.Unlock() - var pods []*api.Pod + var pods []*v1.Pod for _, info := range cache.nodes { for _, pod := range info.pods { if selector.Matches(labels.Set(pod.Labels)) { @@ -104,12 +104,12 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*api.Pod, error) return pods, nil } -func (cache *schedulerCache) AssumePod(pod *api.Pod) error { +func (cache *schedulerCache) AssumePod(pod *v1.Pod) error { return cache.assumePod(pod, time.Now()) } // assumePod exists for making test deterministic by taking time as input argument. 
-func (cache *schedulerCache) assumePod(pod *api.Pod, now time.Time) error { +func (cache *schedulerCache) assumePod(pod *v1.Pod, now time.Time) error { cache.mu.Lock() defer cache.mu.Unlock() @@ -132,7 +132,7 @@ func (cache *schedulerCache) assumePod(pod *api.Pod, now time.Time) error { return nil } -func (cache *schedulerCache) ForgetPod(pod *api.Pod) error { +func (cache *schedulerCache) ForgetPod(pod *v1.Pod) error { key, err := getPodKey(pod) if err != nil { return err @@ -157,7 +157,7 @@ func (cache *schedulerCache) ForgetPod(pod *api.Pod) error { return nil } -func (cache *schedulerCache) AddPod(pod *api.Pod) error { +func (cache *schedulerCache) AddPod(pod *v1.Pod) error { key, err := getPodKey(pod) if err != nil { return err @@ -184,7 +184,7 @@ func (cache *schedulerCache) AddPod(pod *api.Pod) error { return nil } -func (cache *schedulerCache) UpdatePod(oldPod, newPod *api.Pod) error { +func (cache *schedulerCache) UpdatePod(oldPod, newPod *v1.Pod) error { key, err := getPodKey(oldPod) if err != nil { return err @@ -207,7 +207,7 @@ func (cache *schedulerCache) UpdatePod(oldPod, newPod *api.Pod) error { return nil } -func (cache *schedulerCache) updatePod(oldPod, newPod *api.Pod) error { +func (cache *schedulerCache) updatePod(oldPod, newPod *v1.Pod) error { if err := cache.removePod(oldPod); err != nil { return err } @@ -215,7 +215,7 @@ func (cache *schedulerCache) updatePod(oldPod, newPod *api.Pod) error { return nil } -func (cache *schedulerCache) addPod(pod *api.Pod) { +func (cache *schedulerCache) addPod(pod *v1.Pod) { n, ok := cache.nodes[pod.Spec.NodeName] if !ok { n = NewNodeInfo() @@ -224,7 +224,7 @@ func (cache *schedulerCache) addPod(pod *api.Pod) { n.addPod(pod) } -func (cache *schedulerCache) removePod(pod *api.Pod) error { +func (cache *schedulerCache) removePod(pod *v1.Pod) error { n := cache.nodes[pod.Spec.NodeName] if err := n.removePod(pod); err != nil { return err @@ -235,7 +235,7 @@ func (cache *schedulerCache) removePod(pod *api.Pod) error { return nil } -func (cache *schedulerCache) RemovePod(pod *api.Pod) error { +func (cache *schedulerCache) RemovePod(pod *v1.Pod) error { key, err := getPodKey(pod) if err != nil { return err @@ -260,7 +260,7 @@ func (cache *schedulerCache) RemovePod(pod *api.Pod) error { return nil } -func (cache *schedulerCache) AddNode(node *api.Node) error { +func (cache *schedulerCache) AddNode(node *v1.Node) error { cache.mu.Lock() defer cache.mu.Unlock() @@ -272,7 +272,7 @@ func (cache *schedulerCache) AddNode(node *api.Node) error { return n.SetNode(node) } -func (cache *schedulerCache) UpdateNode(oldNode, newNode *api.Node) error { +func (cache *schedulerCache) UpdateNode(oldNode, newNode *v1.Node) error { cache.mu.Lock() defer cache.mu.Unlock() @@ -284,7 +284,7 @@ func (cache *schedulerCache) UpdateNode(oldNode, newNode *api.Node) error { return n.SetNode(newNode) } -func (cache *schedulerCache) RemoveNode(node *api.Node) error { +func (cache *schedulerCache) RemoveNode(node *v1.Node) error { cache.mu.Lock() defer cache.mu.Unlock() diff --git a/plugin/pkg/scheduler/schedulercache/cache_test.go b/plugin/pkg/scheduler/schedulercache/cache_test.go index fb0e062f9ce..b5a632cbbd7 100644 --- a/plugin/pkg/scheduler/schedulercache/cache_test.go +++ b/plugin/pkg/scheduler/schedulercache/cache_test.go @@ -22,8 +22,8 @@ import ( "testing" "time" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" priorityutil 
"k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" ) @@ -42,19 +42,19 @@ func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *No // on node level. func TestAssumePodScheduled(t *testing.T) { nodeName := "node" - testPods := []*api.Pod{ - makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}), - makeBasePod(nodeName, "test-1", "100m", "500", []api.ContainerPort{{HostPort: 80}}), - makeBasePod(nodeName, "test-2", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}), - makeBasePod(nodeName, "test-nonzero", "", "", []api.ContainerPort{{HostPort: 80}}), + testPods := []*v1.Pod{ + makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}), + makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}), + makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}), + makeBasePod(nodeName, "test-nonzero", "", "", []v1.ContainerPort{{HostPort: 80}}), } tests := []struct { - pods []*api.Pod + pods []*v1.Pod wNodeInfo *NodeInfo }{{ - pods: []*api.Pod{testPods[0]}, + pods: []*v1.Pod{testPods[0]}, wNodeInfo: &NodeInfo{ requestedResource: &Resource{ MilliCPU: 100, @@ -65,10 +65,10 @@ func TestAssumePodScheduled(t *testing.T) { Memory: 500, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[0]}, + pods: []*v1.Pod{testPods[0]}, }, }, { - pods: []*api.Pod{testPods[1], testPods[2]}, + pods: []*v1.Pod{testPods[1], testPods[2]}, wNodeInfo: &NodeInfo{ requestedResource: &Resource{ MilliCPU: 300, @@ -79,10 +79,10 @@ func TestAssumePodScheduled(t *testing.T) { Memory: 1524, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[1], testPods[2]}, + pods: []*v1.Pod{testPods[1], testPods[2]}, }, }, { // test non-zero request - pods: []*api.Pod{testPods[3]}, + pods: []*v1.Pod{testPods[3]}, wNodeInfo: &NodeInfo{ requestedResource: &Resource{ MilliCPU: 0, @@ -93,7 +93,7 @@ func TestAssumePodScheduled(t *testing.T) { Memory: priorityutil.DefaultMemoryRequest, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[3]}, + pods: []*v1.Pod{testPods[3]}, }, }} @@ -119,7 +119,7 @@ func TestAssumePodScheduled(t *testing.T) { } type testExpirePodStruct struct { - pod *api.Pod + pod *v1.Pod assumedTime time.Time } @@ -127,9 +127,9 @@ type testExpirePodStruct struct { // The removal will be reflected in node info. 
func TestExpirePod(t *testing.T) { nodeName := "node" - testPods := []*api.Pod{ - makeBasePod(nodeName, "test-1", "100m", "500", []api.ContainerPort{{HostPort: 80}}), - makeBasePod(nodeName, "test-2", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}), + testPods := []*v1.Pod{ + makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}), + makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}), } now := time.Now() ttl := 10 * time.Second @@ -160,7 +160,7 @@ func TestExpirePod(t *testing.T) { Memory: 1024, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[1]}, + pods: []*v1.Pod{testPods[1]}, }, }} @@ -186,18 +186,18 @@ func TestAddPodWillConfirm(t *testing.T) { now := time.Now() ttl := 10 * time.Second - testPods := []*api.Pod{ - makeBasePod(nodeName, "test-1", "100m", "500", []api.ContainerPort{{HostPort: 80}}), - makeBasePod(nodeName, "test-2", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}), + testPods := []*v1.Pod{ + makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}), + makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}), } tests := []struct { - podsToAssume []*api.Pod - podsToAdd []*api.Pod + podsToAssume []*v1.Pod + podsToAdd []*v1.Pod wNodeInfo *NodeInfo }{{ // two pod were assumed at same time. But first one is called Add() and gets confirmed. - podsToAssume: []*api.Pod{testPods[0], testPods[1]}, - podsToAdd: []*api.Pod{testPods[0]}, + podsToAssume: []*v1.Pod{testPods[0], testPods[1]}, + podsToAdd: []*v1.Pod{testPods[0]}, wNodeInfo: &NodeInfo{ requestedResource: &Resource{ MilliCPU: 100, @@ -208,7 +208,7 @@ func TestAddPodWillConfirm(t *testing.T) { Memory: 500, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[0]}, + pods: []*v1.Pod{testPods[0]}, }, }} @@ -235,9 +235,9 @@ func TestAddPodWillConfirm(t *testing.T) { func TestAddPodAfterExpiration(t *testing.T) { nodeName := "node" ttl := 10 * time.Second - basePod := makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}) + basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}) tests := []struct { - pod *api.Pod + pod *v1.Pod wNodeInfo *NodeInfo }{{ @@ -252,7 +252,7 @@ func TestAddPodAfterExpiration(t *testing.T) { Memory: 500, }, allocatableResource: &Resource{}, - pods: []*api.Pod{basePod}, + pods: []*v1.Pod{basePod}, }, }} @@ -281,19 +281,19 @@ func TestAddPodAfterExpiration(t *testing.T) { func TestUpdatePod(t *testing.T) { nodeName := "node" ttl := 10 * time.Second - testPods := []*api.Pod{ - makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}), - makeBasePod(nodeName, "test", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}), + testPods := []*v1.Pod{ + makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}), + makeBasePod(nodeName, "test", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}), } tests := []struct { - podsToAssume []*api.Pod - podsToAdd []*api.Pod - podsToUpdate []*api.Pod + podsToAssume []*v1.Pod + podsToAdd []*v1.Pod + podsToUpdate []*v1.Pod wNodeInfo []*NodeInfo }{{ // add a pod and then update it twice - podsToAdd: []*api.Pod{testPods[0]}, - podsToUpdate: []*api.Pod{testPods[0], testPods[1], testPods[0]}, + podsToAdd: []*v1.Pod{testPods[0]}, + podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, wNodeInfo: []*NodeInfo{{ requestedResource: &Resource{ MilliCPU: 200, @@ -304,7 +304,7 @@ func TestUpdatePod(t *testing.T) { 
Memory: 1024, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[1]}, + pods: []*v1.Pod{testPods[1]}, }, { requestedResource: &Resource{ MilliCPU: 100, @@ -315,7 +315,7 @@ func TestUpdatePod(t *testing.T) { Memory: 500, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[0]}, + pods: []*v1.Pod{testPods[0]}, }}, }} @@ -345,20 +345,20 @@ func TestUpdatePod(t *testing.T) { func TestExpireAddUpdatePod(t *testing.T) { nodeName := "node" ttl := 10 * time.Second - testPods := []*api.Pod{ - makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}), - makeBasePod(nodeName, "test", "200m", "1Ki", []api.ContainerPort{{HostPort: 8080}}), + testPods := []*v1.Pod{ + makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}), + makeBasePod(nodeName, "test", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}), } tests := []struct { - podsToAssume []*api.Pod - podsToAdd []*api.Pod - podsToUpdate []*api.Pod + podsToAssume []*v1.Pod + podsToAdd []*v1.Pod + podsToUpdate []*v1.Pod wNodeInfo []*NodeInfo }{{ // Pod is assumed, expired, and added. Then it would be updated twice. - podsToAssume: []*api.Pod{testPods[0]}, - podsToAdd: []*api.Pod{testPods[0]}, - podsToUpdate: []*api.Pod{testPods[0], testPods[1], testPods[0]}, + podsToAssume: []*v1.Pod{testPods[0]}, + podsToAdd: []*v1.Pod{testPods[0]}, + podsToUpdate: []*v1.Pod{testPods[0], testPods[1], testPods[0]}, wNodeInfo: []*NodeInfo{{ requestedResource: &Resource{ MilliCPU: 200, @@ -369,7 +369,7 @@ func TestExpireAddUpdatePod(t *testing.T) { Memory: 1024, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[1]}, + pods: []*v1.Pod{testPods[1]}, }, { requestedResource: &Resource{ MilliCPU: 100, @@ -380,7 +380,7 @@ func TestExpireAddUpdatePod(t *testing.T) { Memory: 500, }, allocatableResource: &Resource{}, - pods: []*api.Pod{testPods[0]}, + pods: []*v1.Pod{testPods[0]}, }}, }} @@ -417,9 +417,9 @@ func TestExpireAddUpdatePod(t *testing.T) { // TestRemovePod tests after added pod is removed, its information should also be subtracted. 
func TestRemovePod(t *testing.T) { nodeName := "node" - basePod := makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}) + basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}) tests := []struct { - pod *api.Pod + pod *v1.Pod wNodeInfo *NodeInfo }{{ @@ -434,7 +434,7 @@ func TestRemovePod(t *testing.T) { Memory: 500, }, allocatableResource: &Resource{}, - pods: []*api.Pod{basePod}, + pods: []*v1.Pod{basePod}, }, }} @@ -459,11 +459,11 @@ func TestRemovePod(t *testing.T) { func TestForgetPod(t *testing.T) { nodeName := "node" - basePod := makeBasePod(nodeName, "test", "100m", "500", []api.ContainerPort{{HostPort: 80}}) + basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}) tests := []struct { - pods []*api.Pod + pods []*v1.Pod }{{ - pods: []*api.Pod{basePod}, + pods: []*v1.Pod{basePod}, }} now := time.Now() ttl := 10 * time.Second @@ -517,22 +517,22 @@ func benchmarkExpire(b *testing.B, podNum int) { } } -func makeBasePod(nodeName, objName, cpu, mem string, ports []api.ContainerPort) *api.Pod { - req := api.ResourceList{} +func makeBasePod(nodeName, objName, cpu, mem string, ports []v1.ContainerPort) *v1.Pod { + req := v1.ResourceList{} if cpu != "" { - req = api.ResourceList{ - api.ResourceCPU: resource.MustParse(cpu), - api.ResourceMemory: resource.MustParse(mem), + req = v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(cpu), + v1.ResourceMemory: resource.MustParse(mem), } } - return &api.Pod{ - ObjectMeta: api.ObjectMeta{ + return &v1.Pod{ + ObjectMeta: v1.ObjectMeta{ Namespace: "node_info_cache_test", Name: objName, }, - Spec: api.PodSpec{ - Containers: []api.Container{{ - Resources: api.ResourceRequirements{ + Spec: v1.PodSpec{ + Containers: []v1.Container{{ + Resources: v1.ResourceRequirements{ Requests: req, }, Ports: ports, diff --git a/plugin/pkg/scheduler/schedulercache/interface.go b/plugin/pkg/scheduler/schedulercache/interface.go index 2f0a84b2a2e..1ca64fa800b 100644 --- a/plugin/pkg/scheduler/schedulercache/interface.go +++ b/plugin/pkg/scheduler/schedulercache/interface.go @@ -17,7 +17,7 @@ limitations under the License. package schedulercache import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" ) @@ -59,29 +59,29 @@ type Cache interface { // AssumePod assumes a pod scheduled and aggregates the pod's information into its node. // The implementation also decides the policy to expire pod before being confirmed (receiving Add event). // After expiration, its information would be subtracted. - AssumePod(pod *api.Pod) error + AssumePod(pod *v1.Pod) error // ForgetPod removes an assumed pod from cache. - ForgetPod(pod *api.Pod) error + ForgetPod(pod *v1.Pod) error // AddPod either confirms a pod if it's assumed, or adds it back if it's expired. // If added back, the pod's information would be added again. - AddPod(pod *api.Pod) error + AddPod(pod *v1.Pod) error // UpdatePod removes oldPod's information and adds newPod's information. - UpdatePod(oldPod, newPod *api.Pod) error + UpdatePod(oldPod, newPod *v1.Pod) error // RemovePod removes a pod. The pod's information would be subtracted from assigned node. - RemovePod(pod *api.Pod) error + RemovePod(pod *v1.Pod) error // AddNode adds overall information about node. - AddNode(node *api.Node) error + AddNode(node *v1.Node) error // UpdateNode updates overall information about node. 
- UpdateNode(oldNode, newNode *api.Node) error + UpdateNode(oldNode, newNode *v1.Node) error // RemoveNode removes overall information about node. - RemoveNode(node *api.Node) error + RemoveNode(node *v1.Node) error // UpdateNodeNameToInfoMap updates the passed infoMap to the current contents of Cache. // The node info contains aggregated information of pods scheduled (including assumed to be) @@ -89,5 +89,5 @@ type Cache interface { UpdateNodeNameToInfoMap(infoMap map[string]*NodeInfo) error // List lists all cached pods (including assumed ones). - List(labels.Selector) ([]*api.Pod, error) + List(labels.Selector) ([]*v1.Pod, error) } diff --git a/plugin/pkg/scheduler/schedulercache/node_info.go b/plugin/pkg/scheduler/schedulercache/node_info.go index 8b98ca8cacf..4a173b6b2ee 100644 --- a/plugin/pkg/scheduler/schedulercache/node_info.go +++ b/plugin/pkg/scheduler/schedulercache/node_info.go @@ -21,8 +21,8 @@ import ( "github.com/golang/glog" - "k8s.io/kubernetes/pkg/api" "k8s.io/kubernetes/pkg/api/resource" + "k8s.io/kubernetes/pkg/api/v1" clientcache "k8s.io/kubernetes/pkg/client/cache" priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util" ) @@ -32,10 +32,10 @@ var emptyResource = Resource{} // NodeInfo is node level aggregated information. type NodeInfo struct { // Overall node information. - node *api.Node + node *v1.Node - pods []*api.Pod - podsWithAffinity []*api.Pod + pods []*v1.Pod + podsWithAffinity []*v1.Pod // Total requested resource of all pods on this node. // It includes assumed pods which scheduler sends binding to apiserver but @@ -59,14 +59,14 @@ type Resource struct { MilliCPU int64 Memory int64 NvidiaGPU int64 - OpaqueIntResources map[api.ResourceName]int64 + OpaqueIntResources map[v1.ResourceName]int64 } -func (r *Resource) ResourceList() api.ResourceList { - result := api.ResourceList{ - api.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI), - api.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI), - api.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI), +func (r *Resource) ResourceList() v1.ResourceList { + result := v1.ResourceList{ + v1.ResourceCPU: *resource.NewMilliQuantity(r.MilliCPU, resource.DecimalSI), + v1.ResourceMemory: *resource.NewQuantity(r.Memory, resource.BinarySI), + v1.ResourceNvidiaGPU: *resource.NewQuantity(r.NvidiaGPU, resource.DecimalSI), } for rName, rQuant := range r.OpaqueIntResources { result[rName] = *resource.NewQuantity(rQuant, resource.DecimalSI) @@ -77,7 +77,7 @@ func (r *Resource) ResourceList() api.ResourceList { // NewNodeInfo returns a ready to use empty NodeInfo object. // If any pods are given in arguments, their information will be aggregated in // the returned object. -func NewNodeInfo(pods ...*api.Pod) *NodeInfo { +func NewNodeInfo(pods ...*v1.Pod) *NodeInfo { ni := &NodeInfo{ requestedResource: &Resource{}, nonzeroRequest: &Resource{}, @@ -92,7 +92,7 @@ func NewNodeInfo(pods ...*api.Pod) *NodeInfo { } // Returns overall information about this node. -func (n *NodeInfo) Node() *api.Node { +func (n *NodeInfo) Node() *v1.Node { if n == nil { return nil } @@ -100,7 +100,7 @@ func (n *NodeInfo) Node() *api.Node { } // Pods return all pods scheduled (including assumed to be) on this node. -func (n *NodeInfo) Pods() []*api.Pod { +func (n *NodeInfo) Pods() []*v1.Pod { if n == nil { return nil } @@ -108,7 +108,7 @@ func (n *NodeInfo) Pods() []*api.Pod { } // PodsWithAffinity return all pods with (anti)affinity constraints on this node. 
-func (n *NodeInfo) PodsWithAffinity() []*api.Pod { +func (n *NodeInfo) PodsWithAffinity() []*v1.Pod { if n == nil { return nil } @@ -156,10 +156,10 @@ func (n *NodeInfo) Clone() *NodeInfo { generation: n.generation, } if len(n.pods) > 0 { - clone.pods = append([]*api.Pod(nil), n.pods...) + clone.pods = append([]*v1.Pod(nil), n.pods...) } if len(n.podsWithAffinity) > 0 { - clone.podsWithAffinity = append([]*api.Pod(nil), n.podsWithAffinity...) + clone.podsWithAffinity = append([]*v1.Pod(nil), n.podsWithAffinity...) } return clone } @@ -173,8 +173,8 @@ func (n *NodeInfo) String() string { return fmt.Sprintf("&NodeInfo{Pods:%v, RequestedResource:%#v, NonZeroRequest: %#v}", podKeys, n.requestedResource, n.nonzeroRequest) } -func hasPodAffinityConstraints(pod *api.Pod) bool { - affinity, err := api.GetAffinityFromPodAnnotations(pod.Annotations) +func hasPodAffinityConstraints(pod *v1.Pod) bool { + affinity, err := v1.GetAffinityFromPodAnnotations(pod.Annotations) if err != nil || affinity == nil { return false } @@ -182,14 +182,14 @@ func hasPodAffinityConstraints(pod *api.Pod) bool { } // addPod adds pod information to this NodeInfo. -func (n *NodeInfo) addPod(pod *api.Pod) { +func (n *NodeInfo) addPod(pod *v1.Pod) { // cpu, mem, nvidia_gpu, non0_cpu, non0_mem := calculateResource(pod) res, non0_cpu, non0_mem := calculateResource(pod) n.requestedResource.MilliCPU += res.MilliCPU n.requestedResource.Memory += res.Memory n.requestedResource.NvidiaGPU += res.NvidiaGPU if n.requestedResource.OpaqueIntResources == nil && len(res.OpaqueIntResources) > 0 { - n.requestedResource.OpaqueIntResources = map[api.ResourceName]int64{} + n.requestedResource.OpaqueIntResources = map[v1.ResourceName]int64{} } for rName, rQuant := range res.OpaqueIntResources { n.requestedResource.OpaqueIntResources[rName] += rQuant @@ -204,7 +204,7 @@ func (n *NodeInfo) addPod(pod *api.Pod) { } // removePod subtracts pod information to this NodeInfo. -func (n *NodeInfo) removePod(pod *api.Pod) error { +func (n *NodeInfo) removePod(pod *v1.Pod) error { k1, err := getPodKey(pod) if err != nil { return err @@ -240,7 +240,7 @@ func (n *NodeInfo) removePod(pod *api.Pod) error { n.requestedResource.Memory -= res.Memory n.requestedResource.NvidiaGPU -= res.NvidiaGPU if len(res.OpaqueIntResources) > 0 && n.requestedResource.OpaqueIntResources == nil { - n.requestedResource.OpaqueIntResources = map[api.ResourceName]int64{} + n.requestedResource.OpaqueIntResources = map[v1.ResourceName]int64{} } for rName, rQuant := range res.OpaqueIntResources { n.requestedResource.OpaqueIntResources[rName] -= rQuant @@ -254,21 +254,21 @@ func (n *NodeInfo) removePod(pod *api.Pod) error { return fmt.Errorf("no corresponding pod %s in pods of node %s", pod.Name, n.node.Name) } -func calculateResource(pod *api.Pod) (res Resource, non0_cpu int64, non0_mem int64) { +func calculateResource(pod *v1.Pod) (res Resource, non0_cpu int64, non0_mem int64) { for _, c := range pod.Spec.Containers { for rName, rQuant := range c.Resources.Requests { switch rName { - case api.ResourceCPU: + case v1.ResourceCPU: res.MilliCPU += rQuant.MilliValue() - case api.ResourceMemory: + case v1.ResourceMemory: res.Memory += rQuant.Value() - case api.ResourceNvidiaGPU: + case v1.ResourceNvidiaGPU: res.NvidiaGPU += rQuant.Value() default: - if api.IsOpaqueIntResourceName(rName) { + if v1.IsOpaqueIntResourceName(rName) { // Lazily allocate opaque resource map. 
if res.OpaqueIntResources == nil { - res.OpaqueIntResources = map[api.ResourceName]int64{} + res.OpaqueIntResources = map[v1.ResourceName]int64{} } res.OpaqueIntResources[rName] += rQuant.Value() } @@ -284,23 +284,23 @@ func calculateResource(pod *api.Pod) (res Resource, non0_cpu int64, non0_mem int } // Sets the overall node information. -func (n *NodeInfo) SetNode(node *api.Node) error { +func (n *NodeInfo) SetNode(node *v1.Node) error { n.node = node for rName, rQuant := range node.Status.Allocatable { switch rName { - case api.ResourceCPU: + case v1.ResourceCPU: n.allocatableResource.MilliCPU = rQuant.MilliValue() - case api.ResourceMemory: + case v1.ResourceMemory: n.allocatableResource.Memory = rQuant.Value() - case api.ResourceNvidiaGPU: + case v1.ResourceNvidiaGPU: n.allocatableResource.NvidiaGPU = rQuant.Value() - case api.ResourcePods: + case v1.ResourcePods: n.allowedPodNumber = int(rQuant.Value()) default: - if api.IsOpaqueIntResourceName(rName) { + if v1.IsOpaqueIntResourceName(rName) { // Lazily allocate opaque resource map. if n.allocatableResource.OpaqueIntResources == nil { - n.allocatableResource.OpaqueIntResources = map[api.ResourceName]int64{} + n.allocatableResource.OpaqueIntResources = map[v1.ResourceName]int64{} } n.allocatableResource.OpaqueIntResources[rName] = rQuant.Value() } @@ -311,7 +311,7 @@ func (n *NodeInfo) SetNode(node *api.Node) error { } // Removes the overall information about the node. -func (n *NodeInfo) RemoveNode(node *api.Node) error { +func (n *NodeInfo) RemoveNode(node *v1.Node) error { // We don't remove NodeInfo for because there can still be some pods on this node - // this is because notifications about pods are delivered in a different watch, // and thus can potentially be observed later, even though they happened before @@ -324,6 +324,6 @@ func (n *NodeInfo) RemoveNode(node *api.Node) error { } // getPodKey returns the string key of a pod. -func getPodKey(pod *api.Pod) (string, error) { +func getPodKey(pod *v1.Pod) (string, error) { return clientcache.MetaNamespaceKeyFunc(pod) } diff --git a/plugin/pkg/scheduler/schedulercache/util.go b/plugin/pkg/scheduler/schedulercache/util.go index 12e6848bc57..d5d52f49119 100644 --- a/plugin/pkg/scheduler/schedulercache/util.go +++ b/plugin/pkg/scheduler/schedulercache/util.go @@ -16,11 +16,11 @@ limitations under the License. package schedulercache -import "k8s.io/kubernetes/pkg/api" +import "k8s.io/kubernetes/pkg/api/v1" // CreateNodeNameToInfoMap obtains a list of pods and pivots that list into a map where the keys are node names // and the values are the aggregated information for that node. -func CreateNodeNameToInfoMap(pods []*api.Pod, nodes []*api.Node) map[string]*NodeInfo { +func CreateNodeNameToInfoMap(pods []*v1.Pod, nodes []*v1.Node) map[string]*NodeInfo { nodeNameToInfo := make(map[string]*NodeInfo) for _, pod := range pods { nodeName := pod.Spec.NodeName diff --git a/plugin/pkg/scheduler/testing/fake_cache.go b/plugin/pkg/scheduler/testing/fake_cache.go index b4106c38fef..fb0d0c6d53b 100644 --- a/plugin/pkg/scheduler/testing/fake_cache.go +++ b/plugin/pkg/scheduler/testing/fake_cache.go @@ -17,37 +17,37 @@ limitations under the License. 
package schedulercache import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) // FakeCache is used for testing type FakeCache struct { - AssumeFunc func(*api.Pod) + AssumeFunc func(*v1.Pod) } -func (f *FakeCache) AssumePod(pod *api.Pod) error { +func (f *FakeCache) AssumePod(pod *v1.Pod) error { f.AssumeFunc(pod) return nil } -func (f *FakeCache) ForgetPod(pod *api.Pod) error { return nil } +func (f *FakeCache) ForgetPod(pod *v1.Pod) error { return nil } -func (f *FakeCache) AddPod(pod *api.Pod) error { return nil } +func (f *FakeCache) AddPod(pod *v1.Pod) error { return nil } -func (f *FakeCache) UpdatePod(oldPod, newPod *api.Pod) error { return nil } +func (f *FakeCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil } -func (f *FakeCache) RemovePod(pod *api.Pod) error { return nil } +func (f *FakeCache) RemovePod(pod *v1.Pod) error { return nil } -func (f *FakeCache) AddNode(node *api.Node) error { return nil } +func (f *FakeCache) AddNode(node *v1.Node) error { return nil } -func (f *FakeCache) UpdateNode(oldNode, newNode *api.Node) error { return nil } +func (f *FakeCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil } -func (f *FakeCache) RemoveNode(node *api.Node) error { return nil } +func (f *FakeCache) RemoveNode(node *v1.Node) error { return nil } func (f *FakeCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error { return nil } -func (f *FakeCache) List(s labels.Selector) ([]*api.Pod, error) { return nil, nil } +func (f *FakeCache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil } diff --git a/plugin/pkg/scheduler/testing/pods_to_cache.go b/plugin/pkg/scheduler/testing/pods_to_cache.go index 586760bf916..c5a96fde11e 100644 --- a/plugin/pkg/scheduler/testing/pods_to_cache.go +++ b/plugin/pkg/scheduler/testing/pods_to_cache.go @@ -17,35 +17,35 @@ limitations under the License. 
package schedulercache import ( - "k8s.io/kubernetes/pkg/api" + "k8s.io/kubernetes/pkg/api/v1" "k8s.io/kubernetes/pkg/labels" "k8s.io/kubernetes/plugin/pkg/scheduler/schedulercache" ) // PodsToCache is used for testing -type PodsToCache []*api.Pod +type PodsToCache []*v1.Pod -func (p PodsToCache) AssumePod(pod *api.Pod) error { return nil } +func (p PodsToCache) AssumePod(pod *v1.Pod) error { return nil } -func (p PodsToCache) ForgetPod(pod *api.Pod) error { return nil } +func (p PodsToCache) ForgetPod(pod *v1.Pod) error { return nil } -func (p PodsToCache) AddPod(pod *api.Pod) error { return nil } +func (p PodsToCache) AddPod(pod *v1.Pod) error { return nil } -func (p PodsToCache) UpdatePod(oldPod, newPod *api.Pod) error { return nil } +func (p PodsToCache) UpdatePod(oldPod, newPod *v1.Pod) error { return nil } -func (p PodsToCache) RemovePod(pod *api.Pod) error { return nil } +func (p PodsToCache) RemovePod(pod *v1.Pod) error { return nil } -func (p PodsToCache) AddNode(node *api.Node) error { return nil } +func (p PodsToCache) AddNode(node *v1.Node) error { return nil } -func (p PodsToCache) UpdateNode(oldNode, newNode *api.Node) error { return nil } +func (p PodsToCache) UpdateNode(oldNode, newNode *v1.Node) error { return nil } -func (p PodsToCache) RemoveNode(node *api.Node) error { return nil } +func (p PodsToCache) RemoveNode(node *v1.Node) error { return nil } func (p PodsToCache) UpdateNodeNameToInfoMap(infoMap map[string]*schedulercache.NodeInfo) error { return nil } -func (p PodsToCache) List(s labels.Selector) (selected []*api.Pod, err error) { +func (p PodsToCache) List(s labels.Selector) (selected []*v1.Pod, err error) { for _, pod := range p { if s.Matches(labels.Set(pod.Labels)) { selected = append(selected, pod)