separate listers into their own package
@@ -261,6 +261,7 @@
     "k8s.io/kubernetes/pkg/master/ports",
     "k8s.io/kubernetes/pkg/scheduler/api",
     "k8s.io/kubernetes/pkg/scheduler/util",
+    "k8s.io/kubernetes/pkg/scheduler/listers",
     "k8s.io/kubernetes/pkg/security/apparmor",
     "k8s.io/kubernetes/pkg/util/parsers",
     "k8s.io/kubernetes/pkg/fieldpath",
@@ -26,6 +26,7 @@ go_library(
         "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/internal/cache/debugger:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/metrics:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -129,6 +130,7 @@ filegroup(
         "//pkg/scheduler/internal/cache:all-srcs",
         "//pkg/scheduler/internal/heap:all-srcs",
         "//pkg/scheduler/internal/queue:all-srcs",
+        "//pkg/scheduler/listers:all-srcs",
         "//pkg/scheduler/metrics:all-srcs",
         "//pkg/scheduler/nodeinfo:all-srcs",
         "//pkg/scheduler/testing:all-srcs",
@@ -13,7 +13,6 @@ go_library(
         "error.go",
         "metadata.go",
         "predicates.go",
-        "testing_helper.go",
         "utils.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates",
@@ -23,6 +22,7 @@ go_library(
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/api:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//pkg/scheduler/volumebinder:go_default_library",
@@ -61,6 +61,7 @@ go_test(
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
         "//pkg/scheduler/api:go_default_library",
+        "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//pkg/volume/util:go_default_library",
@@ -30,6 +30,7 @@ import (
     featuregatetesting "k8s.io/component-base/featuregate/testing"
     csilibplugins "k8s.io/csi-translation-lib/plugins"
     "k8s.io/kubernetes/pkg/features"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
 )

 const (
@@ -471,8 +472,8 @@ func TestCSIVolumeCountPredicate(t *testing.T) {
     }
 }

-func getFakeCSIPVInfo(volumeName string, driverNames ...string) FakePersistentVolumeInfo {
-    pvInfos := FakePersistentVolumeInfo{}
+func getFakeCSIPVInfo(volumeName string, driverNames ...string) fakelisters.PersistentVolumeInfo {
+    pvInfos := fakelisters.PersistentVolumeInfo{}
     for _, driver := range driverNames {
         for j := 0; j < 4; j++ {
             volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
@@ -516,8 +517,8 @@ func getFakeCSIPVInfo(volumeName string, driverNames ...string) FakePersistentVo
     return pvInfos
 }

-func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) FakePersistentVolumeClaimInfo {
-    pvcInfos := FakePersistentVolumeClaimInfo{}
+func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) fakelisters.PersistentVolumeClaimInfo {
+    pvcInfos := fakelisters.PersistentVolumeClaimInfo{}
     for _, driver := range driverNames {
         for j := 0; j < 4; j++ {
             v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
@@ -559,8 +560,8 @@ func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
     csiNode.Annotations = nodeInfoAnnotations
 }

-func getFakeCSIStorageClassInfo(scName, provisionerName string) FakeStorageClassInfo {
-    return FakeStorageClassInfo{
+func getFakeCSIStorageClassInfo(scName, provisionerName string) fakelisters.StorageClassInfo {
+    return fakelisters.StorageClassInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: scName},
             Provisioner: provisionerName,
@@ -568,9 +569,9 @@ func getFakeCSIStorageClassInfo(scName, provisionerName string) FakeStorageClass
     }
 }

-func getFakeCSINodeInfo(csiNode *storagev1beta1.CSINode) FakeCSINodeInfo {
+func getFakeCSINodeInfo(csiNode *storagev1beta1.CSINode) fakelisters.CSINodeInfo {
     if csiNode != nil {
-        return FakeCSINodeInfo(*csiNode)
+        return fakelisters.CSINodeInfo(*csiNode)
     }
-    return FakeCSINodeInfo{}
+    return fakelisters.CSINodeInfo{}
 }
@@ -31,11 +31,27 @@ import (
     featuregatetesting "k8s.io/component-base/featuregate/testing"
     csilibplugins "k8s.io/csi-translation-lib/plugins"
     "k8s.io/kubernetes/pkg/features"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     volumeutil "k8s.io/kubernetes/pkg/volume/util"
     utilpointer "k8s.io/utils/pointer"
 )

+func getVolumeLimitKey(filterType string) v1.ResourceName {
+    switch filterType {
+    case EBSVolumeFilterType:
+        return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
+    case GCEPDVolumeFilterType:
+        return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
+    case AzureDiskVolumeFilterType:
+        return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+    case CinderVolumeFilterType:
+        return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
+    default:
+        return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
+    }
+}
+
 func onePVCPod(filterName string) *v1.Pod {
     return &v1.Pod{
         Spec: v1.PodSpec{
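Note: with testing_helper.go deleted (see the removed file further down), the exported GetVolumeLimitKey helper is gone, so this test file now carries the unexported getVolumeLimitKey copy added above. A minimal sketch of the call-site pattern, mirroring the getNodeWithPodAndVolumeLimits hunk below (the setVolumeLimit wrapper name and the limit value are illustrative, not part of the commit):

    // Each driver's attachable-volume limit is surfaced as an allocatable
    // resource on the node, keyed by the per-driver limit key.
    func setVolumeLimit(node *v1.Node, filterType string, limit int64) {
        node.Status.Allocatable[getVolumeLimitKey(filterType)] = *resource.NewQuantity(limit, resource.DecimalSI)
    }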
@@ -877,7 +893,7 @@ func TestVolumeCountConflicts(t *testing.T) {
     }
 }

-func getFakeStorageClassInfo(sc string) FakeStorageClassInfo {
+func getFakeStorageClassInfo(sc string) fakelisters.StorageClassInfo {
     var provisioner string
     switch sc {
     case EBSVolumeFilterType:
@@ -889,9 +905,9 @@ func getFakeStorageClassInfo(sc string) FakeStorageClassInfo {
     case CinderVolumeFilterType:
         provisioner = csilibplugins.CinderInTreePluginName
     default:
-        return FakeStorageClassInfo{}
+        return fakelisters.StorageClassInfo{}
     }
-    return FakeStorageClassInfo{
+    return fakelisters.StorageClassInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: sc},
             Provisioner: provisioner,
@@ -903,8 +919,8 @@ func getFakeStorageClassInfo(sc string) FakeStorageClassInfo {
     }
 }

-func getFakePVInfo(filterName string) FakePersistentVolumeInfo {
-    return FakePersistentVolumeInfo{
+func getFakePVInfo(filterName string) fakelisters.PersistentVolumeInfo {
+    return fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
             Spec: v1.PersistentVolumeSpec{
@@ -922,8 +938,8 @@ func getFakePVInfo(filterName string) FakePersistentVolumeInfo {
     }
 }

-func getFakePVCInfo(filterName string) FakePersistentVolumeClaimInfo {
-    return FakePersistentVolumeClaimInfo{
+func getFakePVCInfo(filterName string) fakelisters.PersistentVolumeClaimInfo {
+    return fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
             Spec: v1.PersistentVolumeClaimSpec{
@@ -1063,7 +1079,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int

     addLimitToNode := func() {
         for _, driver := range driverNames {
-            node.Status.Allocatable[GetVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+            node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
         }
     }

@@ -25,6 +25,7 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/labels"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -350,16 +351,11 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {

     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            allPodLister := st.FakePodLister(append(test.existingPods, test.addedPod))
+            allPodLister := fakelisters.PodLister(append(test.existingPods, test.addedPod))
             // getMeta creates predicate meta data given the list of pods.
-            getMeta := func(lister st.FakePodLister) (*predicateMetadata, map[string]*schedulernodeinfo.NodeInfo) {
+            getMeta := func(lister fakelisters.PodLister) (*predicateMetadata, map[string]*schedulernodeinfo.NodeInfo) {
                 nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(lister, test.nodes)
-                // nodeList is a list of non-pointer nodes to feed to FakeNodeListInfo.
-                nodeList := []v1.Node{}
-                for _, n := range test.nodes {
-                    nodeList = append(nodeList, *n)
-                }
-                _, precompute := NewServiceAffinityPredicate(lister, st.FakeServiceLister(test.services), FakeNodeListInfo(nodeList), nil)
+                _, precompute := NewServiceAffinityPredicate(fakelisters.NodeLister(test.nodes), lister, fakelisters.ServiceLister(test.services), nil)
                 RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", precompute)
                 meta := GetPredicateMetadata(test.pendingPod, nodeInfoMap)
                 return meta.(*predicateMetadata), nodeInfoMap
@@ -369,7 +365,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
             // are given to the metadata producer.
             allPodsMeta, _ := getMeta(allPodLister)
             // existingPodsMeta1 is meta data produced for test.existingPods (without test.addedPod).
-            existingPodsMeta1, nodeInfoMap := getMeta(st.FakePodLister(test.existingPods))
+            existingPodsMeta1, nodeInfoMap := getMeta(fakelisters.PodLister(test.existingPods))
             // Add test.addedPod to existingPodsMeta1 and make sure meta is equal to allPodsMeta
             nodeInfo := nodeInfoMap[test.addedPod.Spec.NodeName]
             if err := existingPodsMeta1.AddPod(test.addedPod, nodeInfo.Node()); err != nil {
@@ -380,7 +376,7 @@ func TestPredicateMetadata_AddRemovePod(t *testing.T) {
             }
             // Remove the added pod and from existingPodsMeta1 an make sure it is equal
             // to meta generated for existing pods.
-            existingPodsMeta2, _ := getMeta(st.FakePodLister(test.existingPods))
+            existingPodsMeta2, _ := getMeta(fakelisters.PodLister(test.existingPods))
             if err := existingPodsMeta1.RemovePod(test.addedPod, nil); err != nil {
                 t.Errorf("error removing pod from meta: %v", err)
             }
@@ -44,6 +44,7 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/algorithm"
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"
     "k8s.io/kubernetes/pkg/scheduler/volumebinder"
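Note: the new schedulerlisters import refers to the pkg/scheduler/listers package this commit introduces. The package itself is not among the hunks shown; judging from the call sites below (s.nodeLister.GetNodeInfo, c.nodeLister.GetNodeInfo), a minimal sketch of the relevant interface would look like this (an inferred sketch, not the package's full contents):

    // Package listers collects the scheduler's lister interfaces.
    package listers

    import v1 "k8s.io/api/core/v1"

    // NodeLister can fetch a node object by name; it takes over the role of
    // the local NodeInfo interface for scheduler-internal callers.
    type NodeLister interface {
        GetNodeInfo(nodeName string) (*v1.Node, error)
    }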
@@ -149,15 +150,16 @@ func Ordering() []string {
     return predicatesOrdering
 }

-// FitPredicate is a function that indicates if a pod fits into an existing node.
-// The failure information is given by the error.
-type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)

 // NodeInfo interface represents anything that can get node object from node name.
+// TODO(ahg-g): should be deleted, still exist because kubelet depends on it.
 type NodeInfo interface {
     GetNodeInfo(nodeName string) (*v1.Node, error)
 }

+// FitPredicate is a function that indicates if a pod fits into an existing node.
+// The failure information is given by the error.
+type FitPredicate func(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error)
+
 // CSINodeInfo interface represents anything that can get CSINode object from node name.
 type CSINodeInfo interface {
     GetCSINodeInfo(nodeName string) (*v1beta1storage.CSINode, error)
@@ -1052,9 +1054,9 @@ func (n *NodeLabelChecker) CheckNodeLabelPresence(pod *v1.Pod, meta PredicateMet

 // ServiceAffinity defines a struct used for creating service affinity predicates.
 type ServiceAffinity struct {
-    podLister algorithm.PodLister
+    nodeLister schedulerlisters.NodeLister
+    podLister schedulerlisters.PodLister
     serviceLister corelisters.ServiceLister
-    nodeInfo NodeInfo
     labels []string
 }

@@ -1085,11 +1087,11 @@ func (s *ServiceAffinity) serviceAffinityMetadataProducer(pm *predicateMetadata)
 }

 // NewServiceAffinityPredicate creates a ServiceAffinity.
-func NewServiceAffinityPredicate(podLister algorithm.PodLister, serviceLister corelisters.ServiceLister, nodeInfo NodeInfo, labels []string) (FitPredicate, predicateMetadataProducer) {
+func NewServiceAffinityPredicate(nodeLister schedulerlisters.NodeLister, podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, labels []string) (FitPredicate, predicateMetadataProducer) {
     affinity := &ServiceAffinity{
+        nodeLister: nodeLister,
         podLister: podLister,
         serviceLister: serviceLister,
-        nodeInfo: nodeInfo,
         labels: labels,
     }
     return affinity.checkServiceAffinity, affinity.serviceAffinityMetadataProducer
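Note: every caller of NewServiceAffinityPredicate must adapt to the new parameter order and types. A before/after sketch drawn from the test hunks in this commit (variable names are illustrative):

    // before: node access went through the NodeInfo argument, built from a []v1.Node copy
    // predicate, precompute := NewServiceAffinityPredicate(st.FakePodLister(pods), st.FakeServiceLister(services), FakeNodeListInfo(nodeList), labels)

    // after: listers come first, and nodes are passed directly as []*v1.Node
    predicate, precompute := NewServiceAffinityPredicate(fakelisters.NodeLister(nodes), fakelisters.PodLister(pods), fakelisters.ServiceLister(services), labels)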
@@ -1145,7 +1147,7 @@ func (s *ServiceAffinity) checkServiceAffinity(pod *v1.Pod, meta PredicateMetada
     if len(s.labels) > len(affinityLabels) {
         if len(services) > 0 {
             if len(filteredPods) > 0 {
-                nodeWithAffinityLabels, err := s.nodeInfo.GetNodeInfo(filteredPods[0].Spec.NodeName)
+                nodeWithAffinityLabels, err := s.nodeLister.GetNodeInfo(filteredPods[0].Spec.NodeName)
                 if err != nil {
                     return false, nil, err
                 }
@@ -1253,15 +1255,15 @@ func EssentialPredicates(pod *v1.Pod, meta PredicateMetadata, nodeInfo *schedule

 // PodAffinityChecker contains information to check pod affinity.
 type PodAffinityChecker struct {
-    info NodeInfo
-    podLister algorithm.PodLister
+    nodeLister schedulerlisters.NodeLister
+    podLister schedulerlisters.PodLister
 }

 // NewPodAffinityPredicate creates a PodAffinityChecker.
-func NewPodAffinityPredicate(info NodeInfo, podLister algorithm.PodLister) FitPredicate {
+func NewPodAffinityPredicate(nodeLister schedulerlisters.NodeLister, podLister schedulerlisters.PodLister) FitPredicate {
     checker := &PodAffinityChecker{
-        info: info,
+        nodeLister: nodeLister,
         podLister: podLister,
     }
     return checker.InterPodAffinityMatches
 }
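Note: PodAffinityChecker construction changes the same way. In tests (see the TestInterPodAffinity hunk below), the fake listers slot in directly; a sketch, with node and pods standing in for test fixtures:

    // A checker backed by static fakes instead of the old NodeInfo argument.
    checker := PodAffinityChecker{
        nodeLister: fakelisters.NodeLister([]*v1.Node{node}),
        podLister:  fakelisters.PodLister(pods),
    }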
@@ -1315,7 +1317,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod,
         return false, false, nil
     }
     // Namespace and selector of the terms have matched. Now we check topology of the terms.
-    targetPodNode, err := c.info.GetNodeInfo(targetPod.Spec.NodeName)
+    targetPodNodeInfo, err := c.nodeLister.GetNodeInfo(targetPod.Spec.NodeName)
     if err != nil {
         return false, false, err
     }
@@ -1323,7 +1325,7 @@ func (c *PodAffinityChecker) podMatchesPodAffinityTerms(pod, targetPod *v1.Pod,
         if len(term.TopologyKey) == 0 {
             return false, false, fmt.Errorf("empty topologyKey is not allowed except for PreferredDuringScheduling pod anti-affinity")
         }
-        if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNode, term.TopologyKey) {
+        if !priorityutil.NodesHaveSameTopologyKey(nodeInfo.Node(), targetPodNodeInfo, term.TopologyKey) {
             return false, true, nil
         }
     }
@@ -1388,7 +1390,7 @@ func (c *PodAffinityChecker) getMatchingAntiAffinityTopologyPairsOfPods(pod *v1.
     topologyMaps := newTopologyPairsMaps()

     for _, existingPod := range existingPods {
-        existingPodNode, err := c.info.GetNodeInfo(existingPod.Spec.NodeName)
+        existingPodNode, err := c.nodeLister.GetNodeInfo(existingPod.Spec.NodeName)
         if err != nil {
             klog.Errorf("Pod %s has NodeName %q but node is not found", podName(existingPod), existingPod.Spec.NodeName)
             continue
@@ -34,6 +34,7 @@ import (
     v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
     "k8s.io/kubernetes/pkg/features"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
@@ -1852,12 +1853,12 @@ func TestServiceAffinity(t *testing.T) {
     for _, test := range tests {
         testIt := func(skipPrecompute bool) {
             t.Run(fmt.Sprintf("%v/skipPrecompute/%v", test.name, skipPrecompute), func(t *testing.T) {
-                nodes := []v1.Node{node1, node2, node3, node4, node5}
+                nodes := []*v1.Node{&node1, &node2, &node3, &node4, &node5}
                 nodeInfo := schedulernodeinfo.NewNodeInfo()
                 nodeInfo.SetNode(test.node)
                 nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{test.node.Name: nodeInfo}
                 // Reimplementing the logic that the scheduler implements: Any time it makes a predicate, it registers any precomputations.
-                predicate, precompute := NewServiceAffinityPredicate(st.FakePodLister(test.pods), st.FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels)
+                predicate, precompute := NewServiceAffinityPredicate(fakelisters.NodeLister(nodes), fakelisters.PodLister(test.pods), fakelisters.ServiceLister(test.services), test.labels)
                 // Register a precomputation or Rewrite the precomputation to a no-op, depending on the state we want to test.
                 RegisterPredicateMetadataProducer("ServiceAffinityMetaProducer", func(pm *predicateMetadata) {
                     if !skipPrecompute {
@@ -2930,8 +2931,8 @@ func TestInterPodAffinity(t *testing.T) {
             }

             fit := PodAffinityChecker{
-                info: FakeNodeInfo(*node),
-                podLister: st.FakePodLister(test.pods),
+                nodeLister: fakelisters.NodeLister([]*v1.Node{node}),
+                podLister: fakelisters.PodLister(test.pods),
             }
             nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
             nodeInfo.SetNode(test.node)
@@ -2968,7 +2969,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
     tests := []struct {
         pod *v1.Pod
         pods []*v1.Pod
-        nodes []v1.Node
+        nodes []*v1.Node
         nodesExpectAffinityFailureReasons [][]PredicateFailureReason
         fits map[string]bool
         name string
@@ -3000,7 +3001,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
             pods: []*v1.Pod{
                 {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: podLabelA}},
             },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                 {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}},
                 {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChinaAzAz1}},
                 {ObjectMeta: metav1.ObjectMeta{Name: "machine3", Labels: labelRgIndia}},
@@ -3055,7 +3056,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                 {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
                 {Spec: v1.PodSpec{NodeName: "nodeB"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "def"}}},
             },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                 {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "h1"}}},
                 {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "h2"}}},
             },
@@ -3108,7 +3109,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                },
            },
            pods: []*v1.Pod{{Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Name: "p1", Labels: map[string]string{"foo": "bar"}}}},
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"zone": "az1", "hostname": "h1"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"zone": "az2", "hostname": "h2"}}},
            },
@@ -3146,7 +3147,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
            pods: []*v1.Pod{
                {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "hostname": "nodeB"}}},
            },
@@ -3195,7 +3196,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
            pods: []*v1.Pod{
                {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc", "service": "securityscan"}}},
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3235,7 +3236,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
            pods: []*v1.Pod{
                {Spec: v1.PodSpec{NodeName: "nodeA"}, ObjectMeta: metav1.ObjectMeta{Labels: map[string]string{"foo": "abc"}}},
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
@@ -3298,7 +3299,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
@@ -3379,7 +3380,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: labelRgChina}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: labelRgChinaAzAz1}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: labelRgIndia}},
@@ -3424,7 +3425,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
            },
@@ -3465,7 +3466,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
            },
@@ -3528,7 +3529,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3589,7 +3590,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3642,7 +3643,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3696,7 +3697,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3748,7 +3749,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3803,7 +3804,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -3889,7 +3890,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeC", Labels: map[string]string{"region": "r1", "zone": "z3", "hostname": "nodeC"}}},
@@ -3946,7 +3947,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeB"}}},
            },
@@ -4007,7 +4008,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
                    },
                },
            },
-            nodes: []v1.Node{
+            nodes: []*v1.Node{
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeA", Labels: map[string]string{"region": "r1", "zone": "z1", "hostname": "nodeA"}}},
                {ObjectMeta: metav1.ObjectMeta{Name: "nodeB", Labels: map[string]string{"region": "r1", "zone": "z2", "hostname": "nodeB"}}},
            },
@@ -4027,7 +4028,6 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {

     for indexTest, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            nodeListInfo := FakeNodeListInfo(test.nodes)
             nodeInfoMap := make(map[string]*schedulernodeinfo.NodeInfo)
             for i, node := range test.nodes {
                 var podsOnNode []*v1.Pod
@@ -4038,14 +4038,14 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
             }

             nodeInfo := schedulernodeinfo.NewNodeInfo(podsOnNode...)
-            nodeInfo.SetNode(&test.nodes[i])
+            nodeInfo.SetNode(test.nodes[i])
             nodeInfoMap[node.Name] = nodeInfo
         }

         for indexNode, node := range test.nodes {
             testFit := PodAffinityChecker{
-                info: nodeListInfo,
-                podLister: st.FakePodLister(test.pods),
+                nodeLister: fakelisters.NodeLister(test.nodes),
+                podLister: fakelisters.PodLister(test.pods),
             }

             var meta PredicateMetadata
@@ -4060,7 +4060,7 @@ func TestInterPodAffinityWithMultipleNodes(t *testing.T) {
             affinity := test.pod.Spec.Affinity
             if affinity != nil && affinity.NodeAffinity != nil {
                 nodeInfo := schedulernodeinfo.NewNodeInfo()
-                nodeInfo.SetNode(&node)
+                nodeInfo.SetNode(node)
                 nodeInfoMap := map[string]*schedulernodeinfo.NodeInfo{node.Name: nodeInfo}
                 fits2, reasons, err := PodMatchNodeSelector(test.pod, GetPredicateMetadata(test.pod, nodeInfoMap), nodeInfo)
                 if err != nil {
@@ -4302,7 +4302,7 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
 }

 func TestVolumeZonePredicate(t *testing.T) {
-    pvInfo := FakePersistentVolumeInfo{
+    pvInfo := fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
         },
@@ -4314,7 +4314,7 @@ func TestVolumeZonePredicate(t *testing.T) {
         },
     }

-    pvcInfo := FakePersistentVolumeClaimInfo{
+    pvcInfo := fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
             Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@@ -4431,7 +4431,7 @@ func TestVolumeZonePredicate(t *testing.T) {
 }

 func TestVolumeZonePredicateMultiZone(t *testing.T) {
-    pvInfo := FakePersistentVolumeInfo{
+    pvInfo := fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
         },
@@ -4443,7 +4443,7 @@ func TestVolumeZonePredicateMultiZone(t *testing.T) {
         },
     }

-    pvcInfo := FakePersistentVolumeClaimInfo{
+    pvcInfo := fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
             Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@@ -4533,7 +4533,7 @@ func TestVolumeZonePredicateWithVolumeBinding(t *testing.T) {
         classImmediate = "Class_Immediate"
     )

-    classInfo := FakeStorageClassInfo{
+    classInfo := fakelisters.StorageClassInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: classImmediate},
         },
@@ -4543,13 +4543,13 @@ func TestVolumeZonePredicateWithVolumeBinding(t *testing.T) {
         },
     }

-    pvInfo := FakePersistentVolumeInfo{
+    pvInfo := fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
         },
     }

-    pvcInfo := FakePersistentVolumeClaimInfo{
+    pvcInfo := fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
             Spec: v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@@ -1,112 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package predicates
-
-import (
-    "fmt"
-
-    v1 "k8s.io/api/core/v1"
-    storagev1 "k8s.io/api/storage/v1"
-    storagev1beta1 "k8s.io/api/storage/v1beta1"
-    volumeutil "k8s.io/kubernetes/pkg/volume/util"
-)
-
-// FakePersistentVolumeClaimInfo declares a []v1.PersistentVolumeClaim type for testing.
-type FakePersistentVolumeClaimInfo []v1.PersistentVolumeClaim
-
-// GetPersistentVolumeClaimInfo gets PVC matching the namespace and PVC ID.
-func (pvcs FakePersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) {
-    for _, pvc := range pvcs {
-        if pvc.Name == pvcID && pvc.Namespace == namespace {
-            return &pvc, nil
-        }
-    }
-    return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID)
-}
-
-// FakeNodeInfo declares a v1.Node type for testing.
-type FakeNodeInfo v1.Node
-
-// GetNodeInfo return a fake node info object.
-func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
-    node := v1.Node(n)
-    return &node, nil
-}
-
-// FakeNodeListInfo declares a []v1.Node type for testing.
-type FakeNodeListInfo []v1.Node
-
-// GetNodeInfo returns a fake node object in the fake nodes.
-func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
-    for _, node := range nodes {
-        if node.Name == nodeName {
-            return &node, nil
-        }
-    }
-    return nil, fmt.Errorf("Unable to find node: %s", nodeName)
-}
-
-// FakeCSINodeInfo declares a storagev1beta1.CSINode type for testing.
-type FakeCSINodeInfo storagev1beta1.CSINode
-
-// GetCSINodeInfo returns a fake CSINode object.
-func (n FakeCSINodeInfo) GetCSINodeInfo(name string) (*storagev1beta1.CSINode, error) {
-    csiNode := storagev1beta1.CSINode(n)
-    return &csiNode, nil
-}
-
-// FakePersistentVolumeInfo declares a []v1.PersistentVolume type for testing.
-type FakePersistentVolumeInfo []v1.PersistentVolume
-
-// GetPersistentVolumeInfo returns a fake PV object in the fake PVs by PV ID.
-func (pvs FakePersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
-    for _, pv := range pvs {
-        if pv.Name == pvID {
-            return &pv, nil
-        }
-    }
-    return nil, fmt.Errorf("Unable to find persistent volume: %s", pvID)
-}
-
-// FakeStorageClassInfo declares a []storagev1.StorageClass type for testing.
-type FakeStorageClassInfo []storagev1.StorageClass
-
-// GetStorageClassInfo returns a fake storage class object in the fake storage classes by name.
-func (classes FakeStorageClassInfo) GetStorageClassInfo(name string) (*storagev1.StorageClass, error) {
-    for _, sc := range classes {
-        if sc.Name == name {
-            return &sc, nil
-        }
-    }
-    return nil, fmt.Errorf("Unable to find storage class: %s", name)
-}
-
-// GetVolumeLimitKey returns a ResourceName by filter type
-func GetVolumeLimitKey(filterType string) v1.ResourceName {
-    switch filterType {
-    case EBSVolumeFilterType:
-        return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
-    case GCEPDVolumeFilterType:
-        return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
-    case AzureDiskVolumeFilterType:
-        return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
-    case CinderVolumeFilterType:
-        return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
-    default:
-        return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
-    }
-}
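Note: the fake implementations deleted above reappear under the new pkg/scheduler/listers/fake package (imported as fakelisters throughout this commit). That file is not part of the hunks shown here; a sketch of one of its types, inferred from call sites such as fakelisters.NodeLister([]*v1.Node{...}) and from the FakeNodeListInfo variant removed from the priorities test below:

    // Package fake provides fake listers for scheduler tests (inferred sketch,
    // not the file's actual contents).
    package fake

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // NodeLister implements a node lister over a static slice of nodes.
    type NodeLister []*v1.Node

    // GetNodeInfo returns the node with the given name, if present.
    func (nodes NodeLister) GetNodeInfo(nodeName string) (*v1.Node, error) {
        for _, node := range nodes {
            if node.Name == nodeName {
                return node, nil
            }
        }
        return nil, fmt.Errorf("unable to find node: %s", nodeName)
    }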
@@ -33,10 +33,10 @@ go_library(
     deps = [
         "//pkg/apis/core/v1/helper:go_default_library",
         "//pkg/features:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//pkg/util/node:go_default_library",
@@ -78,6 +78,7 @@ go_test(
         "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/priorities/util:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/scheduler/testing:go_default_library",
         "//pkg/util/parsers:go_default_library",
@@ -23,9 +23,9 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/util/workqueue"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     priorityutil "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/util"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     schedutil "k8s.io/kubernetes/pkg/scheduler/util"

@@ -34,16 +34,14 @@ import (
 
 // InterPodAffinity contains information to calculate inter pod affinity.
 type InterPodAffinity struct {
-    info                  predicates.NodeInfo
+    nodeLister            schedulerlisters.NodeLister
     hardPodAffinityWeight int32
 }
 
 // NewInterPodAffinityPriority creates an InterPodAffinity.
-func NewInterPodAffinityPriority(
-    info predicates.NodeInfo,
-    hardPodAffinityWeight int32) PriorityFunction {
+func NewInterPodAffinityPriority(nodeLister schedulerlisters.NodeLister, hardPodAffinityWeight int32) PriorityFunction {
     interPodAffinity := &InterPodAffinity{
-        info:                  info,
+        nodeLister:            nodeLister,
         hardPodAffinityWeight: hardPodAffinityWeight,
     }
     return interPodAffinity.CalculateInterPodAffinityPriority
@@ -111,7 +109,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
     var maxCount, minCount int64
 
     processPod := func(existingPod *v1.Pod) error {
-        existingPodNode, err := ipa.info.GetNodeInfo(existingPod.Spec.NodeName)
+        existingPodNode, err := ipa.nodeLister.GetNodeInfo(existingPod.Spec.NodeName)
         if err != nil {
             klog.Errorf("Node not found, %v", existingPod.Spec.NodeName)
             return nil
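Callers now hand the priority a node lister directly instead of the old predicates.NodeInfo. A minimal wiring sketch based on the test changes in this diff (nodes is an assumed []*v1.Node; any schedulerlisters.NodeLister, such as the scheduler cache, would do):

    // Hypothetical wiring; fakelisters.NodeLister is the test fake introduced later in this commit.
    prioritize := priorities.NewInterPodAffinityPriority(
        fakelisters.NodeLister(nodes),            // resolves node names for existing pods
        v1.DefaultHardPodAffinitySymmetricWeight, // weight for existing pods' hard affinity terms
    )
    // prioritize is a PriorityFunction, invoked as prioritize(pod, nodeNameToInfo, nodes).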
@@ -17,28 +17,17 @@ limitations under the License.
 package priorities
 
 import (
-    "fmt"
     "reflect"
     "testing"
 
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
 
-type FakeNodeListInfo []*v1.Node
-
-func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
-    for _, node := range nodes {
-        if node.Name == nodeName {
-            return node, nil
-        }
-    }
-    return nil, fmt.Errorf("Unable to find node: %s", nodeName)
-}
-
 func TestInterPodAffinityPriority(t *testing.T) {
     labelRgChina := map[string]string{
         "region": "China",
@@ -528,7 +517,7 @@ func TestInterPodAffinityPriority(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
             interPodAffinity := InterPodAffinity{
-                info:                  FakeNodeListInfo(test.nodes),
+                nodeLister:            fakelisters.NodeLister(test.nodes),
                 hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
             }
             list, err := interPodAffinity.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
@@ -616,7 +605,7 @@ func TestHardPodAffinitySymmetricWeight(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
             ipa := InterPodAffinity{
-                info:                  FakeNodeListInfo(test.nodes),
+                nodeLister:            fakelisters.NodeLister(test.nodes),
                 hardPodAffinityWeight: test.hardPodAffinityWeight,
             }
             list, err := ipa.CalculateInterPodAffinityPriority(test.pod, nodeNameToInfo, test.nodes)
@@ -673,7 +662,7 @@ func BenchmarkInterPodAffinityPriority(b *testing.B) {
             existingPods, allNodes := tt.prepFunc(tt.existingPodsNum, tt.allNodesNum)
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(existingPods, allNodes)
             interPodAffinity := InterPodAffinity{
-                info:                  FakeNodeListInfo(allNodes),
+                nodeLister:            fakelisters.NodeLister(allNodes),
                 hardPodAffinityWeight: v1.DefaultHardPodAffinitySymmetricWeight,
             }
             b.ResetTimer()
@@ -23,8 +23,8 @@ import (
     "k8s.io/apimachinery/pkg/labels"
     appslisters "k8s.io/client-go/listers/apps/v1"
     corelisters "k8s.io/client-go/listers/core/v1"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
     utilnode "k8s.io/kubernetes/pkg/util/node"
 
@@ -155,13 +155,13 @@ func (s *SelectorSpread) CalculateSpreadPriorityReduce(pod *v1.Pod, meta interfa
 
 // ServiceAntiAffinity contains information to calculate service anti-affinity priority.
 type ServiceAntiAffinity struct {
-    podLister     algorithm.PodLister
+    podLister     schedulerlisters.PodLister
     serviceLister corelisters.ServiceLister
     label         string
 }
 
 // NewServiceAntiAffinityPriority creates a ServiceAntiAffinity.
-func NewServiceAntiAffinityPriority(podLister algorithm.PodLister, serviceLister corelisters.ServiceLister, label string) (PriorityMapFunction, PriorityReduceFunction) {
+func NewServiceAntiAffinityPriority(podLister schedulerlisters.PodLister, serviceLister corelisters.ServiceLister, label string) (PriorityMapFunction, PriorityReduceFunction) {
     antiAffinity := &ServiceAntiAffinity{
         podLister:     podLister,
         serviceLister: serviceLister,
@@ -24,8 +24,8 @@ import (
     v1 "k8s.io/api/core/v1"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
 )
 
 func controllerRef(kind, name, uid string) []metav1.OwnerReference {
@@ -339,17 +339,17 @@ func TestSelectorSpreadPriority(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeNodeList(test.nodes))
             selectorSpread := SelectorSpread{
-                serviceLister:     schedulertesting.FakeServiceLister(test.services),
-                controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
-                replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
-                statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
+                serviceLister:     fakelisters.ServiceLister(test.services),
+                controllerLister:  fakelisters.ControllerLister(test.rcs),
+                replicaSetLister:  fakelisters.ReplicaSetLister(test.rss),
+                statefulSetLister: fakelisters.StatefulSetLister(test.sss),
             }
 
             metaDataProducer := NewPriorityMetadataFactory(
-                schedulertesting.FakeServiceLister(test.services),
-                schedulertesting.FakeControllerLister(test.rcs),
-                schedulertesting.FakeReplicaSetLister(test.rss),
-                schedulertesting.FakeStatefulSetLister(test.sss))
+                fakelisters.ServiceLister(test.services),
+                fakelisters.ControllerLister(test.rcs),
+                fakelisters.ReplicaSetLister(test.rss),
+                fakelisters.StatefulSetLister(test.sss))
             metaData := metaDataProducer(test.pod, nodeNameToInfo)
 
             ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, metaData)
@@ -575,17 +575,17 @@ func TestZoneSelectorSpreadPriority(t *testing.T) {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(labeledNodes))
             selectorSpread := SelectorSpread{
-                serviceLister:     schedulertesting.FakeServiceLister(test.services),
-                controllerLister:  schedulertesting.FakeControllerLister(test.rcs),
-                replicaSetLister:  schedulertesting.FakeReplicaSetLister(test.rss),
-                statefulSetLister: schedulertesting.FakeStatefulSetLister(test.sss),
+                serviceLister:     fakelisters.ServiceLister(test.services),
+                controllerLister:  fakelisters.ControllerLister(test.rcs),
+                replicaSetLister:  fakelisters.ReplicaSetLister(test.rss),
+                statefulSetLister: fakelisters.StatefulSetLister(test.sss),
             }
 
             metaDataProducer := NewPriorityMetadataFactory(
-                schedulertesting.FakeServiceLister(test.services),
-                schedulertesting.FakeControllerLister(test.rcs),
-                schedulertesting.FakeReplicaSetLister(test.rss),
-                schedulertesting.FakeStatefulSetLister(test.sss))
+                fakelisters.ServiceLister(test.services),
+                fakelisters.ControllerLister(test.rcs),
+                fakelisters.ReplicaSetLister(test.rss),
+                fakelisters.StatefulSetLister(test.sss))
             metaData := metaDataProducer(test.pod, nodeNameToInfo)
             ttp := priorityFunction(selectorSpread.CalculateSpreadPriorityMap, selectorSpread.CalculateSpreadPriorityReduce, metaData)
             list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(labeledNodes))
@@ -766,13 +766,13 @@ func TestZoneSpreadPriority(t *testing.T) {
     for _, test := range tests {
         t.Run(test.name, func(t *testing.T) {
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, makeLabeledNodeList(test.nodes))
-            zoneSpread := ServiceAntiAffinity{podLister: schedulertesting.FakePodLister(test.pods), serviceLister: schedulertesting.FakeServiceLister(test.services), label: "zone"}
+            zoneSpread := ServiceAntiAffinity{podLister: fakelisters.PodLister(test.pods), serviceLister: fakelisters.ServiceLister(test.services), label: "zone"}
 
             metaDataProducer := NewPriorityMetadataFactory(
-                schedulertesting.FakeServiceLister(test.services),
-                schedulertesting.FakeControllerLister(rcs),
-                schedulertesting.FakeReplicaSetLister(rss),
-                schedulertesting.FakeStatefulSetLister(sss))
+                fakelisters.ServiceLister(test.services),
+                fakelisters.ControllerLister(rcs),
+                fakelisters.ReplicaSetLister(rss),
+                fakelisters.StatefulSetLister(sss))
             metaData := metaDataProducer(test.pod, nodeNameToInfo)
             ttp := priorityFunction(zoneSpread.CalculateAntiAffinityPriorityMap, zoneSpread.CalculateAntiAffinityPriorityReduce, metaData)
             list, err := ttp(test.pod, nodeNameToInfo, makeLabeledNodeList(test.nodes))
@@ -32,19 +32,6 @@ var NodeFieldSelectorKeys = map[string]func(*v1.Node) string{
     schedulerapi.NodeFieldSelectorKeyNodeName: func(n *v1.Node) string { return n.Name },
 }
 
-// PodFilter is a function to filter a pod. If pod passed return true else return false.
-type PodFilter func(*v1.Pod) bool
-
-// PodLister interface represents anything that can list pods for a scheduler.
-type PodLister interface {
-    // We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
-    // performing expensive copies that are unneeded.
-    List(labels.Selector) ([]*v1.Pod, error)
-    // This is similar to "List()", but the returned slice does not
-    // contain pods that don't pass `podFilter`.
-    FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
-}
-
 var _ corelisters.ReplicationControllerLister = &EmptyControllerLister{}
 
 // EmptyControllerLister implements ControllerLister on []v1.ReplicationController returning empty data
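These definitions move into the new pkg/scheduler/listers package referenced throughout the diff. The new listers.go itself is not shown here; reconstructed from the removed lines above plus the GetNodeInfo call sites elsewhere in this commit, it plausibly reads as follows (a sketch, not a verbatim copy; the NodeLister declaration in particular is inferred):

    package listers

    import (
        v1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/labels"
    )

    // PodFilter is a function to filter a pod. If pod passed return true else return false.
    type PodFilter func(*v1.Pod) bool

    // PodLister interface represents anything that can list pods for a scheduler.
    type PodLister interface {
        // We explicitly return []*v1.Pod, instead of v1.PodList, to avoid
        // performing expensive copies that are unneeded.
        List(labels.Selector) ([]*v1.Pod, error)
        // This is similar to "List()", but the returned slice does not
        // contain pods that don't pass `podFilter`.
        FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
    }

    // NodeLister is inferred from call sites such as ipa.nodeLister.GetNodeInfo(...);
    // the exact declaration in the commit may differ.
    type NodeLister interface {
        GetNodeInfo(nodeName string) (*v1.Node, error)
    }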
@@ -27,11 +27,11 @@ import (
     appslisters "k8s.io/client-go/listers/apps/v1"
     corelisters "k8s.io/client-go/listers/core/v1"
     policylisters "k8s.io/client-go/listers/policy/v1beta1"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/priorities"
     schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     "k8s.io/kubernetes/pkg/scheduler/volumebinder"
 
     "k8s.io/klog"
@@ -39,13 +39,13 @@ import (
 
 // PluginFactoryArgs are passed to all plugin factory functions.
 type PluginFactoryArgs struct {
-    PodLister         algorithm.PodLister
+    PodLister         schedulerlisters.PodLister
     ServiceLister     corelisters.ServiceLister
     ControllerLister  corelisters.ReplicationControllerLister
     ReplicaSetLister  appslisters.ReplicaSetLister
     StatefulSetLister appslisters.StatefulSetLister
     PDBLister         policylisters.PodDisruptionBudgetLister
-    NodeInfo          predicates.NodeInfo
+    NodeLister        schedulerlisters.NodeLister
     CSINodeInfo       predicates.CSINodeInfo
     PVInfo            predicates.PersistentVolumeInfo
     PVCInfo           predicates.PersistentVolumeClaimInfo
@@ -268,9 +268,9 @@ func RegisterCustomFitPredicate(policy schedulerapi.PredicatePolicy) string {
     if policy.Argument.ServiceAffinity != nil {
         predicateFactory = func(args PluginFactoryArgs) predicates.FitPredicate {
             predicate, precomputationFunction := predicates.NewServiceAffinityPredicate(
+                args.NodeLister,
                 args.PodLister,
                 args.ServiceLister,
-                args.NodeInfo,
                 policy.Argument.ServiceAffinity.Labels,
             )
 
@@ -92,7 +92,7 @@ func init() {
     scheduler.RegisterFitPredicateFactory(
         predicates.MatchInterPodAffinityPred,
         func(args scheduler.PluginFactoryArgs) predicates.FitPredicate {
-            return predicates.NewPodAffinityPredicate(args.NodeInfo, args.PodLister)
+            return predicates.NewPodAffinityPredicate(args.NodeLister, args.PodLister)
         },
     )
 
@@ -70,7 +70,7 @@ func init() {
         priorities.InterPodAffinityPriority,
         scheduler.PriorityConfigFactory{
             Function: func(args scheduler.PluginFactoryArgs) priorities.PriorityFunction {
-                return priorities.NewInterPodAffinityPriority(args.NodeInfo, args.HardPodAffinitySymmetricWeight)
+                return priorities.NewInterPodAffinityPriority(args.NodeLister, args.HardPodAffinitySymmetricWeight)
             },
             Weight: 1,
         },
@@ -58,8 +58,8 @@ go_test(
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
         "//pkg/scheduler/internal/queue:go_default_library",
+        "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/testing:go_default_library",
         "//pkg/scheduler/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -47,8 +47,8 @@ import (
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
     internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
     internalqueue "k8s.io/kubernetes/pkg/scheduler/internal/queue"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    schedulertesting "k8s.io/kubernetes/pkg/scheduler/testing"
 )
 
 var (
@@ -657,7 +657,7 @@ func TestGenericScheduler(t *testing.T) {
             pvcs := []*v1.PersistentVolumeClaim{}
             pvcs = append(pvcs, test.pvcs...)
 
-            pvcLister := schedulertesting.FakePersistentVolumeClaimLister(pvcs)
+            pvcLister := fakelisters.PersistentVolumeClaimLister(pvcs)
 
             predMetaProducer := algorithmpredicates.EmptyPredicateMetadataProducer
             if test.buildPredMeta {
@@ -1064,13 +1064,6 @@ func checkPreemptionVictims(expected map[string]map[string]bool, nodeToPods map[
     return nil
 }
 
-type FakeNodeInfo v1.Node
-
-func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*v1.Node, error) {
-    node := v1.Node(n)
-    return &node, nil
-}
-
 var smallContainers = []v1.Container{
     {
         Resources: v1.ResourceRequirements{
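Ad-hoc fakes like FakeNodeInfo here (and FakeNodeListInfo in the priorities test) give way to the shared pkg/scheduler/listers/fake package. Its NodeLister is not shown in full in this excerpt; judging from call sites such as fakelisters.NodeLister([]*v1.Node{nodes[0]}) below and the removed helpers above, a likely shape is:

    package fake

    import (
        "fmt"

        v1 "k8s.io/api/core/v1"
    )

    // NodeLister declares a []*v1.Node usable as a schedulerlisters.NodeLister in
    // tests. The body mirrors the removed FakeNodeListInfo helper; the real
    // fake/listers.go may differ in details.
    type NodeLister []*v1.Node

    // GetNodeInfo returns the node with the given name, or an error if absent.
    func (nodes NodeLister) GetNodeInfo(nodeName string) (*v1.Node, error) {
        for _, node := range nodes {
            if node.Name == nodeName {
                return node, nil
            }
        }
        return nil, fmt.Errorf("unable to find node: %s", nodeName)
    }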
@@ -1427,7 +1420,9 @@ func TestSelectNodesForPreemption(t *testing.T) {
                 nodes = append(nodes, node)
             }
             if test.addAffinityPredicate {
-                test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(FakeNodeInfo(*nodes[0]), schedulertesting.FakePodLister(test.pods))
+                n := fakelisters.NodeLister([]*v1.Node{nodes[0]})
+                p := fakelisters.PodLister(test.pods)
+                test.predicates[algorithmpredicates.MatchInterPodAffinityPred] = algorithmpredicates.NewPodAffinityPredicate(n, p)
             }
             nodeNameToInfo := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, nodes)
             // newnode simulate a case that a new node is added to the cluster, but nodeNameToInfo
@@ -593,7 +593,7 @@ func (c *Configurator) getAlgorithmArgs() (*PluginFactoryArgs, *plugins.ConfigPr
         ReplicaSetLister:  c.replicaSetLister,
         StatefulSetLister: c.statefulSetLister,
         PDBLister:         c.pdbLister,
-        NodeInfo:          c.schedulerCache,
+        NodeLister:        c.schedulerCache,
         CSINodeInfo:       &predicates.CachedCSINodeInfo{CSINodeLister: c.csiNodeLister},
         PVInfo:            &predicates.CachedPersistentVolumeInfo{PersistentVolumeLister: c.pVLister},
         PVCInfo:           &predicates.CachedPersistentVolumeClaimInfo{PersistentVolumeClaimLister: c.pVCLister},
@@ -6,10 +6,10 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/framework/plugins/interpodaffinity",
     visibility = ["//visibility:public"],
     deps = [
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
     ],
@@ -23,8 +23,8 @@ go_test(
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/plugins/migration:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
-        "//pkg/scheduler/testing:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
     ],
@@ -21,10 +21,10 @@ import (
     "fmt"
 
     "k8s.io/api/core/v1"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -54,8 +54,8 @@ func (pl *InterPodAffinity) Filter(ctx context.Context, cycleState *framework.Cy
 }
 
 // New initializes a new plugin and returns it.
-func New(nodeInfo predicates.NodeInfo, podLister algorithm.PodLister) framework.Plugin {
+func New(nodeLister schedulerlisters.NodeLister, podLister schedulerlisters.PodLister) framework.Plugin {
     return &InterPodAffinity{
-        predicate: predicates.NewPodAffinityPredicate(nodeInfo, podLister),
+        predicate: predicates.NewPodAffinityPredicate(nodeLister, podLister),
     }
 }
@@ -26,8 +26,8 @@ import (
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     "k8s.io/kubernetes/pkg/scheduler/framework/plugins/migration"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-    st "k8s.io/kubernetes/pkg/scheduler/testing"
 )
 
 var (
@@ -739,7 +739,7 @@ func TestSingleNode(t *testing.T) {
             state := framework.NewCycleState()
             state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
 
-            p := New(predicates.FakeNodeInfo(*node), st.FakePodLister(test.pods))
+            p := New(fakelisters.NodeLister([]*v1.Node{node}), fakelisters.PodLister(test.pods))
             gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), state, test.pod, nodeInfoMap[node.Name])
             if !reflect.DeepEqual(gotStatus, test.wantStatus) {
                 t.Errorf("status does not match: %v, want: %v", gotStatus, test.wantStatus)
@@ -1432,18 +1432,14 @@ func TestMultipleNodes(t *testing.T) {
 
     for indexTest, test := range tests {
         t.Run(test.name, func(t *testing.T) {
-            nodeList := make([]v1.Node, len(test.nodes))
-            for _, node := range test.nodes {
-                nodeList = append(nodeList, *node)
-            }
-            nodeListInfo := predicates.FakeNodeListInfo(nodeList)
+            nodeListInfo := fakelisters.NodeLister(test.nodes)
             nodeInfoMap := schedulernodeinfo.CreateNodeNameToInfoMap(test.pods, test.nodes)
             for indexNode, node := range test.nodes {
                 meta := predicates.GetPredicateMetadata(test.pod, nodeInfoMap)
                 state := framework.NewCycleState()
                 state.Write(migration.PredicatesStateKey, &migration.PredicatesStateData{Reference: meta})
 
-                p := New(nodeListInfo, st.FakePodLister(test.pods))
+                p := New(nodeListInfo, fakelisters.PodLister(test.pods))
                 gotStatus := p.(framework.FilterPlugin).Filter(context.Background(), state, test.pod, nodeInfoMap[node.Name])
                 if !reflect.DeepEqual(gotStatus, test.wantStatuses[indexNode]) {
                     t.Errorf("index: %d status does not match: %v, want: %v", indexTest, gotStatus, test.wantStatuses[indexNode])
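Incidentally, the replaced code carried a latent bug: make([]v1.Node, len(test.nodes)) allocates a slice already holding len(test.nodes) zero-value nodes, so the subsequent appends left those empty entries at the front of nodeList. Passing test.nodes straight to fakelisters.NodeLister drops both the copy and that bug.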
@@ -35,7 +35,9 @@ go_test(
         "//pkg/features:go_default_library",
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
+        "//pkg/volume/util:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
@@ -18,14 +18,15 @@ package nodevolumelimits
 
 import (
     "context"
+    "fmt"
     "reflect"
     "strings"
     "testing"
 
-    "fmt"
-    "k8s.io/api/core/v1"
+    v1 "k8s.io/api/core/v1"
     "k8s.io/api/storage/v1beta1"
     storagev1beta1 "k8s.io/api/storage/v1beta1"
+    "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/sets"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -33,10 +34,10 @@ import (
     csilibplugins "k8s.io/csi-translation-lib/plugins"
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
-
-    "k8s.io/apimachinery/pkg/api/resource"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
+    volumeutil "k8s.io/kubernetes/pkg/volume/util"
     utilpointer "k8s.io/utils/pointer"
 )
 
@@ -47,6 +48,22 @@ const (
     hostpathInTreePluginName = "kubernetes.io/hostpath"
 )
 
+// getVolumeLimitKey returns a ResourceName by filter type
+func getVolumeLimitKey(filterType string) v1.ResourceName {
+    switch filterType {
+    case predicates.EBSVolumeFilterType:
+        return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
+    case predicates.GCEPDVolumeFilterType:
+        return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
+    case predicates.AzureDiskVolumeFilterType:
+        return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
+    case predicates.CinderVolumeFilterType:
+        return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
+    default:
+        return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
+    }
+}
+
 func TestCSILimits(t *testing.T) {
     runningPod := &v1.Pod{
         Spec: v1.PodSpec{
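The test now keeps a private copy of the limit-key mapping rather than calling the removed exported helper. Its single use in this file sets per-driver allocatable counts on a node; condensed for illustration (the limit value 16 and the CSI driver name are arbitrary stand-ins):

    // Illustrative only: mirrors addLimitToNode later in this diff.
    node := &v1.Node{Status: v1.NodeStatus{Allocatable: v1.ResourceList{}}}
    for _, driver := range []string{predicates.EBSVolumeFilterType, "hostpath.csi.k8s.io"} {
        // In-tree filter types hit the named cases; anything else falls
        // through to the CSI attach-limit key.
        node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(16, resource.DecimalSI)
    }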
@@ -455,8 +472,8 @@ func TestCSILimits(t *testing.T) {
     }
 }
 
-func getFakeCSIPVInfo(volumeName string, driverNames ...string) predicates.FakePersistentVolumeInfo {
-    pvInfos := predicates.FakePersistentVolumeInfo{}
+func getFakeCSIPVInfo(volumeName string, driverNames ...string) fakelisters.PersistentVolumeInfo {
+    pvInfos := fakelisters.PersistentVolumeInfo{}
     for _, driver := range driverNames {
         for j := 0; j < 4; j++ {
             volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
@@ -500,8 +517,8 @@ func getFakeCSIPVInfo(volumeName string, driverNames ...string) predicates.FakeP
     return pvInfos
 }
 
-func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) predicates.FakePersistentVolumeClaimInfo {
-    pvcInfos := predicates.FakePersistentVolumeClaimInfo{}
+func getFakeCSIPVCInfo(volumeName, scName string, driverNames ...string) fakelisters.PersistentVolumeClaimInfo {
+    pvcInfos := fakelisters.PersistentVolumeClaimInfo{}
     for _, driver := range driverNames {
         for j := 0; j < 4; j++ {
             v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
@@ -543,8 +560,8 @@ func enableMigrationOnNode(csiNode *storagev1beta1.CSINode, pluginName string) {
     csiNode.Annotations = nodeInfoAnnotations
 }
 
-func getFakeCSIStorageClassInfo(scName, provisionerName string) predicates.FakeStorageClassInfo {
-    return predicates.FakeStorageClassInfo{
+func getFakeCSIStorageClassInfo(scName, provisionerName string) fakelisters.StorageClassInfo {
+    return fakelisters.StorageClassInfo{
         {
             ObjectMeta:  metav1.ObjectMeta{Name: scName},
             Provisioner: provisionerName,
@@ -552,11 +569,11 @@ func getFakeCSIStorageClassInfo(scName, provisionerName string) predicates.FakeS
     }
 }
 
-func getFakeCSINodeInfo(csiNode *storagev1beta1.CSINode) predicates.FakeCSINodeInfo {
+func getFakeCSINodeInfo(csiNode *storagev1beta1.CSINode) fakelisters.CSINodeInfo {
     if csiNode != nil {
-        return predicates.FakeCSINodeInfo(*csiNode)
+        return fakelisters.CSINodeInfo(*csiNode)
     }
-    return predicates.FakeCSINodeInfo{}
+    return fakelisters.CSINodeInfo{}
 }
 
 func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int64, driverNames ...string) (*schedulernodeinfo.NodeInfo, *v1beta1.CSINode) {
@@ -571,7 +588,7 @@ func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int
 
     addLimitToNode := func() {
         for _, driver := range driverNames {
-            node.Status.Allocatable[predicates.GetVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
+            node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(limit, resource.DecimalSI)
         }
     }
 
@@ -30,6 +30,7 @@ import (
     "k8s.io/kubernetes/pkg/features"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     utilpointer "k8s.io/utils/pointer"
 )
 
@@ -484,8 +485,8 @@ func TestEBSLimits(t *testing.T) {
     }
 }
 
-func getFakePVCInfo(filterName string) predicates.FakePersistentVolumeClaimInfo {
-    return predicates.FakePersistentVolumeClaimInfo{
+func getFakePVCInfo(filterName string) fakelisters.PersistentVolumeClaimInfo {
+    return fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
             Spec: v1.PersistentVolumeClaimSpec{
@@ -545,8 +546,8 @@ func getFakePVCInfo(filterName string) predicates.FakePersistentVolumeClaimInfo
     }
 }
 
-func getFakePVInfo(filterName string) predicates.FakePersistentVolumeInfo {
-    return predicates.FakePersistentVolumeInfo{
+func getFakePVInfo(filterName string) fakelisters.PersistentVolumeInfo {
+    return fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "some" + filterName + "Vol"},
             Spec: v1.PersistentVolumeSpec{
@@ -22,6 +22,7 @@ go_test(
     deps = [
         "//pkg/scheduler/algorithm/predicates:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
+        "//pkg/scheduler/listers/fake:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/api/storage/v1:go_default_library",
@@ -26,6 +26,7 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates"
     framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+    fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -48,7 +49,7 @@ func createPodWithVolume(pod, pv, pvc string) *v1.Pod {
 }
 
 func TestSingleZone(t *testing.T) {
-    pvInfo := predicates.FakePersistentVolumeInfo{
+    pvInfo := fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
         },
@@ -60,7 +61,7 @@ func TestSingleZone(t *testing.T) {
         },
     }
 
-    pvcInfo := predicates.FakePersistentVolumeClaimInfo{
+    pvcInfo := fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
             Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@@ -166,7 +167,7 @@
 }
 
 func TestMultiZone(t *testing.T) {
-    pvInfo := predicates.FakePersistentVolumeInfo{
+    pvInfo := fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
         },
@@ -178,7 +179,7 @@ func TestMultiZone(t *testing.T) {
         },
     }
 
-    pvcInfo := predicates.FakePersistentVolumeClaimInfo{
+    pvcInfo := fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
             Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
@@ -259,7 +260,7 @@ func TestWithBinding(t *testing.T) {
         classImmediate = "Class_Immediate"
     )
 
-    classInfo := predicates.FakeStorageClassInfo{
+    classInfo := fakelisters.StorageClassInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: classImmediate},
         },
@@ -269,13 +270,13 @@
         },
     }
 
-    pvInfo := predicates.FakePersistentVolumeInfo{
+    pvInfo := fakelisters.PersistentVolumeInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "Vol_1", Labels: map[string]string{v1.LabelZoneFailureDomain: "us-west1-a"}},
         },
     }
 
-    pvcInfo := predicates.FakePersistentVolumeClaimInfo{
+    pvcInfo := fakelisters.PersistentVolumeClaimInfo{
         {
             ObjectMeta: metav1.ObjectMeta{Name: "PVC_1", Namespace: "default"},
             Spec:       v1.PersistentVolumeClaimSpec{VolumeName: "Vol_1"},
pkg/scheduler/internal/cache/BUILD (vendored, 5 lines changed)
@@ -8,10 +8,10 @@ go_library(
         "node_tree.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache",
-    visibility = ["//visibility:public"],
+    visibility = ["//pkg/scheduler:__subpackages__"],
     deps = [
         "//pkg/features:go_default_library",
-        "//pkg/scheduler/algorithm:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//pkg/util/node:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
@@ -37,7 +37,6 @@ go_test(
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/api/resource:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
         "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
         "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library",
pkg/scheduler/internal/cache/cache.go (vendored, 9 lines changed)
@@ -26,11 +26,10 @@ import (
     "k8s.io/apimachinery/pkg/util/sets"
     "k8s.io/apimachinery/pkg/util/wait"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
-    "k8s.io/kubernetes/pkg/features"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
-    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
-
     "k8s.io/klog"
+    "k8s.io/kubernetes/pkg/features"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
+    schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
 var (
@@ -254,7 +253,7 @@ func (cache *schedulerCache) List(selector labels.Selector) ([]*v1.Pod, error) {
     return cache.FilteredList(alwaysTrue, selector)
 }
 
-func (cache *schedulerCache) FilteredList(podFilter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
+func (cache *schedulerCache) FilteredList(podFilter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
     cache.mu.RLock()
     defer cache.mu.RUnlock()
     // podFilter is expected to return true for most or all of the pods. We
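FilteredList now takes the PodFilter type from the relocated listers package; List, as the context line shows, is just FilteredList with an accept-all filter. To make the contract concrete, a hypothetical caller (function, variable, and node names are invented for illustration):

    // listPodsOnNode selects only pods bound to the given node, with label
    // filtering disabled via labels.Everything().
    func listPodsOnNode(c internalcache.Cache, nodeName string) ([]*v1.Pod, error) {
        onNode := func(p *v1.Pod) bool { return p.Spec.NodeName == nodeName }
        return c.FilteredList(onNode, labels.Everything())
    }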
pkg/scheduler/internal/cache/cache_test.go (vendored, 9 lines changed)
@@ -26,7 +26,6 @@ import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/api/resource"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-    "k8s.io/apimachinery/pkg/labels"
     "k8s.io/apimachinery/pkg/types"
     utilfeature "k8s.io/apiserver/pkg/util/feature"
     featuregatetesting "k8s.io/component-base/featuregate/testing"
@@ -1376,14 +1375,6 @@ func compareCacheWithNodeInfoSnapshot(cache *schedulerCache, snapshot *scheduler
     return nil
 }
 
-func BenchmarkList1kNodes30kPods(b *testing.B) {
-    cache := setupCacheOf1kNodes30kPods(b)
-    b.ResetTimer()
-    for n := 0; n < b.N; n++ {
-        cache.List(labels.Everything())
-    }
-}
-
 func BenchmarkUpdate1kNodes30kPods(b *testing.B) {
     // Enable volumesOnNodeForBalancing to do balanced resource allocation
     defer featuregatetesting.SetFeatureGateDuringTest(nil, utilfeature.DefaultFeatureGate, features.BalanceAttachedNodeVolumes, true)()
pkg/scheduler/internal/cache/fake/BUILD (vendored, 2 lines changed)
@@ -6,8 +6,8 @@ go_library(
     importpath = "k8s.io/kubernetes/pkg/scheduler/internal/cache/fake",
     visibility = ["//pkg/scheduler:__subpackages__"],
     deps = [
-        "//pkg/scheduler/algorithm:go_default_library",
         "//pkg/scheduler/internal/cache:go_default_library",
+        "//pkg/scheduler/listers:go_default_library",
         "//pkg/scheduler/nodeinfo:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
@@ -19,8 +19,8 @@ package fake
 import (
     v1 "k8s.io/api/core/v1"
     "k8s.io/apimachinery/pkg/labels"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
     internalcache "k8s.io/kubernetes/pkg/scheduler/internal/cache"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -84,7 +84,7 @@ func (c *Cache) UpdateNodeInfoSnapshot(nodeSnapshot *schedulernodeinfo.Snapshot)
 func (c *Cache) List(s labels.Selector) ([]*v1.Pod, error) { return nil, nil }
 
 // FilteredList is a fake method for testing.
-func (c *Cache) FilteredList(filter algorithm.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
+func (c *Cache) FilteredList(filter schedulerlisters.PodFilter, selector labels.Selector) ([]*v1.Pod, error) {
     return nil, nil
 }
 
pkg/scheduler/internal/cache/interface.go (vendored, 4 lines changed)
@@ -18,7 +18,7 @@ package cache
 
 import (
     v1 "k8s.io/api/core/v1"
-    "k8s.io/kubernetes/pkg/scheduler/algorithm"
+    schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
     schedulernodeinfo "k8s.io/kubernetes/pkg/scheduler/nodeinfo"
 )
 
@@ -57,7 +57,7 @@ import (
 // - Both "Expired" and "Deleted" are valid end states. In case of some problems, e.g. network issue,
 //   a pod might have changed its state (e.g. added and deleted) without delivering notification to the cache.
 type Cache interface {
-    algorithm.PodLister
+    schedulerlisters.PodLister
 
     // AssumePod assumes a pod scheduled and aggregates the pod's information into its node.
     // The implementation also decides the policy to expire pod before being confirmed (receiving Add event).
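Because Cache embeds the relocated PodLister, any scheduler cache can be passed around as a lister; that is exactly why NodeLister: c.schedulerCache works in the Configurator hunk above. A hypothetical consumer, to illustrate the embedding:

    // podsInCache is an invented helper: the embedded schedulerlisters.PodLister
    // lets callers enumerate pods without knowing the cache's concrete type.
    func podsInCache(c internalcache.Cache) ([]*v1.Pod, error) {
        var lister schedulerlisters.PodLister = c
        return lister.List(labels.Everything())
    }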
pkg/scheduler/listers/BUILD (new file, 29 lines)
@@ -0,0 +1,29 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["listers.go"],
+    importpath = "k8s.io/kubernetes/pkg/scheduler/listers",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [
+        ":package-srcs",
+        "//pkg/scheduler/listers/fake:all-srcs",
+    ],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)

pkg/scheduler/listers/fake/BUILD (new file)
@@ -0,0 +1,33 @@
+load("@io_bazel_rules_go//go:def.bzl", "go_library")
+
+go_library(
+    name = "go_default_library",
+    srcs = ["listers.go"],
+    importpath = "k8s.io/kubernetes/pkg/scheduler/listers/fake",
+    visibility = ["//visibility:public"],
+    deps = [
+        "//pkg/scheduler/listers:go_default_library",
+        "//staging/src/k8s.io/api/apps/v1:go_default_library",
+        "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1beta1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
+    ],
+)
+
+filegroup(
+    name = "package-srcs",
+    srcs = glob(["**"]),
+    tags = ["automanaged"],
+    visibility = ["//visibility:private"],
+)
+
+filegroup(
+    name = "all-srcs",
+    srcs = [":package-srcs"],
+    tags = ["automanaged"],
+    visibility = ["//visibility:public"],
+)

pkg/scheduler/listers/fake/listers.go (new file)
@@ -0,0 +1,299 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package fake
+
+import (
+	"fmt"
+
+	appsv1 "k8s.io/api/apps/v1"
+	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	storagev1beta1 "k8s.io/api/storage/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	appslisters "k8s.io/client-go/listers/apps/v1"
+	corelisters "k8s.io/client-go/listers/core/v1"
+	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
+)
+
+var _ schedulerlisters.PodLister = &PodLister{}
+
+// PodLister implements PodLister on an []v1.Pods for test purposes.
+type PodLister []*v1.Pod
+
+// List returns []*v1.Pod matching a query.
+func (f PodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
+	for _, pod := range f {
+		if s.Matches(labels.Set(pod.Labels)) {
+			selected = append(selected, pod)
+		}
+	}
+	return selected, nil
+}
+
+// FilteredList returns pods matching a pod filter and a label selector.
+func (f PodLister) FilteredList(podFilter schedulerlisters.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
+	for _, pod := range f {
+		if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
+			selected = append(selected, pod)
+		}
+	}
+	return selected, nil
+}
+
+var _ corelisters.ServiceLister = &ServiceLister{}
+
+// ServiceLister implements ServiceLister on []v1.Service for test purposes.
+type ServiceLister []*v1.Service
+
+// Services returns nil.
+func (f ServiceLister) Services(namespace string) corelisters.ServiceNamespaceLister {
+	return nil
+}
+
+// List returns v1.ServiceList, the list of all services.
+func (f ServiceLister) List(labels.Selector) ([]*v1.Service, error) {
+	return f, nil
+}
+
+// GetPodServices gets the services that have the selector that match the labels on the given pod.
+func (f ServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service, err error) {
+	var selector labels.Selector
+
+	for i := range f {
+		service := f[i]
+		// consider only services that are in the same namespace as the pod
+		if service.Namespace != pod.Namespace {
+			continue
+		}
+		selector = labels.Set(service.Spec.Selector).AsSelectorPreValidated()
+		if selector.Matches(labels.Set(pod.Labels)) {
+			services = append(services, service)
+		}
+	}
+	return
+}
+
+var _ corelisters.ReplicationControllerLister = &ControllerLister{}
+
+// ControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
+type ControllerLister []*v1.ReplicationController
+
+// List returns []v1.ReplicationController, the list of all ReplicationControllers.
+func (f ControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
+	return f, nil
+}
+
+// GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod
+func (f ControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
+	var selector labels.Selector
+
+	for i := range f {
+		controller := f[i]
+		if controller.Namespace != pod.Namespace {
+			continue
+		}
+		selector = labels.Set(controller.Spec.Selector).AsSelectorPreValidated()
+		if selector.Matches(labels.Set(pod.Labels)) {
+			controllers = append(controllers, controller)
+		}
+	}
+	if len(controllers) == 0 {
+		err = fmt.Errorf("Could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+	}
+
+	return
+}
+
+// ReplicationControllers returns nil
+func (f ControllerLister) ReplicationControllers(namespace string) corelisters.ReplicationControllerNamespaceLister {
+	return nil
+}
+
+var _ appslisters.ReplicaSetLister = &ReplicaSetLister{}
+
+// ReplicaSetLister implements ControllerLister on []extensions.ReplicaSet for test purposes.
+type ReplicaSetLister []*appsv1.ReplicaSet
+
+// List returns replica sets.
+func (f ReplicaSetLister) List(labels.Selector) ([]*appsv1.ReplicaSet, error) {
+	return f, nil
+}
+
+// GetPodReplicaSets gets the ReplicaSets that have the selector that match the labels on the given pod
+func (f ReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*appsv1.ReplicaSet, err error) {
+	var selector labels.Selector
+
+	for _, rs := range f {
+		if rs.Namespace != pod.Namespace {
+			continue
+		}
+		selector, err = metav1.LabelSelectorAsSelector(rs.Spec.Selector)
+		if err != nil {
+			return
+		}
+
+		if selector.Matches(labels.Set(pod.Labels)) {
+			rss = append(rss, rs)
+		}
+	}
+	if len(rss) == 0 {
+		err = fmt.Errorf("Could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+	}
+
+	return
+}
+
+// ReplicaSets returns nil
+func (f ReplicaSetLister) ReplicaSets(namespace string) appslisters.ReplicaSetNamespaceLister {
+	return nil
+}
+
+var _ appslisters.StatefulSetLister = &StatefulSetLister{}
+
+// StatefulSetLister implements ControllerLister on []appsv1.StatefulSet for testing purposes.
+type StatefulSetLister []*appsv1.StatefulSet
+
+// List returns stateful sets.
+func (f StatefulSetLister) List(labels.Selector) ([]*appsv1.StatefulSet, error) {
+	return f, nil
+}
+
+// GetPodStatefulSets gets the StatefulSets that have the selector that match the labels on the given pod.
+func (f StatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*appsv1.StatefulSet, err error) {
+	var selector labels.Selector
+
+	for _, ss := range f {
+		if ss.Namespace != pod.Namespace {
+			continue
+		}
+		selector, err = metav1.LabelSelectorAsSelector(ss.Spec.Selector)
+		if err != nil {
+			return
+		}
+		if selector.Matches(labels.Set(pod.Labels)) {
+			sss = append(sss, ss)
+		}
+	}
+	if len(sss) == 0 {
+		err = fmt.Errorf("Could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+	}
+	return
+}
+
+// StatefulSets returns nil
+func (f StatefulSetLister) StatefulSets(namespace string) appslisters.StatefulSetNamespaceLister {
+	return nil
+}
+
+// PersistentVolumeClaimLister implements PersistentVolumeClaimLister on []*v1.PersistentVolumeClaim for test purposes.
+type PersistentVolumeClaimLister []*v1.PersistentVolumeClaim
+
+var _ corelisters.PersistentVolumeClaimLister = PersistentVolumeClaimLister{}
+
+// List returns not implemented error.
+func (f PersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+
+// PersistentVolumeClaims returns a fake PersistentVolumeClaimLister object.
+func (f PersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister {
+	return &persistentVolumeClaimNamespaceLister{
+		pvcs:      f,
+		namespace: namespace,
+	}
+}
+
+// persistentVolumeClaimNamespaceLister is implementation of PersistentVolumeClaimNamespaceLister returned by List() above.
+type persistentVolumeClaimNamespaceLister struct {
+	pvcs      []*v1.PersistentVolumeClaim
+	namespace string
+}
+
+func (f *persistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) {
+	for _, pvc := range f.pvcs {
+		if pvc.Name == name && pvc.Namespace == f.namespace {
+			return pvc, nil
+		}
+	}
+	return nil, fmt.Errorf("persistentvolumeclaim %q not found", name)
+}
+
+func (f persistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
+	return nil, fmt.Errorf("not implemented")
+}
+
+// PersistentVolumeClaimInfo declares a []v1.PersistentVolumeClaim type for testing.
+type PersistentVolumeClaimInfo []v1.PersistentVolumeClaim
+
+// GetPersistentVolumeClaimInfo gets PVC matching the namespace and PVC ID.
+func (pvcs PersistentVolumeClaimInfo) GetPersistentVolumeClaimInfo(namespace string, pvcID string) (*v1.PersistentVolumeClaim, error) {
+	for _, pvc := range pvcs {
+		if pvc.Name == pvcID && pvc.Namespace == namespace {
+			return &pvc, nil
+		}
+	}
+	return nil, fmt.Errorf("Unable to find persistent volume claim: %s/%s", namespace, pvcID)
+}
+
+// NodeLister declares a *v1.Node type for testing.
+type NodeLister []*v1.Node
+
+// GetNodeInfo returns a fake node object in the fake nodes.
+func (nodes NodeLister) GetNodeInfo(nodeName string) (*v1.Node, error) {
+	for _, node := range nodes {
+		if node != nil && node.Name == nodeName {
+			return node, nil
+		}
+	}
+	return nil, fmt.Errorf("Unable to find node: %s", nodeName)
+}
+
+// CSINodeInfo declares a storagev1beta1.CSINode type for testing.
+type CSINodeInfo storagev1beta1.CSINode
+
+// GetCSINodeInfo returns a fake CSINode object.
+func (n CSINodeInfo) GetCSINodeInfo(name string) (*storagev1beta1.CSINode, error) {
+	csiNode := storagev1beta1.CSINode(n)
+	return &csiNode, nil
+}
+
+// PersistentVolumeInfo declares a []v1.PersistentVolume type for testing.
+type PersistentVolumeInfo []v1.PersistentVolume
+
+// GetPersistentVolumeInfo returns a fake PV object in the fake PVs by PV ID.
+func (pvs PersistentVolumeInfo) GetPersistentVolumeInfo(pvID string) (*v1.PersistentVolume, error) {
+	for _, pv := range pvs {
+		if pv.Name == pvID {
+			return &pv, nil
+		}
+	}
+	return nil, fmt.Errorf("Unable to find persistent volume: %s", pvID)
+}
+
+// StorageClassInfo declares a []storagev1.StorageClass type for testing.
+type StorageClassInfo []storagev1.StorageClass
+
+// GetStorageClassInfo returns a fake storage class object in the fake storage classes by name.
+func (classes StorageClassInfo) GetStorageClassInfo(name string) (*storagev1.StorageClass, error) {
+	for _, sc := range classes {
+		if sc.Name == name {
+			return &sc, nil
+		}
+	}
+	return nil, fmt.Errorf("Unable to find storage class: %s", name)
+}
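A minimal test sketch, assuming this fake package, that shows the intended ergonomics: a plain slice literal doubles as a lister. The test name and pod names are illustrative.

package fake_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"

	fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
)

func TestPodListerSelectsByLabel(t *testing.T) {
	// Two pods with different labels; only the "web" pod should match.
	pods := fakelisters.PodLister{
		{ObjectMeta: metav1.ObjectMeta{Name: "web-0", Labels: map[string]string{"app": "web"}}},
		{ObjectMeta: metav1.ObjectMeta{Name: "db-0", Labels: map[string]string{"app": "db"}}},
	}
	got, err := pods.List(labels.SelectorFromSet(labels.Set{"app": "web"}))
	if err != nil || len(got) != 1 || got[0].Name != "web-0" {
		t.Fatalf("got %v (err %v), want just web-0", got, err)
	}
}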

pkg/scheduler/listers/listers.go (new file)
@@ -0,0 +1,40 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package listers
+
+import (
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+// PodFilter is a function to filter a pod. If pod passed return true else return false.
+type PodFilter func(*v1.Pod) bool
+
+// PodLister interface represents anything that can list pods for a scheduler.
+type PodLister interface {
+	// Returns the list of pods.
+	List(labels.Selector) ([]*v1.Pod, error)
+	// This is similar to "List()", but the returned slice does not
+	// contain pods that don't pass `podFilter`.
+	FilteredList(podFilter PodFilter, selector labels.Selector) ([]*v1.Pod, error)
+}
+
+// NodeLister interface represents anything that can list/get node object from node name.
+type NodeLister interface {
+	// TODO(ahg-g): rename to Get and add a List interface.
+	GetNodeInfo(nodeName string) (*v1.Node, error)
+}
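Any type with the single GetNodeInfo method satisfies NodeLister; a hedged in-memory sketch (the nodeMap type is an illustrative name), ending with the compile-time interface check idiom used throughout this commit.

package example

import (
	"fmt"

	v1 "k8s.io/api/core/v1"

	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
)

// nodeMap is a trivial map-backed NodeLister for tests.
type nodeMap map[string]*v1.Node

// GetNodeInfo returns the node with the given name, or an error if absent.
func (m nodeMap) GetNodeInfo(nodeName string) (*v1.Node, error) {
	if n, ok := m[nodeName]; ok {
		return n, nil
	}
	return nil, fmt.Errorf("node %q not found", nodeName)
}

var _ schedulerlisters.NodeLister = nodeMap{}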
pkg/scheduler/scheduler.go

@@ -733,8 +733,7 @@ func (sched *Scheduler) scheduleOne(ctx context.Context) {
 		} else {
 			// Calculating nodeResourceString can be heavy. Avoid it if klog verbosity is below 2.
 			if klog.V(2) {
-				node, _ := sched.Cache().GetNodeInfo(scheduleResult.SuggestedHost)
-				klog.Infof("pod %v/%v is bound successfully on node %q, %d nodes evaluated, %d nodes were found feasible. Bound node resource: %q.", assumedPod.Namespace, assumedPod.Name, scheduleResult.SuggestedHost, scheduleResult.EvaluatedNodes, scheduleResult.FeasibleNodes, nodeResourceString(node))
+				klog.Infof("pod %v/%v is bound successfully on node %q, %d nodes evaluated, %d nodes were found feasible.", assumedPod.Namespace, assumedPod.Name, scheduleResult.SuggestedHost, scheduleResult.EvaluatedNodes, scheduleResult.FeasibleNodes)
 			}

 			metrics.PodScheduleSuccesses.Inc()
@@ -785,15 +784,3 @@ func (p *podPreemptorImpl) removeNominatedNodeName(pod *v1.Pod) error {
 	}
 	return p.setNominatedNodeName(pod, "")
 }
-
-// nodeResourceString returns a string representation of node resources.
-func nodeResourceString(n *v1.Node) string {
-	if n == nil {
-		return "N/A"
-	}
-	return fmt.Sprintf("Capacity: %s; Allocatable: %s.", resourceString(&n.Status.Capacity), resourceString(&n.Status.Allocatable))
-}
-
-func resourceString(r *v1.ResourceList) string {
-	return fmt.Sprintf("CPU<%s>|Memory<%s>|Pods<%s>|StorageEphemeral<%s>", r.Cpu().String(), r.Memory().String(), r.Pods().String(), r.StorageEphemeral().String())
-}
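The deleted helpers existed only to feed one log line, and the surviving code keeps the usual pattern: expensive formatting is guarded by klog.V so it runs only at verbosity 2 or higher. A generic sketch of that pattern (logBound and summarize are illustrative, not scheduler API):

package example

import "k8s.io/klog"

// logBound gates expensive message construction behind a verbosity check,
// mirroring the if klog.V(2) block in scheduleOne above.
func logBound(podName string, summarize func() string) {
	if klog.V(2) {
		// summarize is evaluated only when -v=2 or higher is set.
		klog.Infof("pod %s bound: %s", podName, summarize())
	}
}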
pkg/scheduler/testing/BUILD

@@ -5,19 +5,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
 go_library(
     name = "go_default_library",
     srcs = [
-        "fake_lister.go",
         "workload_prep.go",
         "wrappers.go",
     ],
     importpath = "k8s.io/kubernetes/pkg/scheduler/testing",
     deps = [
-        "//pkg/scheduler/algorithm:go_default_library",
-        "//staging/src/k8s.io/api/apps/v1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
         "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
-        "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library",
-        "//staging/src/k8s.io/client-go/listers/apps/v1:go_default_library",
-        "//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
     ],
 )

pkg/scheduler/testing/fake_lister.go (deleted)

@@ -1,236 +0,0 @@
-/*
-Copyright 2014 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package testing
-
-import (
-	"fmt"
-
-	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	appslisters "k8s.io/client-go/listers/apps/v1"
-	corelisters "k8s.io/client-go/listers/core/v1"
-	"k8s.io/kubernetes/pkg/scheduler/algorithm"
-)
-
-var _ algorithm.PodLister = &FakePodLister{}
-
-// FakePodLister implements PodLister on an []v1.Pods for test purposes.
-type FakePodLister []*v1.Pod
-
-// List returns []*v1.Pod matching a query.
-func (f FakePodLister) List(s labels.Selector) (selected []*v1.Pod, err error) {
-	for _, pod := range f {
-		if s.Matches(labels.Set(pod.Labels)) {
-			selected = append(selected, pod)
-		}
-	}
-	return selected, nil
-}
-
-// FilteredList returns pods matching a pod filter and a label selector.
-func (f FakePodLister) FilteredList(podFilter algorithm.PodFilter, s labels.Selector) (selected []*v1.Pod, err error) {
-	for _, pod := range f {
-		if podFilter(pod) && s.Matches(labels.Set(pod.Labels)) {
-			selected = append(selected, pod)
-		}
-	}
-	return selected, nil
-}
-
-var _ corelisters.ServiceLister = &FakeServiceLister{}
-
-// FakeServiceLister implements ServiceLister on []v1.Service for test purposes.
-type FakeServiceLister []*v1.Service
-
-// Services returns nil.
-func (f FakeServiceLister) Services(namespace string) corelisters.ServiceNamespaceLister {
-	return nil
-}
-
-// List returns v1.ServiceList, the list of all services.
-func (f FakeServiceLister) List(labels.Selector) ([]*v1.Service, error) {
-	return f, nil
-}
-
-// GetPodServices gets the services that have the selector that match the labels on the given pod.
-func (f FakeServiceLister) GetPodServices(pod *v1.Pod) (services []*v1.Service, err error) {
-	var selector labels.Selector
-
-	for i := range f {
-		service := f[i]
-		// consider only services that are in the same namespace as the pod
-		if service.Namespace != pod.Namespace {
-			continue
-		}
-		selector = labels.Set(service.Spec.Selector).AsSelectorPreValidated()
-		if selector.Matches(labels.Set(pod.Labels)) {
-			services = append(services, service)
-		}
-	}
-	return
-}
-
-var _ corelisters.ReplicationControllerLister = &FakeControllerLister{}
-
-// FakeControllerLister implements ControllerLister on []v1.ReplicationController for test purposes.
-type FakeControllerLister []*v1.ReplicationController
-
-// List returns []v1.ReplicationController, the list of all ReplicationControllers.
-func (f FakeControllerLister) List(labels.Selector) ([]*v1.ReplicationController, error) {
-	return f, nil
-}
-
-// GetPodControllers gets the ReplicationControllers that have the selector that match the labels on the given pod
-func (f FakeControllerLister) GetPodControllers(pod *v1.Pod) (controllers []*v1.ReplicationController, err error) {
-	var selector labels.Selector
-
-	for i := range f {
-		controller := f[i]
-		if controller.Namespace != pod.Namespace {
-			continue
-		}
-		selector = labels.Set(controller.Spec.Selector).AsSelectorPreValidated()
-		if selector.Matches(labels.Set(pod.Labels)) {
-			controllers = append(controllers, controller)
-		}
-	}
-	if len(controllers) == 0 {
-		err = fmt.Errorf("Could not find Replication Controller for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-
-	return
-}
-
-// ReplicationControllers returns nil
-func (f FakeControllerLister) ReplicationControllers(namespace string) corelisters.ReplicationControllerNamespaceLister {
-	return nil
-}
-
-var _ appslisters.ReplicaSetLister = &FakeReplicaSetLister{}
-
-// FakeReplicaSetLister implements ControllerLister on []extensions.ReplicaSet for test purposes.
-type FakeReplicaSetLister []*appsv1.ReplicaSet
-
-// List returns replica sets.
-func (f FakeReplicaSetLister) List(labels.Selector) ([]*appsv1.ReplicaSet, error) {
-	return f, nil
-}
-
-// GetPodReplicaSets gets the ReplicaSets that have the selector that match the labels on the given pod
-func (f FakeReplicaSetLister) GetPodReplicaSets(pod *v1.Pod) (rss []*appsv1.ReplicaSet, err error) {
-	var selector labels.Selector
-
-	for _, rs := range f {
-		if rs.Namespace != pod.Namespace {
-			continue
-		}
-		selector, err = metav1.LabelSelectorAsSelector(rs.Spec.Selector)
-		if err != nil {
-			return
-		}
-
-		if selector.Matches(labels.Set(pod.Labels)) {
-			rss = append(rss, rs)
-		}
-	}
-	if len(rss) == 0 {
-		err = fmt.Errorf("Could not find ReplicaSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-
-	return
-}
-
-// ReplicaSets returns nil
-func (f FakeReplicaSetLister) ReplicaSets(namespace string) appslisters.ReplicaSetNamespaceLister {
-	return nil
-}
-
-var _ appslisters.StatefulSetLister = &FakeStatefulSetLister{}
-
-// FakeStatefulSetLister implements ControllerLister on []appsv1.StatefulSet for testing purposes.
-type FakeStatefulSetLister []*appsv1.StatefulSet
-
-// List returns stateful sets.
-func (f FakeStatefulSetLister) List(labels.Selector) ([]*appsv1.StatefulSet, error) {
-	return f, nil
-}
-
-// GetPodStatefulSets gets the StatefulSets that have the selector that match the labels on the given pod.
-func (f FakeStatefulSetLister) GetPodStatefulSets(pod *v1.Pod) (sss []*appsv1.StatefulSet, err error) {
-	var selector labels.Selector
-
-	for _, ss := range f {
-		if ss.Namespace != pod.Namespace {
-			continue
-		}
-		selector, err = metav1.LabelSelectorAsSelector(ss.Spec.Selector)
-		if err != nil {
-			return
-		}
-		if selector.Matches(labels.Set(pod.Labels)) {
-			sss = append(sss, ss)
-		}
-	}
-	if len(sss) == 0 {
-		err = fmt.Errorf("Could not find StatefulSet for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-	return
-}
-
-// StatefulSets returns nil
-func (f FakeStatefulSetLister) StatefulSets(namespace string) appslisters.StatefulSetNamespaceLister {
-	return nil
-}
-
-// FakePersistentVolumeClaimLister implements PersistentVolumeClaimLister on []*v1.PersistentVolumeClaim for test purposes.
-type FakePersistentVolumeClaimLister []*v1.PersistentVolumeClaim
-
-var _ corelisters.PersistentVolumeClaimLister = FakePersistentVolumeClaimLister{}
-
-// List returns not implemented error.
-func (f FakePersistentVolumeClaimLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-// PersistentVolumeClaims returns a FakePersistentVolumeClaimLister object.
-func (f FakePersistentVolumeClaimLister) PersistentVolumeClaims(namespace string) corelisters.PersistentVolumeClaimNamespaceLister {
-	return &fakePersistentVolumeClaimNamespaceLister{
-		pvcs:      f,
-		namespace: namespace,
-	}
-}
-
-// fakePersistentVolumeClaimNamespaceLister is implementation of PersistentVolumeClaimNamespaceLister returned by List() above.
-type fakePersistentVolumeClaimNamespaceLister struct {
-	pvcs      []*v1.PersistentVolumeClaim
-	namespace string
-}
-
-func (f *fakePersistentVolumeClaimNamespaceLister) Get(name string) (*v1.PersistentVolumeClaim, error) {
-	for _, pvc := range f.pvcs {
-		if pvc.Name == name && pvc.Namespace == f.namespace {
-			return pvc, nil
-		}
-	}
-	return nil, fmt.Errorf("persistentvolumeclaim %q not found", name)
-}
-
-func (f fakePersistentVolumeClaimNamespaceLister) List(selector labels.Selector) (ret []*v1.PersistentVolumeClaim, err error) {
-	return nil, fmt.Errorf("not implemented")
-}
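For downstream code the migration is essentially an import and identifier rename; a hedged before/after sketch using the compile-time check idiom from the deleted file:

package example

import (
	schedulerlisters "k8s.io/kubernetes/pkg/scheduler/listers"
	fakelisters "k8s.io/kubernetes/pkg/scheduler/listers/fake"
)

// Before this commit the equivalent check read:
//   var _ algorithm.PodLister = &schedulertesting.FakePodLister{}
// with algorithm = pkg/scheduler/algorithm and schedulertesting = pkg/scheduler/testing.
var _ schedulerlisters.PodLister = fakelisters.PodLister{}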