Rebased onto the latest changes to the scheduler code
@@ -38,7 +38,7 @@ func affinityPredicates() util.StringSet {
 		"NoDiskConflict",
 		// Ensures that all pods within the same service are hosted on minions within the same region as defined by the "region" label
 		factory.RegisterFitPredicate("ServiceAffinity", algorithm.NewServiceAffinityPredicate(factory.PodLister, factory.ServiceLister, factory.MinionLister, []string{"region"})),
-		// Fit is defined based on the presence/absence of the "region" label on a minion, regardless of value.
+		// Fit is defined based on the presence of the "region" label on a minion, regardless of value.
		factory.RegisterFitPredicate("NodeLabelPredicate", algorithm.NewNodeLabelPredicate(factory.MinionLister, []string{"region"}, true)),
 	)
 }
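
For orientation, the NodeLabelPredicate registered above admits a minion purely on whether the listed label keys exist. The sketch below illustrates that presence/absence check with simplified, hypothetical types (nodeInfo, labelPresencePredicate); it is not the actual algorithm.NewNodeLabelPredicate code.

```go
package main

import "fmt"

// nodeInfo is a simplified stand-in for a minion/node and its labels.
type nodeInfo struct {
	Name   string
	Labels map[string]string
}

// labelPresencePredicate returns a fit function that admits a node only if
// every listed label key is present (when presence is true) or absent
// (when presence is false), regardless of the label's value.
func labelPresencePredicate(keys []string, presence bool) func(nodeInfo) bool {
	return func(n nodeInfo) bool {
		for _, k := range keys {
			_, exists := n.Labels[k]
			if exists != presence {
				return false
			}
		}
		return true
	}
}

func main() {
	fits := labelPresencePredicate([]string{"region"}, true)
	fmt.Println(fits(nodeInfo{Name: "minion-1", Labels: map[string]string{"region": "us-east"}})) // true
	fmt.Println(fits(nodeInfo{Name: "minion-2", Labels: map[string]string{}}))                    // false
}
```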
@@ -48,9 +48,8 @@ func affinityPriorities() util.StringSet {
 		"LeastRequestedPriority",
 		"ServiceSpreadingPriority",
 		// spreads pods belonging to the same service across minions in different zones
 		// region and zone can be nested infrastructure topology levels and defined by labels on minions
 		factory.RegisterPriorityFunction("ZoneSpreadingPriority", algorithm.NewServiceAntiAffinityPriority(factory.ServiceLister, "zone"), 2),
-		// Prioritize nodes based on the presence/absence of a label on a minion, regardless of value.
+		// Prioritize nodes based on the presence of the "zone" label on a minion, regardless of value.
 		factory.RegisterPriorityFunction("NodeLabelPriority", algorithm.NewNodeLabelPriority("zone", true), 1),
 	)
 }
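
Similarly, the ZoneSpreadingPriority registered above favours minions in zones that currently host fewer pods of the service. The following is a rough, self-contained sketch of that spreading idea using plain maps; the real algorithm.NewServiceAntiAffinityPriority implementation differs.

```go
package main

import "fmt"

// zoneSpreadScores is an illustrative scoring pass: nodes in zones that
// already host fewer pods of the service get higher scores, which is the
// spreading behaviour the "ZoneSpreadingPriority" comment describes.
func zoneSpreadScores(podsPerZone map[string]int, nodeZones map[string]string) map[string]int {
	// Find the most loaded zone to normalise against.
	max := 0
	for _, c := range podsPerZone {
		if c > max {
			max = c
		}
	}
	scores := make(map[string]int, len(nodeZones))
	for node, zone := range nodeZones {
		if max == 0 {
			scores[node] = 10 // no pods of the service yet: every node is equally good
			continue
		}
		// Fewer pods in the node's zone => higher score (0..10 scale).
		scores[node] = ((max - podsPerZone[zone]) * 10) / max
	}
	return scores
}

func main() {
	pods := map[string]int{"zone-a": 3, "zone-b": 1}
	nodes := map[string]string{"minion-1": "zone-a", "minion-2": "zone-b"}
	fmt.Println(zoneSpreadScores(pods, nodes)) // map[minion-1:0 minion-2:6]
}
```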
@@ -187,12 +187,12 @@ func (factory *ConfigFactory) pollMinions() (cache.Enumerator, error) {
 	return &nodeEnumerator{list}, nil
 }
 
-// createServiceLW returns a listWatch that gets all changes to services.
-func (factory *ConfigFactory) createServiceLW() *listWatch {
-	return &listWatch{
-		client:        factory.Client,
-		fieldSelector: parseSelectorOrDie(""),
-		resource:      "services",
+// createServiceLW returns a cache.ListWatch that gets all changes to services.
+func (factory *ConfigFactory) createServiceLW() *cache.ListWatch {
+	return &cache.ListWatch{
+		Client:        factory.Client,
+		FieldSelector: parseSelectorOrDie(""),
+		Resource:      "services",
 	}
 }
 
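
The change above replaces the scheduler's private listWatch with the shared cache.ListWatch (note the exported Client, FieldSelector and Resource fields). As a rough illustration of the list-then-watch pattern such an object feeds, here is a sketch with a hypothetical listerWatcher interface and event type rather than the real client API:

```go
package main

import "fmt"

// event is a hypothetical change notification produced by a watch stream.
type event struct {
	Type   string // "ADDED", "MODIFIED", or "DELETED"
	Object string
}

// listerWatcher is a hypothetical stand-in for what a list-watch offers:
// an initial List of every object plus a Watch stream of later changes.
type listerWatcher interface {
	List() ([]string, error)
	Watch() (<-chan event, error)
}

// syncStore seeds a store from List and then applies watch events; this is
// the general shape of keeping a local cache in step with the API server.
func syncStore(lw listerWatcher, store map[string]bool) error {
	items, err := lw.List()
	if err != nil {
		return err
	}
	for _, it := range items {
		store[it] = true
	}
	events, err := lw.Watch()
	if err != nil {
		return err
	}
	for ev := range events {
		switch ev.Type {
		case "ADDED", "MODIFIED":
			store[ev.Object] = true
		case "DELETED":
			delete(store, ev.Object)
		}
	}
	return nil
}

// fakeLW is a toy listerWatcher used only to exercise syncStore.
type fakeLW struct{}

func (fakeLW) List() ([]string, error) { return []string{"svc-a", "svc-b"}, nil }

func (fakeLW) Watch() (<-chan event, error) {
	ch := make(chan event, 2)
	ch <- event{Type: "DELETED", Object: "svc-a"}
	ch <- event{Type: "ADDED", Object: "svc-c"}
	close(ch)
	return ch, nil
}

func main() {
	store := map[string]bool{}
	if err := syncStore(fakeLW{}, store); err != nil {
		fmt.Println("sync failed:", err)
		return
	}
	fmt.Println(store) // map[svc-b:true svc-c:true]
}
```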
@@ -226,40 +226,6 @@ type nodeEnumerator struct {
 	*api.NodeList
 }
 
-// storeToServiceLister turns a store into a service lister. The store must contain (only) services.
-type storeToServiceLister struct {
-	cache.Store
-}
-
-func (s *storeToServiceLister) ListServices() (services api.ServiceList, err error) {
-	for _, m := range s.List() {
-		services.Items = append(services.Items, *(m.(*api.Service)))
-	}
-	return services, nil
-}
-
-func (s *storeToServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) {
-	var selector labels.Selector
-	var service api.Service
-
-	for _, m := range s.List() {
-		service = *m.(*api.Service)
-		// consider only services that are in the same namespace as the pod
-		if service.Namespace != pod.Namespace {
-			continue
-		}
-		selector = labels.Set(service.Spec.Selector).AsSelector()
-		if selector.Matches(labels.Set(pod.Labels)) {
-			services = append(services, service)
-		}
-	}
-	if len(services) == 0 {
-		err = fmt.Errorf("Could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
-	}
-
-	return
-}
-
 // Len returns the number of items in the node list.
 func (ne *nodeEnumerator) Len() int {
 	if ne.NodeList == nil {
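
The removed GetPodServices above matches a pod to services in the same namespace whose label selector matches the pod's labels. A minimal, dependency-free sketch of that equality-based matching (hypothetical matchesSelector helper, not the labels package):

```go
package main

import "fmt"

// matchesSelector reports whether every key/value pair demanded by the
// service's selector appears with the same value in the pod's labels; this
// mirrors the equality-based match GetPodServices performs via labels.Set.
func matchesSelector(selector, podLabels map[string]string) bool {
	for k, v := range selector {
		if podLabels[k] != v {
			return false
		}
	}
	return true
}

func main() {
	selector := map[string]string{"app": "guestbook"}
	fmt.Println(matchesSelector(selector, map[string]string{"app": "guestbook", "tier": "frontend"})) // true
	fmt.Println(matchesSelector(selector, map[string]string{"app": "redis"}))                         // false
}
```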