endpoints: remove obsolete ServiceSelectorCache
Since https://github.com/kubernetes/kubernetes/pull/112648, selectors can be matched efficiently straight from a pre-existing `map[string]string` (via `labels.ValidatedSetSelector`), making the cache obsolete.

Benchmark:

```
name                         old time/op    new time/op    delta
GetPodServiceMemberships-48    189µs ± 1%     193µs ± 1%   +2.10%  (p=0.000 n=10+10)

name                         old alloc/op   new alloc/op   delta
GetPodServiceMemberships-48   59.0kB ± 0%    58.9kB ± 0%   -0.09%  (p=0.000 n=9+9)

name                         old allocs/op  new allocs/op  delta
GetPodServiceMemberships-48    1.02k ± 0%     1.02k ± 0%     ~     (all equal)
```
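For context, here is a minimal runnable sketch of the old and new matching paths. The service/pod values are made-up stand-ins, and only `k8s.io/apimachinery/pkg/labels` is assumed; the controller code in the diff below does the same thing with real `service.Spec.Selector` and `pod.Labels` values.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Stand-ins for service.Spec.Selector and pod.Labels.
	svcSelector := map[string]string{"app": "web"}
	podLabels := labels.Set{"app": "web", "tier": "frontend"}

	// Old path: convert the map into a labels.Selector. Each conversion
	// allocates, which is what ServiceSelectorCache existed to amortize (#73527).
	converted := labels.Set(svcSelector).AsSelectorPreValidated()
	fmt.Println(converted.Matches(podLabels)) // true

	// New path (after #112648): ValidatedSetSelector is a Selector backed by
	// the original map, so matching needs no conversion step and no cache.
	direct := labels.ValidatedSetSelector(svcSelector)
	fmt.Println(direct.Matches(podLabels)) // true
}
```

Per the benchmark above, the direct match costs roughly the same as a cache hit, so the cache no longer justifies its locking and invalidation complexity.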
@@ -22,7 +22,6 @@ import (
 	"fmt"
 	"reflect"
 	"sort"
-	"sync"
 
 	v1 "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
@@ -49,54 +48,15 @@ var semanticIgnoreResourceVersion = conversion.EqualitiesOrDie(
 	},
 )
 
-// ServiceSelectorCache is a cache of service selectors to avoid high CPU consumption caused by frequent calls to AsSelectorPreValidated (see #73527)
-type ServiceSelectorCache struct {
-	lock  sync.RWMutex
-	cache map[string]labels.Selector
-}
-
-// NewServiceSelectorCache init ServiceSelectorCache for both endpoint controller and endpointSlice controller.
-func NewServiceSelectorCache() *ServiceSelectorCache {
-	return &ServiceSelectorCache{
-		cache: map[string]labels.Selector{},
-	}
-}
-
-// Get return selector and existence in ServiceSelectorCache by key.
-func (sc *ServiceSelectorCache) Get(key string) (labels.Selector, bool) {
-	sc.lock.RLock()
-	selector, ok := sc.cache[key]
-	// fine-grained lock improves GetPodServiceMemberships performance(16.5%) than defer measured by BenchmarkGetPodServiceMemberships
-	sc.lock.RUnlock()
-	return selector, ok
-}
-
-// Update can update or add a selector in ServiceSelectorCache while service's selector changed.
-func (sc *ServiceSelectorCache) Update(key string, rawSelector map[string]string) labels.Selector {
-	sc.lock.Lock()
-	defer sc.lock.Unlock()
-	selector := labels.Set(rawSelector).AsSelectorPreValidated()
-	sc.cache[key] = selector
-	return selector
-}
-
-// Delete can delete selector which exist in ServiceSelectorCache.
-func (sc *ServiceSelectorCache) Delete(key string) {
-	sc.lock.Lock()
-	defer sc.lock.Unlock()
-	delete(sc.cache, key)
-}
-
 // GetPodServiceMemberships returns a set of Service keys for Services that have
 // a selector matching the given pod.
-func (sc *ServiceSelectorCache) GetPodServiceMemberships(serviceLister v1listers.ServiceLister, pod *v1.Pod) (sets.String, error) {
+func GetPodServiceMemberships(serviceLister v1listers.ServiceLister, pod *v1.Pod) (sets.String, error) {
 	set := sets.String{}
 	services, err := serviceLister.Services(pod.Namespace).List(labels.Everything())
 	if err != nil {
 		return set, err
 	}
 
-	var selector labels.Selector
 	for _, service := range services {
 		if service.Spec.Selector == nil {
 			// if the service has a nil selector this means selectors match nothing, not everything.
@@ -106,13 +66,7 @@ func (sc *ServiceSelectorCache) GetPodServiceMemberships(serviceLister v1listers
 		if err != nil {
 			return nil, err
 		}
-		if v, ok := sc.Get(key); ok {
-			selector = v
-		} else {
-			selector = sc.Update(key, service.Spec.Selector)
-		}
-
-		if selector.Matches(labels.Set(pod.Labels)) {
+		if labels.ValidatedSetSelector(service.Spec.Selector).Matches(labels.Set(pod.Labels)) {
 			set.Insert(key)
 		}
 	}
@@ -206,7 +160,7 @@ func podEndpointsChanged(oldPod, newPod *v1.Pod) (bool, bool) {
 
 // GetServicesToUpdateOnPodChange returns a set of Service keys for Services
 // that have potentially been affected by a change to this pod.
-func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, selectorCache *ServiceSelectorCache, old, cur interface{}) sets.String {
+func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, old, cur interface{}) sets.String {
 	newPod := cur.(*v1.Pod)
 	oldPod := old.(*v1.Pod)
 	if newPod.ResourceVersion == oldPod.ResourceVersion {
@@ -222,14 +176,14 @@ func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, selec
 		return sets.String{}
 	}
 
-	services, err := selectorCache.GetPodServiceMemberships(serviceLister, newPod)
+	services, err := GetPodServiceMemberships(serviceLister, newPod)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("unable to get pod %s/%s's service memberships: %v", newPod.Namespace, newPod.Name, err))
 		return sets.String{}
 	}
 
 	if labelsChanged {
-		oldServices, err := selectorCache.GetPodServiceMemberships(serviceLister, oldPod)
+		oldServices, err := GetPodServiceMemberships(serviceLister, oldPod)
 		if err != nil {
 			utilruntime.HandleError(fmt.Errorf("unable to get pod %s/%s's service memberships: %v", newPod.Namespace, newPod.Name, err))
 		}
@@ -18,14 +18,12 @@ package endpoint
 
 import (
 	"fmt"
-	"reflect"
 	"testing"
 	"time"
 
 	v1 "k8s.io/api/core/v1"
 	discovery "k8s.io/api/discovery/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
@@ -330,7 +328,7 @@ func genSimpleSvc(namespace, name string) *v1.Service {
 	}
 }
 
-func TestServiceSelectorCache_GetPodServiceMemberships(t *testing.T) {
+func TestGetPodServiceMemberships(t *testing.T) {
 	fakeInformerFactory := informers.NewSharedInformerFactory(&fake.Clientset{}, 0*time.Second)
 	for i := 0; i < 3; i++ {
 		service := &v1.Service{
@@ -361,7 +359,6 @@ func TestServiceSelectorCache_GetPodServiceMemberships(t *testing.T) {
 		pods = append(pods, pod)
 	}
 
-	cache := NewServiceSelectorCache()
 	tests := []struct {
 		name   string
 		pod    *v1.Pod
@@ -395,7 +392,7 @@ func TestServiceSelectorCache_GetPodServiceMemberships(t *testing.T) {
 	}
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
-			services, err := cache.GetPodServiceMemberships(fakeInformerFactory.Core().V1().Services().Lister(), test.pod)
+			services, err := GetPodServiceMemberships(fakeInformerFactory.Core().V1().Services().Lister(), test.pod)
 			if err != nil {
 				t.Errorf("Error from cache.GetPodServiceMemberships: %v", err)
 			} else if !services.Equal(test.expect) {
@@ -405,57 +402,6 @@ func TestServiceSelectorCache_GetPodServiceMemberships(t *testing.T) {
 	}
 }
 
-func TestServiceSelectorCache_Update(t *testing.T) {
-	var selectors []labels.Selector
-	for i := 0; i < 5; i++ {
-		selector := labels.Set(map[string]string{"app": fmt.Sprintf("test-%d", i)}).AsSelectorPreValidated()
-		selectors = append(selectors, selector)
-	}
-	tests := []struct {
-		name   string
-		key    string
-		cache  *ServiceSelectorCache
-		update map[string]string
-		expect labels.Selector
-	}{
-		{
-			name:   "add test/service-0",
-			key:    "test/service-0",
-			cache:  generateServiceSelectorCache(map[string]labels.Selector{}),
-			update: map[string]string{"app": "test-0"},
-			expect: selectors[0],
-		},
-		{
-			name:   "add test/service-1",
-			key:    "test/service-1",
-			cache:  generateServiceSelectorCache(map[string]labels.Selector{"test/service-0": selectors[0]}),
-			update: map[string]string{"app": "test-1"},
-			expect: selectors[1],
-		},
-		{
-			name:   "update test/service-2",
-			key:    "test/service-2",
-			cache:  generateServiceSelectorCache(map[string]labels.Selector{"test/service-2": selectors[2]}),
-			update: map[string]string{"app": "test-0"},
-			expect: selectors[0],
-		},
-	}
-	for _, test := range tests {
-		t.Run(test.name, func(t *testing.T) {
-			selector := test.cache.Update(test.key, test.update)
-			if !reflect.DeepEqual(selector, test.expect) {
-				t.Errorf("Expect selector %v , but got %v", test.expect, selector)
-			}
-		})
-	}
-}
-
-func generateServiceSelectorCache(cache map[string]labels.Selector) *ServiceSelectorCache {
-	return &ServiceSelectorCache{
-		cache: cache,
-	}
-}
-
 func BenchmarkGetPodServiceMemberships(b *testing.B) {
 	// init fake service informer.
 	fakeInformerFactory := informers.NewSharedInformerFactory(&fake.Clientset{}, 0*time.Second)
@@ -484,11 +430,10 @@ func BenchmarkGetPodServiceMemberships(b *testing.B) {
 		},
 	}
 
-	cache := NewServiceSelectorCache()
 	expect := sets.NewString("test/service-0")
 	b.ResetTimer()
 	for i := 0; i < b.N; i++ {
-		services, err := cache.GetPodServiceMemberships(fakeInformerFactory.Core().V1().Services().Lister(), pod)
+		services, err := GetPodServiceMemberships(fakeInformerFactory.Core().V1().Services().Lister(), pod)
 		if err != nil {
 			b.Fatalf("Error from GetPodServiceMemberships(): %v", err)
 		}