Use shared informers for proxy endpoints and service configs
Use shared informers instead of creating local controllers/reflectors for the proxy's endpoints and service configs. This allows downstream integrators to pass in preexisting shared informers to save on memory and CPU usage, and it also enables the cache mutation detector for kube-proxy in the presubmit jobs that already turn it on.
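For context, the wiring the new constructors expect mirrors what the updated tests below do: build (or reuse) a shared informer factory, hand its Services()/Endpoints() informers to NewServiceConfig/NewEndpointsConfig, start the factory, then run the config loop. The sketch below illustrates that flow for the service side only; it assumes the constructors live in k8s.io/kubernetes/pkg/proxy/config (imported here as proxyconfig), and the fake clientset and loggingHandler are illustrative, not part of this change.

package main

import (
    "time"

    "github.com/golang/glog"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
    proxyconfig "k8s.io/kubernetes/pkg/proxy/config"
)

// loggingHandler is an illustrative ServiceConfigHandler that only logs
// how many services the proxy currently sees.
type loggingHandler struct{}

func (loggingHandler) OnServiceUpdate(services []*api.Service) {
    glog.V(2).Infof("proxy sees %d services", len(services))
}

func main() {
    // A fake clientset keeps the sketch self-contained; a downstream
    // integrator would pass its real clientset and, ideally, an informer
    // factory it already shares with other controllers.
    client := fake.NewSimpleClientset()
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    // The constructor now takes a shared informer instead of building its
    // own reflector from a cache.Getter.
    serviceConfig := proxyconfig.NewServiceConfig(
        sharedInformers.Core().InternalVersion().Services(), time.Minute)
    serviceConfig.RegisterHandler(loggingHandler{})

    stopCh := make(chan struct{})
    defer close(stopCh)

    // Start the shared informers first; Run waits for the cache to sync
    // before it begins delivering updates to registered handlers.
    go sharedInformers.Start(stopCh)
    go serviceConfig.Run(stopCh)

    // ... the proxy keeps running until stopCh is closed ...
    time.Sleep(time.Second)
}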
@@ -17,11 +17,10 @@ go_library(
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion/core/internalversion:go_default_library",
        "//pkg/client/listers/core/internalversion:go_default_library",
        "//pkg/util/config:go_default_library",
        "//vendor:github.com/golang/glog",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/fields",
        "//vendor:k8s.io/apimachinery/pkg/labels",
        "//vendor:k8s.io/apimachinery/pkg/util/runtime",
        "//vendor:k8s.io/client-go/tools/cache",
@@ -38,11 +37,12 @@ go_test(
    tags = ["automanaged"],
    deps = [
        "//pkg/api:go_default_library",
        "//pkg/client/clientset_generated/internalclientset/fake:go_default_library",
        "//pkg/client/informers/informers_generated/internalversion:go_default_library",
        "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
        "//vendor:k8s.io/apimachinery/pkg/runtime",
        "//vendor:k8s.io/apimachinery/pkg/util/wait",
        "//vendor:k8s.io/apimachinery/pkg/watch",
        "//vendor:k8s.io/client-go/tools/cache",
        "//vendor:k8s.io/client-go/testing",
    ],
)

@@ -24,27 +24,13 @@ import (
    "time"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/tools/cache"
    ktesting "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)

type fakeLW struct {
    listResp runtime.Object
    watchResp watch.Interface
}

func (lw fakeLW) List(options metav1.ListOptions) (runtime.Object, error) {
    return lw.listResp, nil
}

func (lw fakeLW) Watch(options metav1.ListOptions) (watch.Interface, error) {
    return lw.watchResp, nil
}

var _ cache.ListerWatcher = fakeLW{}

func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
    service1v1 := &api.Service{
        ObjectMeta: metav1.ObjectMeta{Namespace: "testnamespace", Name: "s1"},
@@ -57,11 +43,9 @@ func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
        Spec: api.ServiceSpec{Ports: []api.ServicePort{{Protocol: "TCP", Port: 30}}}}

    // Setup fake api client.
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.ServiceList{Items: []api.Service{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)
@@ -69,8 +53,11 @@ func TestNewServicesSourceApi_UpdatesAndMultipleServices(t *testing.T) {
    ch := make(chan struct{})
    handler := newSvcHandler(t, nil, func() { ch <- struct{}{} })

    serviceConfig := newServiceConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    serviceConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
    serviceConfig.RegisterHandler(handler)
    go sharedInformers.Start(stopCh)
    go serviceConfig.Run(stopCh)

    // Add the first service
@@ -130,11 +117,9 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
    }

    // Setup fake api client.
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.EndpointsList{Items: []api.Endpoints{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)
@@ -142,8 +127,11 @@ func TestNewEndpointsSourceApi_UpdatesAndMultipleEndpoints(t *testing.T) {
    ch := make(chan struct{})
    handler := newEpsHandler(t, nil, func() { ch <- struct{}{} })

    endpointsConfig := newEndpointsConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    endpointsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
    endpointsConfig.RegisterHandler(handler)
    go sharedInformers.Start(stopCh)
    go endpointsConfig.Run(stopCh)

    // Add the first endpoints
@@ -229,19 +217,11 @@ func TestInitialSync(t *testing.T) {
    wg.Add(2)

    // Setup fake api client.
    fakeSvcWatch := watch.NewFake()
    svcLW := fakeLW{
        listResp: &api.ServiceList{Items: []api.Service{*svc1, *svc2}},
        watchResp: fakeSvcWatch,
    }
    fakeEpsWatch := watch.NewFake()
    epsLW := fakeLW{
        listResp: &api.EndpointsList{Items: []api.Endpoints{*eps2, *eps1}},
        watchResp: fakeEpsWatch,
    }
    client := fake.NewSimpleClientset(svc1, svc2, eps2, eps1)
    sharedInformers := informers.NewSharedInformerFactory(client, 0)

    svcConfig := newServiceConfig(svcLW, time.Minute)
    epsConfig := newEndpointsConfig(epsLW, time.Minute)
    svcConfig := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), 0)
    epsConfig := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), 0)
    svcHandler := newSvcHandler(t, []*api.Service{svc2, svc1}, wg.Done)
    svcConfig.RegisterHandler(svcHandler)
    epsHandler := newEpsHandler(t, []*api.Endpoints{eps2, eps1}, wg.Done)
@@ -249,6 +229,7 @@ func TestInitialSync(t *testing.T) {

    stopCh := make(chan struct{})
    defer close(stopCh)
    go sharedInformers.Start(stopCh)
    go svcConfig.Run(stopCh)
    go epsConfig.Run(stopCh)
    wg.Wait()

@@ -21,12 +21,11 @@ import (
    "time"

    "github.com/golang/glog"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    "k8s.io/apimachinery/pkg/labels"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/client-go/tools/cache"
    "k8s.io/kubernetes/pkg/api"
    coreinformers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion/core/internalversion"
    listers "k8s.io/kubernetes/pkg/client/listers/core/internalversion"
    "k8s.io/kubernetes/pkg/util/config"
)
@@ -64,35 +63,36 @@ type EndpointsConfigHandler interface {
// EndpointsConfig tracks a set of endpoints configurations.
// It accepts "set", "add" and "remove" operations of endpoints via channels, and invokes registered handlers on change.
type EndpointsConfig struct {
    informer cache.Controller
    lister listers.EndpointsLister
    handlers []EndpointsConfigHandler
    lister listers.EndpointsLister
    listerSynced cache.InformerSynced
    handlers []EndpointsConfigHandler
    // updates channel is used to trigger registered handlers.
    updates chan struct{}
    stop chan struct{}
}

// NewEndpointsConfig creates a new EndpointsConfig.
func NewEndpointsConfig(c cache.Getter, period time.Duration) *EndpointsConfig {
    endpointsLW := cache.NewListWatchFromClient(c, "endpoints", metav1.NamespaceAll, fields.Everything())
    return newEndpointsConfig(endpointsLW, period)
}
func NewEndpointsConfig(endpointsInformer coreinformers.EndpointsInformer, resyncPeriod time.Duration) *EndpointsConfig {
    result := &EndpointsConfig{
        lister: endpointsInformer.Lister(),
        listerSynced: endpointsInformer.Informer().HasSynced,
        // The updates channel is used to send interrupts to the Endpoints handler.
        // It's buffered because we never want to block for as long as there is a
        // pending interrupt, but don't want to drop them if the handler is doing
        // work.
        updates: make(chan struct{}, 1),
        stop: make(chan struct{}),
    }

func newEndpointsConfig(lw cache.ListerWatcher, period time.Duration) *EndpointsConfig {
    result := &EndpointsConfig{}

    store, informer := cache.NewIndexerInformer(
        lw,
        &api.Endpoints{},
        period,
    endpointsInformer.Informer().AddEventHandlerWithResyncPeriod(
        cache.ResourceEventHandlerFuncs{
            AddFunc: result.handleAddEndpoints,
            UpdateFunc: result.handleUpdateEndpoints,
            DeleteFunc: result.handleDeleteEndpoints,
        },
        cache.Indexers{},
        resyncPeriod,
    )
    result.informer = informer
    result.lister = listers.NewEndpointsLister(store)

    return result
}

@@ -101,16 +101,9 @@ func (c *EndpointsConfig) RegisterHandler(handler EndpointsConfigHandler) {
    c.handlers = append(c.handlers, handler)
}

// Run starts the underlying informer and goroutine responsible for calling
// registered handlers.
// Run starts the goroutine responsible for calling registered handlers.
func (c *EndpointsConfig) Run(stopCh <-chan struct{}) {
    // The updates channel is used to send interrupts to the Endpoints handler.
    // It's buffered because we never want to block for as long as there is a
    // pending interrupt, but don't want to drop them if the handler is doing
    // work.
    c.updates = make(chan struct{}, 1)
    go c.informer.Run(stopCh)
    if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
    if !cache.WaitForCacheSync(stopCh, c.listerSynced) {
        utilruntime.HandleError(fmt.Errorf("endpoint controller not synced"))
        return
    }
@@ -118,27 +111,32 @@ func (c *EndpointsConfig) Run(stopCh <-chan struct{}) {
    // We have synced informers. Now we can start delivering updates
    // to the registered handler.
    go func() {
        for range c.updates {
            endpoints, err := c.lister.List(labels.Everything())
            if err != nil {
                glog.Errorf("Error while listing endpoints from cache: %v", err)
                // This will cause a retry (if there isn't any other trigger in-flight).
                c.dispatchUpdate()
                continue
            }
            if endpoints == nil {
                endpoints = []*api.Endpoints{}
            }
            for i := range c.handlers {
                glog.V(3).Infof("Calling handler.OnEndpointsUpdate()")
                c.handlers[i].OnEndpointsUpdate(endpoints)
        for {
            select {
            case <-c.updates:
                endpoints, err := c.lister.List(labels.Everything())
                if err != nil {
                    glog.Errorf("Error while listing endpoints from cache: %v", err)
                    // This will cause a retry (if there isn't any other trigger in-flight).
                    c.dispatchUpdate()
                    continue
                }
                if endpoints == nil {
                    endpoints = []*api.Endpoints{}
                }
                for i := range c.handlers {
                    glog.V(3).Infof("Calling handler.OnEndpointsUpdate()")
                    c.handlers[i].OnEndpointsUpdate(endpoints)
                }
            case <-c.stop:
                return
            }
        }
    }()
    // Close updates channel when stopCh is closed.
    go func() {
        <-stopCh
        close(c.updates)
        close(c.stop)
    }()
}

@@ -157,6 +155,9 @@ func (c *EndpointsConfig) handleDeleteEndpoints(_ interface{}) {
func (c *EndpointsConfig) dispatchUpdate() {
    select {
    case c.updates <- struct{}{}:
        // Work enqueued successfully
    case <-c.stop:
        // We're shut down / avoid logging the message below
    default:
        glog.V(4).Infof("Endpoints handler already has a pending interrupt.")
    }
@@ -165,35 +166,36 @@ func (c *EndpointsConfig) dispatchUpdate() {
// ServiceConfig tracks a set of service configurations.
// It accepts "set", "add" and "remove" operations of services via channels, and invokes registered handlers on change.
type ServiceConfig struct {
    informer cache.Controller
    lister listers.ServiceLister
    handlers []ServiceConfigHandler
    lister listers.ServiceLister
    listerSynced cache.InformerSynced
    handlers []ServiceConfigHandler
    // updates channel is used to trigger registered handlers
    updates chan struct{}
    stop chan struct{}
}

// NewServiceConfig creates a new ServiceConfig.
func NewServiceConfig(c cache.Getter, period time.Duration) *ServiceConfig {
    servicesLW := cache.NewListWatchFromClient(c, "services", metav1.NamespaceAll, fields.Everything())
    return newServiceConfig(servicesLW, period)
}
func NewServiceConfig(serviceInformer coreinformers.ServiceInformer, resyncPeriod time.Duration) *ServiceConfig {
    result := &ServiceConfig{
        lister: serviceInformer.Lister(),
        listerSynced: serviceInformer.Informer().HasSynced,
        // The updates channel is used to send interrupts to the Services handler.
        // It's buffered because we never want to block for as long as there is a
        // pending interrupt, but don't want to drop them if the handler is doing
        // work.
        updates: make(chan struct{}, 1),
        stop: make(chan struct{}),
    }

func newServiceConfig(lw cache.ListerWatcher, period time.Duration) *ServiceConfig {
    result := &ServiceConfig{}

    store, informer := cache.NewIndexerInformer(
        lw,
        &api.Service{},
        period,
    serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
        cache.ResourceEventHandlerFuncs{
            AddFunc: result.handleAddService,
            UpdateFunc: result.handleUpdateService,
            DeleteFunc: result.handleDeleteService,
        },
        cache.Indexers{},
        resyncPeriod,
    )
    result.informer = informer
    result.lister = listers.NewServiceLister(store)

    return result
}

@@ -202,16 +204,10 @@ func (c *ServiceConfig) RegisterHandler(handler ServiceConfigHandler) {
    c.handlers = append(c.handlers, handler)
}

// Run starts the underlying informer and goroutine responsible for calling
// Run starts the goroutine responsible for calling
// registered handlers.
func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
    // The updates channel is used to send interrupts to the Services handler.
    // It's buffered because we never want to block for as long as there is a
    // pending interrupt, but don't want to drop them if the handler is doing
    // work.
    c.updates = make(chan struct{}, 1)
    go c.informer.Run(stopCh)
    if !cache.WaitForCacheSync(stopCh, c.informer.HasSynced) {
    if !cache.WaitForCacheSync(stopCh, c.listerSynced) {
        utilruntime.HandleError(fmt.Errorf("service controller not synced"))
        return
    }
@@ -219,27 +215,32 @@ func (c *ServiceConfig) Run(stopCh <-chan struct{}) {
    // We have synced informers. Now we can start delivering updates
    // to the registered handler.
    go func() {
        for range c.updates {
            services, err := c.lister.List(labels.Everything())
            if err != nil {
                glog.Errorf("Error while listing services from cache: %v", err)
                // This will cause a retry (if there isn't any other trigger in-flight).
                c.dispatchUpdate()
                continue
            }
            if services == nil {
                services = []*api.Service{}
            }
            for i := range c.handlers {
                glog.V(3).Infof("Calling handler.OnServiceUpdate()")
                c.handlers[i].OnServiceUpdate(services)
        for {
            select {
            case <-c.updates:
                services, err := c.lister.List(labels.Everything())
                if err != nil {
                    glog.Errorf("Error while listing services from cache: %v", err)
                    // This will cause a retry (if there isnt' any other trigger in-flight).
                    c.dispatchUpdate()
                    continue
                }
                if services == nil {
                    services = []*api.Service{}
                }
                for i := range c.handlers {
                    glog.V(3).Infof("Calling handler.OnServiceUpdate()")
                    c.handlers[i].OnServiceUpdate(services)
                }
            case <-c.stop:
                return
            }
        }
    }()
    // Close updates channel when stopCh is closed.
    go func() {
        <-stopCh
        close(c.updates)
        close(c.stop)
    }()
}

@@ -258,8 +259,11 @@ func (c *ServiceConfig) handleDeleteService(_ interface{}) {
func (c *ServiceConfig) dispatchUpdate() {
    select {
    case c.updates <- struct{}{}:
        // Work enqueued successfully
    case <-c.stop:
        // We're shut down / avoid logging the message below
    default:
        glog.V(4).Infof("Service handler alread has a pending interrupt.")
        glog.V(4).Infof("Service handler already has a pending interrupt.")
    }
}

@@ -25,7 +25,10 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    ktesting "k8s.io/client-go/testing"
    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset/fake"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/internalversion"
)

type sortedServices []*api.Service
@@ -121,17 +124,19 @@ func (h *EndpointsHandlerMock) ValidateEndpoints(t *testing.T, expectedEndpoints
}

func TestNewServiceAddedAndNotified(t *testing.T) {
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.ServiceList{Items: []api.Service{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)

    config := newServiceConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
    handler := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    go sharedInformers.Start(stopCh)
    go config.Run(stopCh)

    service := &api.Service{
@@ -143,17 +148,19 @@ func TestNewServiceAddedAndNotified(t *testing.T) {
}

func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.ServiceList{Items: []api.Service{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)

    config := newServiceConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
    handler := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    go sharedInformers.Start(stopCh)
    go config.Run(stopCh)

    service1 := &api.Service{
@@ -177,19 +184,21 @@ func TestServiceAddedRemovedSetAndNotified(t *testing.T) {
}

func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.ServiceList{Items: []api.Service{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("services", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)

    config := newServiceConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    config := NewServiceConfig(sharedInformers.Core().InternalVersion().Services(), time.Minute)
    handler := NewServiceHandlerMock()
    handler2 := NewServiceHandlerMock()
    config.RegisterHandler(handler)
    config.RegisterHandler(handler2)
    go sharedInformers.Start(stopCh)
    go config.Run(stopCh)

    service1 := &api.Service{
@@ -209,19 +218,21 @@ func TestNewServicesMultipleHandlersAddedAndNotified(t *testing.T) {
}

func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.EndpointsList{Items: []api.Endpoints{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)

    config := newEndpointsConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
    handler := NewEndpointsHandlerMock()
    handler2 := NewEndpointsHandlerMock()
    config.RegisterHandler(handler)
    config.RegisterHandler(handler2)
    go sharedInformers.Start(stopCh)
    go config.Run(stopCh)

    endpoints1 := &api.Endpoints{
@@ -247,19 +258,21 @@ func TestNewEndpointsMultipleHandlersAddedAndNotified(t *testing.T) {
}

func TestNewEndpointsMultipleHandlersAddRemoveSetAndNotified(t *testing.T) {
    client := fake.NewSimpleClientset()
    fakeWatch := watch.NewFake()
    lw := fakeLW{
        listResp: &api.EndpointsList{Items: []api.Endpoints{}},
        watchResp: fakeWatch,
    }
    client.PrependWatchReactor("endpoints", ktesting.DefaultWatchReactor(fakeWatch, nil))

    stopCh := make(chan struct{})
    defer close(stopCh)

    config := newEndpointsConfig(lw, time.Minute)
    sharedInformers := informers.NewSharedInformerFactory(client, time.Minute)

    config := NewEndpointsConfig(sharedInformers.Core().InternalVersion().Endpoints(), time.Minute)
    handler := NewEndpointsHandlerMock()
    handler2 := NewEndpointsHandlerMock()
    config.RegisterHandler(handler)
    config.RegisterHandler(handler2)
    go sharedInformers.Start(stopCh)
    go config.Run(stopCh)

    endpoints1 := &api.Endpoints{
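A note on the update delivery in the new EndpointsConfig/ServiceConfig code above: change notifications are coalesced through a one-element buffered channel. Every informer event calls dispatchUpdate, which either enqueues a trigger or drops it if one is already pending, and the run loop re-lists the whole cache once per trigger. A minimal standalone sketch of that coalescing pattern follows; the names (trigger, dispatch) are illustrative and not part of this change.

package main

import (
    "fmt"
    "time"
)

// trigger coalesces bursts of notifications: at most one update is ever
// pending, so a slow consumer sees "something changed" rather than a backlog.
type trigger struct {
    updates chan struct{}
}

func newTrigger() *trigger {
    return &trigger{updates: make(chan struct{}, 1)}
}

// dispatch enqueues a notification unless one is already pending.
func (t *trigger) dispatch() {
    select {
    case t.updates <- struct{}{}:
        // Work enqueued successfully.
    default:
        // A notification is already pending; coalesce this one into it.
    }
}

func main() {
    t := newTrigger()
    // A burst of events collapses into a single pending notification.
    for i := 0; i < 5; i++ {
        t.dispatch()
    }
    go func() {
        time.Sleep(10 * time.Millisecond)
        t.dispatch() // a later event produces one more notification
    }()

    for i := 0; i < 2; i++ {
        <-t.updates
        fmt.Println("re-list the cache and call registered handlers")
    }
}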