Switch pv controller to shared informers
@@ -35,6 +35,7 @@ import (
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/util/diff"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/apimachinery/pkg/watch"
    core "k8s.io/client-go/testing"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/record"
@@ -45,7 +46,9 @@ import (
    storageutil "k8s.io/kubernetes/pkg/apis/storage/v1beta1/util"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
    "k8s.io/kubernetes/pkg/client/clientset_generated/clientset/fake"
    fcache "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/testing"
    informers "k8s.io/kubernetes/pkg/client/informers/informers_generated/externalversions"
    storagelisters "k8s.io/kubernetes/pkg/client/listers/storage/v1beta1"
    "k8s.io/kubernetes/pkg/controller"
    vol "k8s.io/kubernetes/pkg/volume"
)
@@ -112,9 +115,10 @@ var noerrors = []reactorError{}
// is updated first and claim.Phase second. This queue will then contain both
// updates as separate entries.
// - Number of changes since the last call to volumeReactor.syncAll().
// - Optionally, volume and claim event sources. When set, all changed
// volumes/claims are sent as Modify event to these sources. These sources can
// be linked back to the controller watcher as "volume/claim updated" events.
// - Optionally, volume and claim fake watchers which should be the same ones
// used by the controller. Any time an event function like deleteVolumeEvent
// is called to simulate an event, the reactor's stores are updated and the
// controller is sent the event via the fake watcher.
// - Optionally, list of errors that should be returned by reactor, simulating
// etcd / API server failures. These errors are evaluated in order and every
// error is returned only once. I.e. when the reactor finds matching
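(Aside, not part of the commit: a minimal sketch of how a test can inject simulated etcd/API-server failures through this error list. The reactorError field names and the standard-library errors.New call are assumptions for illustration; they are not shown in this diff.)

    // Hypothetical sketch: make the first "update" of persistentvolumes fail once.
    // The reactor consumes each injected error exactly once, so later updates succeed.
    client := &fake.Clientset{}
    ctrl := newTestController(client, nil, true)
    injectedErrors := []reactorError{
        {verb: "update", resource: "persistentvolumes", error: errors.New("mock update error")}, // field names assumed
    }
    reactor := newVolumeReactor(client, ctrl, nil, nil, injectedErrors)
    _ = reactor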
@@ -126,8 +130,8 @@ type volumeReactor struct {
    changedObjects []interface{}
    changedSinceLastSync int
    ctrl *PersistentVolumeController
    volumeSource *fcache.FakePVControllerSource
    claimSource *fcache.FakePVCControllerSource
    fakeVolumeWatch *watch.FakeWatcher
    fakeClaimWatch *watch.FakeWatcher
    lock sync.Mutex
    errors []reactorError
}
@@ -176,9 +180,6 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
        }

        // Store the updated object to appropriate places.
        if r.volumeSource != nil {
            r.volumeSource.Add(volume)
        }
        r.volumes[volume.Name] = volume
        r.changedObjects = append(r.changedObjects, volume)
        r.changedSinceLastSync++
@@ -203,9 +204,6 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj
        }

        // Store the updated object to appropriate places.
        if r.volumeSource != nil {
            r.volumeSource.Modify(volume)
        }
        r.volumes[volume.Name] = volume
        r.changedObjects = append(r.changedObjects, volume)
        r.changedSinceLastSync++
@@ -231,9 +229,6 @@ func (r *volumeReactor) React(action core.Action) (handled bool, ret runtime.Obj

        // Store the updated object to appropriate places.
        r.claims[claim.Name] = claim
        if r.claimSource != nil {
            r.claimSource.Modify(claim)
        }
        r.changedObjects = append(r.changedObjects, claim)
        r.changedSinceLastSync++
        glog.V(4).Infof("saved updated claim %s", claim.Name)
@@ -513,9 +508,11 @@ func (r *volumeReactor) deleteVolumeEvent(volume *v1.PersistentVolume) {

    // Generate deletion event. Cloned volume is needed to prevent races (and we
    // would get a clone from etcd too).
    clone, _ := api.Scheme.DeepCopy(volume)
    volumeClone := clone.(*v1.PersistentVolume)
    r.volumeSource.Delete(volumeClone)
    if r.fakeVolumeWatch != nil {
        clone, _ := api.Scheme.DeepCopy(volume)
        volumeClone := clone.(*v1.PersistentVolume)
        r.fakeVolumeWatch.Delete(volumeClone)
    }
}

// deleteClaimEvent simulates that a claim has been deleted in etcd and the
@@ -529,9 +526,11 @@ func (r *volumeReactor) deleteClaimEvent(claim *v1.PersistentVolumeClaim) {

    // Generate deletion event. Cloned claim is needed to prevent races (and we
    // would get a clone from etcd too).
    clone, _ := api.Scheme.DeepCopy(claim)
    claimClone := clone.(*v1.PersistentVolumeClaim)
    r.claimSource.Delete(claimClone)
    if r.fakeClaimWatch != nil {
        clone, _ := api.Scheme.DeepCopy(claim)
        claimClone := clone.(*v1.PersistentVolumeClaim)
        r.fakeClaimWatch.Delete(claimClone)
    }
}

// addVolumeEvent simulates that a volume has been added in etcd and the
@@ -543,7 +542,9 @@ func (r *volumeReactor) addVolumeEvent(volume *v1.PersistentVolume) {
    r.volumes[volume.Name] = volume
    // Generate event. No cloning is needed, this volume is not stored in the
    // controller cache yet.
    r.volumeSource.Add(volume)
    if r.fakeVolumeWatch != nil {
        r.fakeVolumeWatch.Add(volume)
    }
}

// modifyVolumeEvent simulates that a volume has been modified in etcd and the
@@ -555,9 +556,11 @@ func (r *volumeReactor) modifyVolumeEvent(volume *v1.PersistentVolume) {
    r.volumes[volume.Name] = volume
    // Generate modification event. Cloned volume is needed to prevent races (and we
    // would get a clone from etcd too).
    clone, _ := api.Scheme.DeepCopy(volume)
    volumeClone := clone.(*v1.PersistentVolume)
    r.volumeSource.Modify(volumeClone)
    if r.fakeVolumeWatch != nil {
        clone, _ := api.Scheme.DeepCopy(volume)
        volumeClone := clone.(*v1.PersistentVolume)
        r.fakeVolumeWatch.Modify(volumeClone)
    }
}

// addClaimEvent simulates that a claim has been added in etcd and the
@@ -569,45 +572,49 @@ func (r *volumeReactor) addClaimEvent(claim *v1.PersistentVolumeClaim) {
    r.claims[claim.Name] = claim
    // Generate event. No cloning is needed, this claim is not stored in the
    // controller cache yet.
    r.claimSource.Add(claim)
    if r.fakeClaimWatch != nil {
        r.fakeClaimWatch.Add(claim)
    }
}

func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, volumeSource *fcache.FakePVControllerSource, claimSource *fcache.FakePVCControllerSource, errors []reactorError) *volumeReactor {
func newVolumeReactor(client *fake.Clientset, ctrl *PersistentVolumeController, fakeVolumeWatch, fakeClaimWatch *watch.FakeWatcher, errors []reactorError) *volumeReactor {
    reactor := &volumeReactor{
        volumes: make(map[string]*v1.PersistentVolume),
        claims: make(map[string]*v1.PersistentVolumeClaim),
        ctrl: ctrl,
        volumeSource: volumeSource,
        claimSource: claimSource,
        errors: errors,
        volumes: make(map[string]*v1.PersistentVolume),
        claims: make(map[string]*v1.PersistentVolumeClaim),
        ctrl: ctrl,
        fakeVolumeWatch: fakeVolumeWatch,
        fakeClaimWatch: fakeClaimWatch,
        errors: errors,
    }
    client.AddReactor("*", "*", reactor.React)
    client.AddReactor("create", "persistentvolumes", reactor.React)
    client.AddReactor("update", "persistentvolumes", reactor.React)
    client.AddReactor("update", "persistentvolumeclaims", reactor.React)
    client.AddReactor("get", "persistentvolumes", reactor.React)
    client.AddReactor("delete", "persistentvolumes", reactor.React)
    client.AddReactor("delete", "persistentvolumeclaims", reactor.React)

    return reactor
}
func alwaysReady() bool { return true }

func newTestController(kubeClient clientset.Interface, volumeSource, claimSource, classSource cache.ListerWatcher, enableDynamicProvisioning bool) *PersistentVolumeController {
    if volumeSource == nil {
        volumeSource = fcache.NewFakePVControllerSource()
func newTestController(kubeClient clientset.Interface, informerFactory informers.SharedInformerFactory, enableDynamicProvisioning bool) *PersistentVolumeController {
    if informerFactory == nil {
        informerFactory = informers.NewSharedInformerFactory(kubeClient, controller.NoResyncPeriodFunc())
    }
    if claimSource == nil {
        claimSource = fcache.NewFakePVCControllerSource()
    }
    if classSource == nil {
        classSource = fcache.NewFakeControllerSource()
    }

    params := ControllerParameters{
        KubeClient: kubeClient,
        SyncPeriod: 5 * time.Second,
        VolumePlugins: []vol.VolumePlugin{},
        VolumeSource: volumeSource,
        ClaimSource: claimSource,
        ClassSource: classSource,
        VolumeInformer: informerFactory.Core().V1().PersistentVolumes(),
        ClaimInformer: informerFactory.Core().V1().PersistentVolumeClaims(),
        ClassInformer: informerFactory.Storage().V1beta1().StorageClasses(),
        EventRecorder: record.NewFakeRecorder(1000),
        EnableDynamicProvisioning: enableDynamicProvisioning,
    }
    ctrl := NewController(params)

    ctrl.volumeListerSynced = alwaysReady
    ctrl.claimListerSynced = alwaysReady
    ctrl.classListerSynced = alwaysReady
    // Speed up the test
    ctrl.createProvisionedPVInterval = 5 * time.Millisecond
    return ctrl
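(Aside, not part of the commit: a usage sketch of the reworked constructors under the new signatures above. The shared informer factory can be passed in explicitly or left nil; the fake watchers replace the old fake list/watch sources and may also be nil when a test drives syncs directly.)

    client := &fake.Clientset{}
    // the factory can be shared between the controller under test and the test itself
    factory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
    ctrl := newTestController(client, factory, true)
    // fake watchers feed simulated events back to the controller
    fakeVolumeWatch := watch.NewFake()
    fakeClaimWatch := watch.NewFake()
    reactor := newVolumeReactor(client, ctrl, fakeVolumeWatch, fakeClaimWatch, noerrors)
    _ = reactor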
@@ -924,7 +931,7 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag

        // Initialize the controller
        client := &fake.Clientset{}
        ctrl := newTestController(client, nil, nil, nil, true)
        ctrl := newTestController(client, nil, true)
        reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
        for _, claim := range test.initialClaims {
            ctrl.claims.Add(claim)
@@ -935,14 +942,12 @@ func runSyncTests(t *testing.T, tests []controllerTest, storageClasses []*storag
            reactor.volumes[volume.Name] = volume
        }

        // Convert classes to []interface{} and forcefully inject them into
        // controller.
        storageClassPtrs := make([]interface{}, len(storageClasses))
        for i, s := range storageClasses {
            storageClassPtrs[i] = s
        // Inject classes into controller via a custom lister.
        indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
        for _, class := range storageClasses {
            indexer.Add(class)
        }
        // 1 is the resource version
        ctrl.classes.Replace(storageClassPtrs, "1")
        ctrl.classLister = storagelisters.NewStorageClassLister(indexer)

        // Run the tested functions
        err := test.test(ctrl, reactor, test)
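(Aside, not part of the commit: once ctrl.classLister is backed by the hand-filled indexer, the controller resolves storage classes through the ordinary lister API without a running informer. The class name "gold" below is hypothetical.)

    // e.g. inside a test, after the indexer above has been populated:
    class, err := ctrl.classLister.Get("gold") // "gold" is a hypothetical StorageClass name
    if err != nil {
        t.Errorf("expected storage class to be found: %v", err)
    }
    _ = class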
@@ -980,15 +985,14 @@ func runMultisyncTests(t *testing.T, tests []controllerTest, storageClasses []*s

        // Initialize the controller
        client := &fake.Clientset{}
        ctrl := newTestController(client, nil, nil, nil, true)
        ctrl := newTestController(client, nil, true)

        // Convert classes to []interface{} and forcefully inject them into
        // controller.
        storageClassPtrs := make([]interface{}, len(storageClasses))
        for i, s := range storageClasses {
            storageClassPtrs[i] = s
        // Inject classes into controller via a custom lister.
        indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
        for _, class := range storageClasses {
            indexer.Add(class)
        }
        ctrl.classes.Replace(storageClassPtrs, "1")
        ctrl.classLister = storagelisters.NewStorageClassLister(indexer)

        reactor := newVolumeReactor(client, ctrl, nil, nil, test.errors)
        for _, claim := range test.initialClaims {