Enable garbage collection of custom resources

Enhance the garbage collector to periodically refresh the resources it
monitors (via discovery) to enable custom resource definition GC.
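
In outline, the refresh amounts to polling the discovery endpoint and diffing the advertised resources against the monitors already running. The following is a minimal sketch of that loop, not the collector's actual code: refreshLoop and its resync parameter are hypothetical stand-ins for the resyncMonitors method exercised in the test below.

package gcsketch

import (
	"time"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/discovery"
)

// refreshLoop is a hypothetical sketch: every period, re-list the resources
// the API server advertises via discovery and hand the set to resync
// (standing in for the garbage collector's resyncMonitors).
func refreshLoop(dc discovery.DiscoveryInterface, period time.Duration,
	resync func(map[schema.GroupVersionResource]struct{}) error, stopCh <-chan struct{}) {
	ticker := time.NewTicker(period)
	defer ticker.Stop()
	for {
		select {
		case <-stopCh:
			return
		case <-ticker.C:
			lists, err := dc.ServerPreferredResources()
			if err != nil {
				continue // transient discovery failure; retry on the next tick
			}
			resources := map[schema.GroupVersionResource]struct{}{}
			for _, list := range lists {
				gv, err := schema.ParseGroupVersion(list.GroupVersion)
				if err != nil {
					continue
				}
				for _, r := range list.APIResources {
					resources[gv.WithResource(r.Name)] = struct{}{}
				}
			}
			// On error the caller keeps its previous monitors and retries later.
			_ = resync(resources)
		}
	}
}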

This implementation caches full Unstructured structs for any kinds not
covered by a shared informer. The existing meta-only codec supports only
compiled types; an improved codec that handles arbitrary types could be
introduced so that only metadata is cached for all non-informer types.
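
To make the memory tradeoff concrete, here is an illustrative sketch (hypothetical types, not the collector's real cache): without a type-agnostic codec, every kind lacking a shared informer keeps a whole Unstructured object per entry, whereas a metadata-only entry would hold just the fields the dependency graph reads.

package gcsketch

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/types"
)

// fullEntryCache models today's behavior: kinds with no shared informer
// keep the entire decoded object around.
type fullEntryCache map[types.UID]*unstructured.Unstructured

// metaEntry models what an arbitrary-type meta-only codec would permit:
// only the metadata the dependency graph consults.
type metaEntry struct {
	Name            string
	Namespace       string
	OwnerReferences []metav1.OwnerReference
}

// toMetaEntry projects a full object down to its graph-relevant metadata.
func toMetaEntry(u *unstructured.Unstructured) metaEntry {
	return metaEntry{
		Name:            u.GetName(),
		Namespace:       u.GetNamespace(),
		OwnerReferences: u.GetOwnerReferences(),
	}
}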
Author: Dan Mace
Date:   2017-05-17 15:54:58 -07:00
Parent: 3d3d3922c2
Commit: d08dfb92c7
16 changed files with 856 additions and 235 deletions

pkg/controller/garbagecollector/garbagecollector_test.go

@@ -46,25 +46,62 @@ import (
 	"k8s.io/kubernetes/pkg/controller/garbagecollector/metaonly"
 )
 
-func TestNewGarbageCollector(t *testing.T) {
+func TestGarbageCollectorConstruction(t *testing.T) {
 	config := &restclient.Config{}
 	config.ContentConfig.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: metaonly.NewMetadataCodecFactory()}
-	metaOnlyClientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
+	tweakableRM := meta.NewDefaultRESTMapper(nil, nil)
+	rm := meta.MultiRESTMapper{tweakableRM, api.Registry.RESTMapper()}
+	metaOnlyClientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc)
 	config.ContentConfig.NegotiatedSerializer = nil
-	clientPool := dynamic.NewClientPool(config, api.Registry.RESTMapper(), dynamic.LegacyAPIPathResolverFunc)
+	clientPool := dynamic.NewClientPool(config, rm, dynamic.LegacyAPIPathResolverFunc)
 	podResource := map[schema.GroupVersionResource]struct{}{
 		{Version: "v1", Resource: "pods"}: {},
-		// no monitor will be constructed for non-core resource, the GC construction will not fail.
-		{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
 	}
+	twoResources := map[schema.GroupVersionResource]struct{}{
+		{Version: "v1", Resource: "pods"}: {},
+		{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
+	}
 	client := fake.NewSimpleClientset()
 	sharedInformers := informers.NewSharedInformerFactory(client, 0)
 
-	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, api.Registry.RESTMapper(), podResource, ignoredResources, sharedInformers)
+	// No monitor will be constructed for the non-core resource, but the GC
+	// construction will not fail.
+	gc, err := NewGarbageCollector(metaOnlyClientPool, clientPool, rm, twoResources, map[schema.GroupResource]struct{}{}, sharedInformers)
 	if err != nil {
 		t.Fatal(err)
 	}
 	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
+
+	// Make sure resource monitor syncing creates and stops resource monitors.
+	tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
+	err = gc.resyncMonitors(twoResources)
+	if err != nil {
+		t.Errorf("Failed adding a monitor: %v", err)
+	}
+	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
+
+	err = gc.resyncMonitors(podResource)
+	if err != nil {
+		t.Errorf("Failed removing a monitor: %v", err)
+	}
+	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
+
+	// Make sure the syncing mechanism also works after Run() has been called
+	stopCh := make(chan struct{})
+	defer close(stopCh)
+	go gc.Run(1, stopCh)
+
+	err = gc.resyncMonitors(twoResources)
+	if err != nil {
+		t.Errorf("Failed adding a monitor: %v", err)
+	}
+	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))
+
+	err = gc.resyncMonitors(podResource)
+	if err != nil {
+		t.Errorf("Failed removing a monitor: %v", err)
+	}
+	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
 }
 
 // fakeAction records information about requests to aid in testing.
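
With the diff applied, the renamed test can be run on its own from a kubernetes checkout with standard Go tooling, for example: go test ./pkg/controller/garbagecollector -run TestGarbageCollectorConstruction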