
Before deleting an object because its owners are absent, GC verifies that absence with a live lookup. The coordinates used for that lookup are the ones specified in the child's ownerReference. To delete multiple children of the same parent efficiently (e.g. 1000 pods of a ReplicaSet), a 404 response to such a lookup is cached in absentOwnerCache. Previously the cache was a simple uid set; however, since children can disagree on the coordinates that should be used to look up a given uid, the cache should record the exact coordinates verified absent. This is an [apiVersion, kind, namespace, name, uid] tuple.
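A minimal sketch of how the reworked cache is keyed, using the objectReference, NewReferenceCache, and Has names exercised by TestAbsentOwnerCache in the file below; the Add method is an assumption (only NewReferenceCache and Has appear in these tests):

```go
// Sketch only, not part of the file below. Assumes the garbagecollector
// package's objectReference and ReferenceCache types; Add is assumed to
// exist alongside the Has method shown in TestAbsentOwnerCache.
func absentOwnerCacheSketch() {
	absent := NewReferenceCache(2)

	// Record an owner verified absent by its full coordinates, not just its uid.
	absent.Add(objectReference{
		Namespace:      "ns1",
		OwnerReference: metav1.OwnerReference{APIVersion: "v1", Kind: "ReplicationController", Name: "rc1", UID: "1"},
	})

	// Same uid, same coordinates: cache hit, no live lookup needed.
	_ = absent.Has(objectReference{
		Namespace:      "ns1",
		OwnerReference: metav1.OwnerReference{APIVersion: "v1", Kind: "ReplicationController", Name: "rc1", UID: "1"},
	})

	// Same uid, different coordinates (e.g. another namespace): cache miss,
	// so this child's owner is re-verified instead of being treated as absent
	// on the strength of another child's lookup.
	_ = absent.Has(objectReference{
		Namespace:      "ns2",
		OwnerReference: metav1.OwnerReference{APIVersion: "v1", Kind: "ReplicationController", Name: "rc1", UID: "1"},
	})
}
```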
/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package garbagecollector

import (
	"fmt"
	"net/http"
	"net/http/httptest"
	"reflect"
	"strings"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	_ "k8s.io/kubernetes/pkg/apis/core/install"
	"k8s.io/utils/pointer"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/meta/testrestmapper"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/json"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
	"k8s.io/client-go/discovery"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/metadata"
	"k8s.io/client-go/metadata/metadatainformer"
	restclient "k8s.io/client-go/rest"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/controller-manager/pkg/informerfactory"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
)

type testRESTMapper struct {
	meta.RESTMapper
}

func (*testRESTMapper) Reset() {}

func TestGarbageCollectorConstruction(t *testing.T) {
	config := &restclient.Config{}
	tweakableRM := meta.NewDefaultRESTMapper(nil)
	rm := &testRESTMapper{meta.MultiRESTMapper{tweakableRM, testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}}
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	podResource := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}: {},
	}
	twoResources := map[schema.GroupVersionResource]struct{}{
		{Version: "v1", Resource: "pods"}:                     {},
		{Group: "tpr.io", Version: "v1", Resource: "unknown"}: {},
	}
	client := fake.NewSimpleClientset()

	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
	// No monitor will be constructed for the non-core resource, but the GC
	// construction will not fail.
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{},
		informerfactory.NewInformerFactory(sharedInformers, metadataInformers), alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	assert.Equal(t, 0, len(gc.dependencyGraphBuilder.monitors))

	// Make sure resource monitor syncing creates and stops resource monitors.
	tweakableRM.Add(schema.GroupVersionKind{Group: "tpr.io", Version: "v1", Kind: "unknown"}, nil)
	err = gc.resyncMonitors(twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))

	err = gc.resyncMonitors(podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))

	// Make sure the syncing mechanism also works after Run() has been called
	stopCh := make(chan struct{})
	defer close(stopCh)
	go gc.Run(1, stopCh)

	err = gc.resyncMonitors(twoResources)
	if err != nil {
		t.Errorf("Failed adding a monitor: %v", err)
	}
	assert.Equal(t, 2, len(gc.dependencyGraphBuilder.monitors))

	err = gc.resyncMonitors(podResource)
	if err != nil {
		t.Errorf("Failed removing a monitor: %v", err)
	}
	assert.Equal(t, 1, len(gc.dependencyGraphBuilder.monitors))
}

// fakeAction records information about requests to aid in testing.
type fakeAction struct {
	method string
	path   string
	query  string
}

// String returns method=path to aid in testing
func (f *fakeAction) String() string {
	return strings.Join([]string{f.method, f.path}, "=")
}

type FakeResponse struct {
	statusCode int
	content    []byte
}

// fakeActionHandler holds a list of fakeActions received
type fakeActionHandler struct {
	// statusCode and content returned by this handler for different method + path.
	response map[string]FakeResponse

	lock    sync.Mutex
	actions []fakeAction
}

// ServeHTTP logs the action that occurred and always returns the associated status code
func (f *fakeActionHandler) ServeHTTP(response http.ResponseWriter, request *http.Request) {
	func() {
		f.lock.Lock()
		defer f.lock.Unlock()

		f.actions = append(f.actions, fakeAction{method: request.Method, path: request.URL.Path, query: request.URL.RawQuery})
		fakeResponse, ok := f.response[request.Method+request.URL.Path]
		if !ok {
			fakeResponse.statusCode = 200
			fakeResponse.content = []byte(`{"apiVersion": "v1", "kind": "List"}`)
		}
		response.Header().Set("Content-Type", "application/json")
		response.WriteHeader(fakeResponse.statusCode)
		response.Write(fakeResponse.content)
	}()

	// This is to allow the fakeActionHandler to simulate a watch being opened
	if strings.Contains(request.URL.RawQuery, "watch=true") {
		hijacker, ok := response.(http.Hijacker)
		if !ok {
			return
		}
		connection, _, err := hijacker.Hijack()
		if err != nil {
			return
		}
		defer connection.Close()
		time.Sleep(30 * time.Second)
	}
}

// testServerAndClientConfig returns a server that listens and a config that can reference it
func testServerAndClientConfig(handler func(http.ResponseWriter, *http.Request)) (*httptest.Server, *restclient.Config) {
	srv := httptest.NewServer(http.HandlerFunc(handler))
	config := &restclient.Config{
		Host: srv.URL,
	}
	return srv, config
}

type garbageCollector struct {
	*GarbageCollector
	stop chan struct{}
}

func setupGC(t *testing.T, config *restclient.Config) garbageCollector {
	metadataClient, err := metadata.NewForConfig(config)
	if err != nil {
		t.Fatal(err)
	}

	client := fake.NewSimpleClientset()
	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(client, metadataClient, &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}, ignoredResources, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}
	stop := make(chan struct{})
	go sharedInformers.Start(stop)
	return garbageCollector{gc, stop}
}

func getPod(podName string, ownerReferences []metav1.OwnerReference) *v1.Pod {
	return &v1.Pod{
		TypeMeta: metav1.TypeMeta{
			Kind:       "Pod",
			APIVersion: "v1",
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:            podName,
			Namespace:       "ns1",
			UID:             "456",
			OwnerReferences: ownerReferences,
		},
	}
}

func serilizeOrDie(t *testing.T, object interface{}) []byte {
	data, err := json.Marshal(object)
	if err != nil {
		t.Fatal(err)
	}
	return data
}

// test the attemptToDeleteItem function making the expected actions.
func TestAttemptToDeleteItem(t *testing.T) {
	pod := getPod("ToBeDeletedPod", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "owner1",
			UID:        "123",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/owner1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/pods/ToBeDeletedPod": {
				200,
				serilizeOrDie(t, pod),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()

	gc := setupGC(t, clientConfig)
	defer close(gc.stop)

	item := &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners: nil,
	}
	err := gc.attemptToDeleteItem(item)
	if err != nil {
		t.Errorf("Unexpected Error: %v", err)
	}
	expectedActionSet := sets.NewString()
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/replicationcontrollers/owner1")
	expectedActionSet.Insert("DELETE=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")
	expectedActionSet.Insert("GET=/api/v1/namespaces/ns1/pods/ToBeDeletedPod")

	actualActionSet := sets.NewString()
	for _, action := range testHandler.actions {
		actualActionSet.Insert(action.String())
	}
	if !expectedActionSet.Equal(actualActionSet) {
		t.Errorf("expected actions:\n%v\n but got:\n%v\nDifference:\n%v", expectedActionSet,
			actualActionSet, expectedActionSet.Difference(actualActionSet))
	}
}

// verifyGraphInvariants verifies that all of a node's owners list the node as a
// dependent and vice versa. uidToNode has all the nodes in the graph.
func verifyGraphInvariants(scenario string, uidToNode map[types.UID]*node, t *testing.T) {
	for myUID, node := range uidToNode {
		for dependentNode := range node.dependents {
			found := false
			for _, owner := range dependentNode.owners {
				if owner.UID == myUID {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("scenario: %s: node %s has node %s as a dependent, but it's not present in the latter node's owners list", scenario, node.identity, dependentNode.identity)
			}
		}

		for _, owner := range node.owners {
			ownerNode, ok := uidToNode[owner.UID]
			if !ok {
				// It's possible that the owner node doesn't exist
				continue
			}
			if _, ok := ownerNode.dependents[node]; !ok {
				t.Errorf("node %s has node %s as an owner, but it's not present in the latter node's dependents list", node.identity, ownerNode.identity)
			}
		}
	}
}

func createEvent(eventType eventType, selfUID string, owners []string) event {
	var ownerReferences []metav1.OwnerReference
	for i := 0; i < len(owners); i++ {
		ownerReferences = append(ownerReferences, metav1.OwnerReference{UID: types.UID(owners[i])})
	}
	return event{
		eventType: eventType,
		obj: &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				UID:             types.UID(selfUID),
				OwnerReferences: ownerReferences,
			},
		},
	}
}

func TestProcessEvent(t *testing.T) {
	var testScenarios = []struct {
		name string
		// a series of events that will be supplied to the
		// GraphBuilder.graphChanges.
		events []event
	}{
		{
			name: "test1",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
			},
		},
		{
			name: "test2",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
		{
			name: "test3",
			events: []event{
				createEvent(addEvent, "1", []string{}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "4", []string{"3"}),
				createEvent(updateEvent, "2", []string{"4"}),
			},
		},
		{
			name: "reverse test2",
			events: []event{
				createEvent(addEvent, "4", []string{"2"}),
				createEvent(addEvent, "3", []string{"1", "2"}),
				createEvent(addEvent, "2", []string{"1"}),
				createEvent(addEvent, "1", []string{}),
				createEvent(deleteEvent, "2", []string{"doesn't matter"}),
			},
		},
	}

	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	for _, scenario := range testScenarios {
		dependencyGraphBuilder := &GraphBuilder{
			informersStarted: alwaysStarted,
			graphChanges:     workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
			uidToNode: &concurrentUIDToNode{
				uidToNodeLock: sync.RWMutex{},
				uidToNode:     make(map[types.UID]*node),
			},
			attemptToDelete:  workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
			absentOwnerCache: NewReferenceCache(2),
		}
		for i := 0; i < len(scenario.events); i++ {
			dependencyGraphBuilder.graphChanges.Add(&scenario.events[i])
			dependencyGraphBuilder.processGraphChanges()
			verifyGraphInvariants(scenario.name, dependencyGraphBuilder.uidToNode.uidToNode, t)
		}
	}
}

func BenchmarkReferencesDiffs(t *testing.B) {
	t.ReportAllocs()
	t.ResetTimer()
	for n := 0; n < t.N; n++ {
		old := []metav1.OwnerReference{{UID: "1"}, {UID: "2"}}
		new := []metav1.OwnerReference{{UID: "2"}, {UID: "3"}}
		referencesDiffs(old, new)
	}
}

// TestDependentsRace relies on golang's data race detector to check if there is
// a data race in the dependents field.
func TestDependentsRace(t *testing.T) {
	gc := setupGC(t, &restclient.Config{})
	defer close(gc.stop)

	const updates = 100
	owner := &node{dependents: make(map[*node]struct{})}
	ownerUID := types.UID("owner")
	gc.dependencyGraphBuilder.uidToNode.Write(owner)
	go func() {
		for i := 0; i < updates; i++ {
			dependent := &node{}
			gc.dependencyGraphBuilder.addDependentToOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
			gc.dependencyGraphBuilder.removeDependentFromOwners(dependent, []metav1.OwnerReference{{UID: ownerUID}})
		}
	}()
	go func() {
		gc.attemptToOrphan.Add(owner)
		for i := 0; i < updates; i++ {
			gc.attemptToOrphanWorker()
		}
	}()
}

func podToGCNode(pod *v1.Pod) *node {
	return &node{
		identity: objectReference{
			OwnerReference: metav1.OwnerReference{
				Kind:       pod.Kind,
				APIVersion: pod.APIVersion,
				Name:       pod.Name,
				UID:        pod.UID,
			},
			Namespace: pod.Namespace,
		},
		// owners are intentionally left empty. The attemptToDeleteItem routine should get the latest item from the server.
		owners: nil,
	}
}

func TestAbsentOwnerCache(t *testing.T) {
	rc1Pod1 := getPod("rc1Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
			Controller: pointer.BoolPtr(true),
		},
	})
	rc1Pod2 := getPod("rc1Pod2", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc1",
			UID:        "1",
			APIVersion: "v1",
			Controller: pointer.BoolPtr(false),
		},
	})
	rc2Pod1 := getPod("rc2Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc2",
			UID:        "2",
			APIVersion: "v1",
		},
	})
	rc3Pod1 := getPod("rc3Pod1", []metav1.OwnerReference{
		{
			Kind:       "ReplicationController",
			Name:       "rc3",
			UID:        "3",
			APIVersion: "v1",
		},
	})
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod1": {
				200,
				serilizeOrDie(t, rc1Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc1Pod2": {
				200,
				serilizeOrDie(t, rc1Pod2),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc2Pod1": {
				200,
				serilizeOrDie(t, rc2Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/pods/rc3Pod1": {
				200,
				serilizeOrDie(t, rc3Pod1),
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc1": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc2": {
				404,
				[]byte{},
			},
			"GET" + "/api/v1/namespaces/ns1/replicationcontrollers/rc3": {
				404,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	gc := setupGC(t, clientConfig)
	defer close(gc.stop)
	gc.absentOwnerCache = NewReferenceCache(2)
	gc.attemptToDeleteItem(podToGCNode(rc1Pod1))
	gc.attemptToDeleteItem(podToGCNode(rc2Pod1))
	// rc1 should already be in the cache, no request should be sent. rc1 should be promoted in the absentOwnerCache
	gc.attemptToDeleteItem(podToGCNode(rc1Pod2))
	// after this call, rc2 should be evicted from the absentOwnerCache
	gc.attemptToDeleteItem(podToGCNode(rc3Pod1))
	// check cache
	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc1", UID: "1", APIVersion: "v1"}}) {
		t.Errorf("expected rc1 to be in the cache")
	}
	if gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc2", UID: "2", APIVersion: "v1"}}) {
		t.Errorf("expected rc2 to not exist in the cache")
	}
	if !gc.absentOwnerCache.Has(objectReference{Namespace: "ns1", OwnerReference: metav1.OwnerReference{Kind: "ReplicationController", Name: "rc3", UID: "3", APIVersion: "v1"}}) {
		t.Errorf("expected rc3 to be in the cache")
	}
	// check the request sent to the server
	count := 0
	for _, action := range testHandler.actions {
		if action.String() == "GET=/api/v1/namespaces/ns1/replicationcontrollers/rc1" {
			count++
		}
	}
	if count != 1 {
		t.Errorf("expected only 1 GET rc1 request, got %d", count)
	}
}

func TestDeleteOwnerRefPatch(t *testing.T) {
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
				{UID: "2"},
				{UID: "3"},
			},
		},
	}
	originalData := serilizeOrDie(t, original)
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1"},
			},
		},
	}
	patch := deleteOwnerRefStrategicMergePatch("100", "2", "3")
	patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
	}
}

func TestUnblockOwnerReference(t *testing.T) {
	trueVar := true
	falseVar := false
	original := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &trueVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	originalData := serilizeOrDie(t, original)
	expected := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			UID: "100",
			OwnerReferences: []metav1.OwnerReference{
				{UID: "1", BlockOwnerDeletion: &falseVar},
				{UID: "2", BlockOwnerDeletion: &falseVar},
				{UID: "3"},
			},
		},
	}
	accessor, err := meta.Accessor(&original)
	if err != nil {
		t.Fatal(err)
	}
	n := node{
		owners: accessor.GetOwnerReferences(),
	}
	patch, err := n.unblockOwnerReferencesStrategicMergePatch()
	if err != nil {
		t.Fatal(err)
	}
	patched, err := strategicpatch.StrategicMergePatch(originalData, patch, v1.Pod{})
	if err != nil {
		t.Fatal(err)
	}
	var got v1.Pod
	if err := json.Unmarshal(patched, &got); err != nil {
		t.Fatal(err)
	}
	if !reflect.DeepEqual(expected, got) {
		t.Errorf("expected: %#v,\ngot: %#v", expected, got)
		t.Errorf("expected: %#v,\ngot: %#v", expected.OwnerReferences, got.OwnerReferences)
		for _, ref := range got.OwnerReferences {
			t.Errorf("ref.UID=%s, ref.BlockOwnerDeletion=%v", ref.UID, *ref.BlockOwnerDeletion)
		}
	}
}

func TestOrphanDependentsFailure(t *testing.T) {
	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"PATCH" + "/api/v1/namespaces/ns1/pods/pod": {
				409,
				[]byte{},
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()

	gc := setupGC(t, clientConfig)
	defer close(gc.stop)

	dependents := []*node{
		{
			identity: objectReference{
				OwnerReference: metav1.OwnerReference{
					Kind:       "Pod",
					APIVersion: "v1",
					Name:       "pod",
				},
				Namespace: "ns1",
			},
		},
	}
	err := gc.orphanDependents(objectReference{}, dependents)
	expected := `the server reported a conflict`
	if err == nil || !strings.Contains(err.Error(), expected) {
		if err != nil {
			t.Errorf("expected error contains text %q, got %q", expected, err.Error())
		} else {
			t.Errorf("expected error contains text %q, got nil", expected)
		}
	}
}

// TestGetDeletableResources ensures GetDeletableResources always returns
// something usable regardless of discovery output.
func TestGetDeletableResources(t *testing.T) {
	tests := map[string]struct {
		serverResources    []*metav1.APIResourceList
		err                error
		deletableResources map[schema.GroupVersionResource]struct{}
	}{
		"no error": {
			serverResources: []*metav1.APIResourceList{
				{
					// Valid GroupVersion
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
				{
					// Invalid GroupVersion, should be ignored
					GroupVersion: "foo//whatever",
					APIResources: []metav1.APIResource{
						{Name: "bars", Namespaced: true, Kind: "Bar", Verbs: metav1.Verbs{"delete", "list", "watch"}},
					},
				},
				{
					// Valid GroupVersion, missing required verbs, should be ignored
					GroupVersion: "acme/v1",
					APIResources: []metav1.APIResource{
						{Name: "widgets", Namespaced: true, Kind: "Widget", Verbs: metav1.Verbs{"delete"}},
					},
				},
			},
			err: nil,
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"nonspecific failure, includes usable results": {
			serverResources: []*metav1.APIResourceList{
				{
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
			},
			err: fmt.Errorf("internal error"),
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"partial discovery failure, includes usable results": {
			serverResources: []*metav1.APIResourceList{
				{
					GroupVersion: "apps/v1",
					APIResources: []metav1.APIResource{
						{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
						{Name: "services", Namespaced: true, Kind: "Service"},
					},
				},
			},
			err: &discovery.ErrGroupDiscoveryFailed{
				Groups: map[schema.GroupVersion]error{
					{Group: "foo", Version: "v1"}: fmt.Errorf("discovery failure"),
				},
			},
			deletableResources: map[schema.GroupVersionResource]struct{}{
				{Group: "apps", Version: "v1", Resource: "pods"}: {},
			},
		},
		"discovery failure, no results": {
			serverResources:    nil,
			err:                fmt.Errorf("internal error"),
			deletableResources: map[schema.GroupVersionResource]struct{}{},
		},
	}

	for name, test := range tests {
		t.Logf("testing %q", name)
		client := &fakeServerResources{
			PreferredResources: test.serverResources,
			Error:              test.err,
		}
		actual := GetDeletableResources(client)
		if !reflect.DeepEqual(test.deletableResources, actual) {
			t.Errorf("expected resources:\n%v\ngot:\n%v", test.deletableResources, actual)
		}
	}
}

// TestGarbageCollectorSync ensures that a discovery client error
// will not cause the garbage collector to block infinitely.
func TestGarbageCollectorSync(t *testing.T) {
	serverResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	unsyncableServerResources := []*metav1.APIResourceList{
		{
			GroupVersion: "v1",
			APIResources: []metav1.APIResource{
				{Name: "pods", Namespaced: true, Kind: "Pod", Verbs: metav1.Verbs{"delete", "list", "watch"}},
				{Name: "secrets", Namespaced: true, Kind: "Secret", Verbs: metav1.Verbs{"delete", "list", "watch"}},
			},
		},
	}
	fakeDiscoveryClient := &fakeServerResources{
		PreferredResources: serverResources,
		Error:              nil,
		Lock:               sync.Mutex{},
		InterfaceUsedCount: 0,
	}

	testHandler := &fakeActionHandler{
		response: map[string]FakeResponse{
			"GET" + "/api/v1/pods": {
				200,
				[]byte("{}"),
			},
			"GET" + "/api/v1/secrets": {
				404,
				[]byte("{}"),
			},
		},
	}
	srv, clientConfig := testServerAndClientConfig(testHandler.ServeHTTP)
	defer srv.Close()
	clientConfig.ContentConfig.NegotiatedSerializer = nil
	client, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}

	rm := &testRESTMapper{testrestmapper.TestOnlyStaticRESTMapper(legacyscheme.Scheme)}
	metadataClient, err := metadata.NewForConfig(clientConfig)
	if err != nil {
		t.Fatal(err)
	}

	sharedInformers := informers.NewSharedInformerFactory(client, 0)
	alwaysStarted := make(chan struct{})
	close(alwaysStarted)
	gc, err := NewGarbageCollector(client, metadataClient, rm, map[schema.GroupResource]struct{}{}, sharedInformers, alwaysStarted)
	if err != nil {
		t.Fatal(err)
	}

	stopCh := make(chan struct{})
	defer close(stopCh)
	go gc.Run(1, stopCh)
	// The pseudo-code of GarbageCollector.Sync():
	// GarbageCollector.Sync(client, period, stopCh):
	//    wait.Until() loops with `period` until the `stopCh` is closed:
	//        wait.PollImmediateUntil() loops with 100ms (hardcoded) until the `stopCh` is closed:
	//            GetDeletableResources()
	//            gc.resyncMonitors()
	//            cache.WaitForNamedCacheSync() loops with `syncedPollPeriod` (hardcoded to 100ms), until either its stop channel is closed after `period`, or all caches synced.
	//
	// Setting the period to 200ms allows the WaitForCacheSync() to check
	// for cache sync ~2 times in every wait.PollImmediateUntil() loop.
	//
	// The 1s sleep in the test allows GetDeletableResources and
	// gc.resyncMonitors to run ~5 times to ensure the changes to the
	// fakeDiscoveryClient are picked up.
	go gc.Sync(fakeDiscoveryClient, 200*time.Millisecond, stopCh)

	// Wait until the sync discovers the initial resources
	time.Sleep(1 * time.Second)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to be running but it is blocked: %v", err)
	}

	// Simulate the discovery client returning an error
	fakeDiscoveryClient.setPreferredResources(nil)
	fakeDiscoveryClient.setError(fmt.Errorf("error calling discoveryClient.ServerPreferredResources()"))

	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)

	// Remove the error from being returned and see if the garbage collector sync is still working
	fakeDiscoveryClient.setPreferredResources(serverResources)
	fakeDiscoveryClient.setError(nil)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}

	// Simulate the discovery client returning a resource the restmapper can resolve, but will not sync caches
	fakeDiscoveryClient.setPreferredResources(unsyncableServerResources)
	fakeDiscoveryClient.setError(nil)

	// Wait until sync discovers the change
	time.Sleep(1 * time.Second)

	// Put the resources back to normal and ensure garbage collector sync recovers
	fakeDiscoveryClient.setPreferredResources(serverResources)
	fakeDiscoveryClient.setError(nil)

	err = expectSyncNotBlocked(fakeDiscoveryClient, &gc.workerLock)
	if err != nil {
		t.Fatalf("Expected garbagecollector.Sync to still be running but it is blocked: %v", err)
	}
}

func expectSyncNotBlocked(fakeDiscoveryClient *fakeServerResources, workerLock *sync.RWMutex) error {
	before := fakeDiscoveryClient.getInterfaceUsedCount()
	t := 1 * time.Second
	time.Sleep(t)
	after := fakeDiscoveryClient.getInterfaceUsedCount()
	if before == after {
		return fmt.Errorf("discoveryClient.ServerPreferredResources() called %d times over %v", after-before, t)
	}

	workerLockAcquired := make(chan struct{})
	go func() {
		workerLock.Lock()
		defer workerLock.Unlock()
		close(workerLockAcquired)
	}()
	select {
	case <-workerLockAcquired:
		return nil
	case <-time.After(t):
		return fmt.Errorf("workerLock blocked for at least %v", t)
	}
}

type fakeServerResources struct {
	PreferredResources []*metav1.APIResourceList
	Error              error
	Lock               sync.Mutex
	InterfaceUsedCount int
}

func (*fakeServerResources) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) {
	return nil, nil
}

// Deprecated: use ServerGroupsAndResources instead.
func (*fakeServerResources) ServerResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}

func (*fakeServerResources) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) {
	return nil, nil, nil
}

func (f *fakeServerResources) ServerPreferredResources() ([]*metav1.APIResourceList, error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.InterfaceUsedCount++
	return f.PreferredResources, f.Error
}

func (f *fakeServerResources) setPreferredResources(resources []*metav1.APIResourceList) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.PreferredResources = resources
}

func (f *fakeServerResources) setError(err error) {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	f.Error = err
}

func (f *fakeServerResources) getInterfaceUsedCount() int {
	f.Lock.Lock()
	defer f.Lock.Unlock()
	return f.InterfaceUsedCount
}

func (*fakeServerResources) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) {
	return nil, nil
}