From e44fb7333e7adc1626268b480bbaa2754975deb1 Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Thu, 23 May 2019 00:00:35 -0700
Subject: [PATCH 1/4] Define Alpha feature gate for Load Balancer finalizer

---
 pkg/features/kube_features.go | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go
index c70e965c158..b243834b25f 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -453,6 +453,12 @@ const (
 	// Enables the regional PD feature on GCE.
 	deprecatedGCERegionalPersistentDisk featuregate.Feature = "GCERegionalPersistentDisk"
 
+	// owner: @MrHohn
+	// alpha: v1.15
+	//
+	// Enables Finalizer Protection for Service LoadBalancers.
+	ServiceLoadBalancerFinalizer featuregate.Feature = "ServiceLoadBalancerFinalizer"
+
 	// owner: @RobertKrawitz
 	// alpha: v1.15
 	//
@@ -535,6 +541,7 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
 	TTLAfterFinished:    {Default: false, PreRelease: featuregate.Alpha},
 	KubeletPodResources: {Default: false, PreRelease: featuregate.Alpha},
 	WindowsGMSA:         {Default: false, PreRelease: featuregate.Alpha},
+	ServiceLoadBalancerFinalizer: {Default: false, PreRelease: featuregate.Alpha},
 	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
 
 	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
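For context before the next patch: the gate defined above is consumed through the standard feature-gate query, and the controller changes that follow check it inline. A minimal sketch of that check, using the same import aliases as service_controller.go below (the wrapper name here is hypothetical, shown only to isolate the call):

    // Import aliases as used in service_controller.go:
    //   utilfeature  "k8s.io/apiserver/pkg/util/feature"
    //   kubefeatures "k8s.io/kubernetes/pkg/features"
    func finalizerProtectionEnabled() bool {
        // Alpha gates default to false, so this reports true only when the
        // cluster was started with ServiceLoadBalancerFinalizer=true.
        return utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceLoadBalancerFinalizer)
    }
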
From aa3f81d657c1d4b7c789352af7179c08508adc3c Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Thu, 23 May 2019 00:01:08 -0700
Subject: [PATCH 2/4] Add Load Balancer finalizer support

- Always try to remove finalizer upon load balancer cleanup
- Add finalizer prior to load balancer creation (feature gated)
- Cache logic fix-ups
- Event type/message fix-ups
- Use runtime.HandleError() on eaten errors

Co-authored-by: Josh Horwitz
---
 pkg/controller/service/service_controller.go | 247 ++++++++++++------
 .../cloud-provider/service/helpers/helper.go |  17 +-
 2 files changed, 180 insertions(+), 84 deletions(-)

diff --git a/pkg/controller/service/service_controller.go b/pkg/controller/service/service_controller.go
index 68fe82de0f6..87839dcf0dd 100644
--- a/pkg/controller/service/service_controller.go
+++ b/pkg/controller/service/service_controller.go
@@ -39,11 +39,13 @@ import (
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 	cloudprovider "k8s.io/cloud-provider"
+	servicehelper "k8s.io/cloud-provider/service/helpers"
 	"k8s.io/klog"
 	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
 	"k8s.io/kubernetes/pkg/controller"
 	kubefeatures "k8s.io/kubernetes/pkg/features"
 	"k8s.io/kubernetes/pkg/util/metrics"
+	"k8s.io/kubernetes/pkg/util/slice"
 )
 
 const (
@@ -135,15 +137,28 @@ func New(
 	serviceInformer.Informer().AddEventHandlerWithResyncPeriod(
 		cache.ResourceEventHandlerFuncs{
-			AddFunc: s.enqueueService,
-			UpdateFunc: func(old, cur interface{}) {
-				oldSvc, ok1 := old.(*v1.Service)
-				curSvc, ok2 := cur.(*v1.Service)
-				if ok1 && ok2 && s.needsUpdate(oldSvc, curSvc) {
+			AddFunc: func(cur interface{}) {
+				svc, ok := cur.(*v1.Service)
+				if ok && (wantsLoadBalancer(svc) || needsCleanup(svc)) {
 					s.enqueueService(cur)
 				}
 			},
-			DeleteFunc: s.enqueueService,
+			UpdateFunc: func(old, cur interface{}) {
+				oldSvc, ok1 := old.(*v1.Service)
+				curSvc, ok2 := cur.(*v1.Service)
+				if ok1 && ok2 && (s.needsUpdate(oldSvc, curSvc) || needsCleanup(curSvc)) {
+					s.enqueueService(cur)
+				}
+			},
+			DeleteFunc: func(old interface{}) {
+				if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceLoadBalancerFinalizer) {
+					// No need to handle deletion event if finalizer feature gate is
+					// enabled. Because the deletion would be handled by the update
+					// path when the deletion timestamp is added.
+					return
+				}
+				s.enqueueService(old)
+			},
 		},
 		serviceSyncPeriod,
 	)
@@ -160,7 +175,7 @@ func New(
 func (s *ServiceController) enqueueService(obj interface{}) {
 	key, err := controller.KeyFunc(obj)
 	if err != nil {
-		klog.Errorf("Couldn't get key for object %#v: %v", obj, err)
+		runtime.HandleError(fmt.Errorf("couldn't get key for object %#v: %v", obj, err))
 		return
 	}
 	s.queue.Add(key)
@@ -235,95 +250,112 @@ func (s *ServiceController) init() error {
 	return nil
 }
 
-// processServiceUpdate operates loadbalancers for the incoming service accordingly.
+// processServiceCreateOrUpdate operates loadbalancers for the incoming service accordingly.
 // Returns an error if processing the service update failed.
-func (s *ServiceController) processServiceUpdate(cachedService *cachedService, service *v1.Service, key string) error {
-	if cachedService.state != nil {
-		if cachedService.state.UID != service.UID {
-			err := s.processLoadBalancerDelete(cachedService, key)
-			if err != nil {
-				return err
-			}
+func (s *ServiceController) processServiceCreateOrUpdate(service *v1.Service, key string) error {
+	// TODO(@MrHohn): Remove the cache once we get rid of the non-finalizer deletion
+	// path. Ref https://github.com/kubernetes/enhancements/issues/980.
+	cachedService := s.cache.getOrCreate(key)
+	if cachedService.state != nil && cachedService.state.UID != service.UID {
+		// This happens only when a service is deleted and re-created
+		// in a short period, which is only possible when it doesn't
+		// contain finalizer.
+		if err := s.processLoadBalancerDelete(cachedService.state, key); err != nil {
+			return err
 		}
 	}
-	// cache the service, we need the info for service deletion
+	// Always cache the service, we need the info for service deletion in case
+	// when load balancer cleanup is not handled via finalizer.
 	cachedService.state = service
-	err := s.syncLoadBalancerIfNeeded(key, service)
+	op, err := s.syncLoadBalancerIfNeeded(service, key)
 	if err != nil {
-		eventType := "CreatingLoadBalancerFailed"
-		message := "Error creating load balancer (will retry): "
-		if !wantsLoadBalancer(service) {
-			eventType = "CleanupLoadBalancerFailed"
-			message = "Error cleaning up load balancer (will retry): "
-		}
-		message += err.Error()
-		s.eventRecorder.Event(service, v1.EventTypeWarning, eventType, message)
+		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "SyncLoadBalancerFailed", "Error syncing load balancer: %v", err)
 		return err
 	}
-	// Always update the cache upon success.
-	// NOTE: Since we update the cached service if and only if we successfully
-	// processed it, a cached service being nil implies that it hasn't yet
-	// been successfully processed.
-	s.cache.set(key, cachedService)
+	if op == deleteLoadBalancer {
+		// Only delete the cache upon successful load balancer deletion.
+		s.cache.delete(key)
+	}
 
 	return nil
 }
 
+type loadBalancerOperation int
+
+const (
+	deleteLoadBalancer loadBalancerOperation = iota
+	ensureLoadBalancer
+)
+
 // syncLoadBalancerIfNeeded ensures that service's status is synced up with loadbalancer
 // i.e. creates loadbalancer for service if requested and deletes loadbalancer if the service
 // doesn't want a loadbalancer no more. Returns whatever error occurred.
-func (s *ServiceController) syncLoadBalancerIfNeeded(key string, service *v1.Service) error {
+func (s *ServiceController) syncLoadBalancerIfNeeded(service *v1.Service, key string) (loadBalancerOperation, error) {
 	// Note: It is safe to just call EnsureLoadBalancer. But, on some clouds that requires a delete & create,
 	// which may involve service interruption. Also, we would like user-friendly events.
 
 	// Save the state so we can avoid a write if it doesn't change
-	previousState := v1helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
-	var newState *v1.LoadBalancerStatus
+	previousStatus := v1helper.LoadBalancerStatusDeepCopy(&service.Status.LoadBalancer)
+	var newStatus *v1.LoadBalancerStatus
+	var op loadBalancerOperation
 	var err error
 
-	if !wantsLoadBalancer(service) {
+	if !wantsLoadBalancer(service) || needsCleanup(service) {
+		// Delete the load balancer if service no longer wants one, or if service needs cleanup.
+		op = deleteLoadBalancer
+		newStatus = &v1.LoadBalancerStatus{}
 		_, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service)
 		if err != nil {
-			return fmt.Errorf("error getting LB for service %s: %v", key, err)
+			return op, fmt.Errorf("failed to check if load balancer exists before cleanup: %v", err)
 		}
 		if exists {
-			klog.Infof("Deleting existing load balancer for service %s that no longer needs a load balancer.", key)
+			klog.V(2).Infof("Deleting existing load balancer for service %s", key)
 			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
 			if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
-				return err
+				return op, fmt.Errorf("failed to delete load balancer: %v", err)
 			}
-			s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
 		}
-
-		newState = &v1.LoadBalancerStatus{}
+		// Always try to remove finalizer when load balancer is deleted.
+		// It will be a no-op if finalizer does not exist.
+		// Note this also clears up finalizer if the cluster is downgraded
+		// from a version that attaches finalizer to a version that doesn't.
+		if err := s.removeFinalizer(service); err != nil {
+			return op, fmt.Errorf("failed to remove load balancer cleanup finalizer: %v", err)
+		}
+		s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
 	} else {
-		klog.V(2).Infof("Ensuring LB for service %s", key)
-
-		// TODO: We could do a dry-run here if wanted to avoid the spurious cloud-calls & events when we restart
-
+		// Create or update the load balancer if service wants one.
+		op = ensureLoadBalancer
+		klog.V(2).Infof("Ensuring load balancer for service %s", key)
 		s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuringLoadBalancer", "Ensuring load balancer")
-		newState, err = s.ensureLoadBalancer(service)
+		if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.ServiceLoadBalancerFinalizer) {
+			// Always try to add finalizer prior to load balancer creation.
+			// It will be a no-op if finalizer already exists.
+			// Note this also retrospectively puts on finalizer if the cluster
+			// is upgraded from a version that doesn't attach finalizer to a
+			// version that does.
+			if err := s.addFinalizer(service); err != nil {
+				return op, fmt.Errorf("failed to add load balancer cleanup finalizer: %v", err)
+			}
+		}
+		newStatus, err = s.ensureLoadBalancer(service)
 		if err != nil {
-			return fmt.Errorf("failed to ensure load balancer for service %s: %v", key, err)
+			return op, fmt.Errorf("failed to ensure load balancer: %v", err)
 		}
 		s.eventRecorder.Event(service, v1.EventTypeNormal, "EnsuredLoadBalancer", "Ensured load balancer")
 	}
 
-	// If there are any changes to the status then patch the service.
-	if !v1helper.LoadBalancerStatusEqual(previousState, newState) {
-		// Make a copy so we don't mutate the shared informer cache
-		updated := service.DeepCopy()
-		updated.Status.LoadBalancer = *newState
-
-		if _, err := patch(s.kubeClient.CoreV1(), service, updated); err != nil {
-			return fmt.Errorf("failed to patch status for service %s: %v", key, err)
+	if err := s.patchStatus(service, previousStatus, newStatus); err != nil {
+		// Only retry error that isn't not found:
+		// - Not found error mostly happens when service disappears right after
+		//   we remove the finalizer.
+		// - We can't patch status on non-exist service anyway.
+		if !errors.IsNotFound(err) {
+			return op, fmt.Errorf("failed to update load balancer status: %v", err)
 		}
-		klog.V(4).Infof("Successfully patched status for service %s", key)
-	} else {
-		klog.V(4).Infof("Not persisting unchanged LoadBalancerStatus for service %s to registry.", key)
 	}
 
-	return nil
+	return op, nil
 }
 
 func (s *ServiceController) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
@@ -334,7 +366,7 @@ func (s *ServiceController) ensureLoadBalancer(service *v1.Service) (*v1.LoadBalancerStatus, error) {
 
 	// If there are no available nodes for LoadBalancer service, make a EventTypeWarning event for it.
 	if len(nodes) == 0 {
-		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name)
+		s.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer")
 	}
 
 	// - Only one protocol supported per service
@@ -407,6 +439,12 @@ func (s *serviceCache) delete(serviceName string) {
 	delete(s.serviceMap, serviceName)
 }
 
+// needsCleanup checks if load balancer needs to be cleaned up as indicated by finalizer.
+func needsCleanup(service *v1.Service) bool {
+	return service.ObjectMeta.DeletionTimestamp != nil && servicehelper.HasLBFinalizer(service)
+}
+
+// needsUpdate checks if load balancer needs to be updated due to change in attributes.
 func (s *ServiceController) needsUpdate(oldService *v1.Service, newService *v1.Service) bool {
 	if !wantsLoadBalancer(oldService) && !wantsLoadBalancer(newService) {
 		return false
 	}
@@ -592,7 +630,7 @@ func getNodeConditionPredicate() corelisters.NodeConditionPredicate {
 func (s *ServiceController) nodeSyncLoop() {
 	newHosts, err := s.nodeLister.ListWithPredicate(getNodeConditionPredicate())
 	if err != nil {
-		klog.Errorf("Failed to retrieve current set of nodes from node lister: %v", err)
+		runtime.HandleError(fmt.Errorf("Failed to retrieve current set of nodes from node lister: %v", err))
 		return
 	}
 	if nodeSlicesEqualForLB(newHosts, s.knownHosts) {
@@ -602,7 +640,7 @@
 		return
 	}
 
-	klog.Infof("Detected change in list of current cluster nodes. New node set: %v",
+	klog.V(2).Infof("Detected change in list of current cluster nodes. New node set: %v",
New node set: %v", nodeNames(newHosts)) // Try updating all services, and save the ones that fail to try again next @@ -610,7 +648,7 @@ func (s *ServiceController) nodeSyncLoop() { s.servicesToUpdate = s.cache.allServices() numServices := len(s.servicesToUpdate) s.servicesToUpdate = s.updateLoadBalancerHosts(s.servicesToUpdate, newHosts) - klog.Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes", + klog.V(2).Infof("Successfully updated %d out of %d load balancers to direct traffic to the updated set of nodes", numServices-len(s.servicesToUpdate), numServices) s.knownHosts = newHosts @@ -626,7 +664,7 @@ func (s *ServiceController) updateLoadBalancerHosts(services []*v1.Service, host return } if err := s.lockedUpdateLoadBalancerHosts(service, hosts); err != nil { - klog.Errorf("External error while updating load balancer: %v.", err) + runtime.HandleError(fmt.Errorf("failed to update load balancer hosts for service %s/%s: %v", service.Namespace, service.Name, err)) servicesToRetry = append(servicesToRetry, service) } }() @@ -646,7 +684,7 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h if err == nil { // If there are no available nodes for LoadBalancer service, make a EventTypeWarning event for it. if len(hosts) == 0 { - s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer service %s/%s", service.Namespace, service.Name) + s.eventRecorder.Event(service, v1.EventTypeWarning, "UnAvailableLoadBalancer", "There are no available nodes for LoadBalancer") } else { s.eventRecorder.Event(service, v1.EventTypeNormal, "UpdatedLoadBalancer", "Updated load balancer with new hosts") } @@ -655,12 +693,12 @@ func (s *ServiceController) lockedUpdateLoadBalancerHosts(service *v1.Service, h // It's only an actual error if the load balancer still exists. if _, exists, err := s.balancer.GetLoadBalancer(context.TODO(), s.clusterName, service); err != nil { - klog.Errorf("External error while checking if load balancer %q exists: name, %v", s.balancer.GetLoadBalancerName(context.TODO(), s.clusterName, service), err) + runtime.HandleError(fmt.Errorf("failed to check if load balancer exists for service %s/%s: %v", service.Namespace, service.Name, err)) } else if !exists { return nil } - s.eventRecorder.Eventf(service, v1.EventTypeWarning, "LoadBalancerUpdateFailed", "Error updating load balancer with new hosts %v: %v", nodeNames(hosts), err) + s.eventRecorder.Eventf(service, v1.EventTypeWarning, "UpdateLoadBalancerFailed", "Error updating load balancer with new hosts %v: %v", nodeNames(hosts), err) return err } @@ -677,7 +715,6 @@ func loadBalancerIPsAreEqual(oldService, newService *v1.Service) bool { // invoked concurrently with the same key. func (s *ServiceController) syncService(key string) error { startTime := time.Now() - var cachedService *cachedService defer func() { klog.V(4).Infof("Finished syncing service %q (%v)", key, time.Since(startTime)) }() @@ -692,44 +729,88 @@ func (s *ServiceController) syncService(key string) error { switch { case errors.IsNotFound(err): // service absence in store means watcher caught the deletion, ensure LB info is cleaned - klog.Infof("Service has been deleted %v. 
 		err = s.processServiceDeletion(key)
 	case err != nil:
-		klog.Infof("Unable to retrieve service %v from store: %v", key, err)
+		runtime.HandleError(fmt.Errorf("Unable to retrieve service %v from store: %v", key, err))
 	default:
-		cachedService = s.cache.getOrCreate(key)
-		err = s.processServiceUpdate(cachedService, service, key)
+		err = s.processServiceCreateOrUpdate(service, key)
 	}
 
 	return err
 }
 
-// Returns an error if processing the service deletion failed, along with a time.Duration
-// indicating whether processing should be retried; zero means no-retry; otherwise
-// we should retry after that Duration.
 func (s *ServiceController) processServiceDeletion(key string) error {
 	cachedService, ok := s.cache.get(key)
 	if !ok {
-		klog.Errorf("service %s not in cache even though the watcher thought it was. Ignoring the deletion", key)
+		// Cache does not contains the key means:
+		// - We didn't create a Load Balancer for the deleted service at all.
+		// - We already deleted the Load Balancer that was created for the service.
+		// In both cases we have nothing left to do.
 		return nil
 	}
-	return s.processLoadBalancerDelete(cachedService, key)
+	klog.V(2).Infof("Service %v has been deleted. Attempting to cleanup load balancer resources", key)
+	if err := s.processLoadBalancerDelete(cachedService.state, key); err != nil {
+		return err
+	}
+	s.cache.delete(key)
+	return nil
 }
 
-func (s *ServiceController) processLoadBalancerDelete(cachedService *cachedService, key string) error {
-	service := cachedService.state
+func (s *ServiceController) processLoadBalancerDelete(service *v1.Service, key string) error {
 	// delete load balancer info only if the service type is LoadBalancer
 	if !wantsLoadBalancer(service) {
 		return nil
 	}
 	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletingLoadBalancer", "Deleting load balancer")
-	err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service)
-	if err != nil {
-		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "DeletingLoadBalancerFailed", "Error deleting load balancer (will retry): %v", err)
+	if err := s.balancer.EnsureLoadBalancerDeleted(context.TODO(), s.clusterName, service); err != nil {
+		s.eventRecorder.Eventf(service, v1.EventTypeWarning, "DeleteLoadBalancerFailed", "Error deleting load balancer: %v", err)
 		return err
 	}
 	s.eventRecorder.Event(service, v1.EventTypeNormal, "DeletedLoadBalancer", "Deleted load balancer")
-	s.cache.delete(key)
-
 	return nil
 }
+
+// addFinalizer patches the service to add finalizer.
+func (s *ServiceController) addFinalizer(service *v1.Service) error {
+	if servicehelper.HasLBFinalizer(service) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.ObjectMeta.Finalizers = append(updated.ObjectMeta.Finalizers, servicehelper.LoadBalancerCleanupFinalizer)
+
+	klog.V(2).Infof("Adding finalizer to service %s/%s", updated.Namespace, updated.Name)
+	_, err := patch(s.kubeClient.CoreV1(), service, updated)
+	return err
+}
+
+// removeFinalizer patches the service to remove finalizer.
+func (s *ServiceController) removeFinalizer(service *v1.Service) error {
+	if !servicehelper.HasLBFinalizer(service) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.ObjectMeta.Finalizers = slice.RemoveString(updated.ObjectMeta.Finalizers, servicehelper.LoadBalancerCleanupFinalizer, nil)
+
+	klog.V(2).Infof("Removing finalizer from service %s/%s", updated.Namespace, updated.Name)
+	_, err := patch(s.kubeClient.CoreV1(), service, updated)
+	return err
+}
+
+// patchStatus patches the service with the given LoadBalancerStatus.
+func (s *ServiceController) patchStatus(service *v1.Service, previousStatus, newStatus *v1.LoadBalancerStatus) error {
+	if v1helper.LoadBalancerStatusEqual(previousStatus, newStatus) {
+		return nil
+	}
+
+	// Make a copy so we don't mutate the shared informer cache.
+	updated := service.DeepCopy()
+	updated.Status.LoadBalancer = *newStatus
+
+	klog.V(2).Infof("Patching status for service %s/%s", updated.Namespace, updated.Name)
+	_, err := patch(s.kubeClient.CoreV1(), service, updated)
+	return err
+}
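The controller changes above hinge on one decision: a Service needs cloud-side cleanup exactly when its deletionTimestamp is set and it still carries the cleanup finalizer. A condensed restatement of needsCleanup from the hunks above, with the finalizer string inlined for readability (this sketch is illustrative, not part of the patch):

    // Mirrors needsCleanup/HasLBFinalizer from this patch.
    func needsCleanupSketch(svc *v1.Service) bool {
        if svc.ObjectMeta.DeletionTimestamp == nil {
            return false // object is not being deleted
        }
        for _, f := range svc.ObjectMeta.Finalizers {
            if f == "service.kubernetes.io/load-balancer-cleanup" {
                return true // this controller still owes a cloud-side cleanup
            }
        }
        return false
    }

Once removeFinalizer patches the finalizer away, the API server completes the deletion on its own; the helper package diff below defines the finalizer and the HasLBFinalizer check.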
diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/helper.go b/staging/src/k8s.io/cloud-provider/service/helpers/helper.go
index 680ad3935dd..524880d0d13 100644
--- a/staging/src/k8s.io/cloud-provider/service/helpers/helper.go
+++ b/staging/src/k8s.io/cloud-provider/service/helpers/helper.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"strings"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	utilnet "k8s.io/utils/net"
 )
 
@@ -31,6 +31,11 @@ in order for in-tree cloud providers to not depend on internal packages.
 
 const (
 	defaultLoadBalancerSourceRanges = "0.0.0.0/0"
+
+	// LoadBalancerCleanupFinalizer is the finalizer added to load balancer
+	// services to ensure the Service resource is not fully deleted until
+	// the correlating load balancer resources are deleted.
+	LoadBalancerCleanupFinalizer = "service.kubernetes.io/load-balancer-cleanup"
 )
 
 // IsAllowAll checks whether the utilnet.IPNet allows traffic from 0.0.0.0/0
@@ -100,3 +105,13 @@ func NeedsHealthCheck(service *v1.Service) bool {
 	}
 	return RequestsOnlyLocalTraffic(service)
 }
+
+// HasLBFinalizer checks if service contains LoadBalancerCleanupFinalizer.
+func HasLBFinalizer(service *v1.Service) bool {
+	for _, finalizer := range service.ObjectMeta.Finalizers {
+		if finalizer == LoadBalancerCleanupFinalizer {
+			return true
+		}
+	}
+	return false
+}
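Taken together, the lifecycle with the gate enabled is: addFinalizer patches the Service before EnsureLoadBalancer, the finalizer blocks full deletion until cleanup succeeds, and removeFinalizer releases the object. A Service the controller has ensured would look roughly like this (name and namespace are made up for illustration):

    svc := &v1.Service{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "example",
            Namespace: "default",
            // Set by addFinalizer prior to the cloud call; removed only
            // after EnsureLoadBalancerDeleted succeeds.
            Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
        },
        Spec: v1.ServiceSpec{Type: v1.ServiceTypeLoadBalancer},
    }

The unit tests in the next patch exercise exactly these transitions.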
From 2ab3681648326372fbfbcc65d3b9bae8b7753a90 Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Thu, 23 May 2019 00:01:37 -0700
Subject: [PATCH 3/4] Fix-up and add unit test for Load Balancer finalizer

---
 .../service/service_controller_test.go | 574 ++++++++++++++++--
 .../service/helpers/helper_test.go      |  52 +-
 2 files changed, 562 insertions(+), 64 deletions(-)

diff --git a/pkg/controller/service/service_controller_test.go b/pkg/controller/service/service_controller_test.go
index c8bb1eb2302..c578879be82 100644
--- a/pkg/controller/service/service_controller_test.go
+++ b/pkg/controller/service/service_controller_test.go
@@ -22,19 +22,24 @@ import (
 	"reflect"
 	"strings"
 	"testing"
+	"time"
 
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	utilfeature "k8s.io/apiserver/pkg/util/feature"
 	"k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes/fake"
 	core "k8s.io/client-go/testing"
 	"k8s.io/client-go/tools/record"
 	fakecloud "k8s.io/cloud-provider/fake"
+	servicehelper "k8s.io/cloud-provider/service/helpers"
+	featuregatetesting "k8s.io/component-base/featuregate/testing"
 	"k8s.io/kubernetes/pkg/api/testapi"
 	"k8s.io/kubernetes/pkg/controller"
+	"k8s.io/kubernetes/pkg/features"
 )
 
 const region = "us-central"
@@ -82,13 +87,22 @@ func newController() (*ServiceController, *fakecloud.Cloud, *fake.Clientset) {
 	return controller, cloud, client
 }
 
-func TestCreateExternalLoadBalancer(t *testing.T) {
-	table := []struct {
-		service             *v1.Service
-		expectErr           bool
-		expectCreateAttempt bool
+// TODO(@MrHohn): Verify the end state when below issue is resolved:
+// https://github.com/kubernetes/client-go/issues/607
+func TestSyncLoadBalancerIfNeeded(t *testing.T) {
+	testCases := []struct {
+		desc                 string
+		enableFeatureGate    bool
+		service              *v1.Service
+		lbExists             bool
+		expectOp             loadBalancerOperation
+		expectCreateAttempt  bool
+		expectDeleteAttempt  bool
+		expectPatchStatus    bool
+		expectPatchFinalizer bool
 	}{
 		{
+			desc: "service doesn't want LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "no-external-balancer",
 					Namespace: "default",
 				},
 				Spec: v1.ServiceSpec{
 					Type: v1.ServiceTypeClusterIP,
 				},
 			},
-			expectErr:           false,
-			expectCreateAttempt: false,
+			expectOp:          deleteLoadBalancer,
+			expectPatchStatus: false,
 		},
 		{
+			desc: "service no longer wants LB",
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "no-external-balancer",
+					Namespace: "default",
+				},
+				Spec: v1.ServiceSpec{
+					Type: v1.ServiceTypeClusterIP,
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			lbExists:            true,
+			expectOp:            deleteLoadBalancer,
+			expectDeleteAttempt: true,
+			expectPatchStatus:   true,
+		},
+		{
+			desc: "udp service that wants LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "udp-service",
 					Namespace: "default",
 					SelfLink:  testapi.Default.SelfLink("services", "udp-service"),
 				},
 				Spec: v1.ServiceSpec{
 					Ports: []v1.ServicePort{{
 						Port:     80,
 						Protocol: v1.ProtocolUDP,
 					}},
 					Type: v1.ServiceTypeLoadBalancer,
 				},
 			},
-			expectErr:           false,
+			expectOp:            ensureLoadBalancer,
 			expectCreateAttempt: true,
+			expectPatchStatus:   true,
 		},
 		{
+			desc: "tcp service that wants LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "basic-service1",
 					Namespace: "default",
 					SelfLink:  testapi.Default.SelfLink("services", "basic-service1"),
 				},
 				Spec: v1.ServiceSpec{
 					Ports: []v1.ServicePort{{
 						Port:     80,
 						Protocol: v1.ProtocolTCP,
 					}},
@@ -134,10 +174,12 @@ func TestCreateExternalLoadBalancer(t *testing.T) {
 					Type: v1.ServiceTypeLoadBalancer,
 				},
 			},
-			expectErr:           false,
+			expectOp:            ensureLoadBalancer,
 			expectCreateAttempt: true,
+			expectPatchStatus:   true,
 		},
 		{
+			desc: "sctp service that wants LB",
 			service: &v1.Service{
 				ObjectMeta: metav1.ObjectMeta{
 					Name:      "sctp-service",
 					Namespace: "default",
 					SelfLink:  testapi.Default.SelfLink("services", "sctp-service"),
 				},
 				Spec: v1.ServiceSpec{
 					Ports: []v1.ServicePort{{
 						Port:     80,
 						Protocol: v1.ProtocolSCTP,
 					}},
 					Type: v1.ServiceTypeLoadBalancer,
 				},
 			},
-			expectErr:           false,
+			expectOp:            ensureLoadBalancer,
 			expectCreateAttempt: true,
+			expectPatchStatus:   true,
 		},
+		// Finalizer test cases below.
+		{
+			desc: "service with finalizer that no longer wants LB",
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "no-external-balancer",
+					Namespace:  "default",
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+				Spec: v1.ServiceSpec{
+					Type: v1.ServiceTypeClusterIP,
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			lbExists:             true,
+			expectOp:             deleteLoadBalancer,
+			expectDeleteAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: true,
+		},
+		{
+			desc: "service that needs cleanup",
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "basic-service1",
+					Namespace: "default",
+					SelfLink:  testapi.Default.SelfLink("services", "basic-service1"),
+					DeletionTimestamp: &metav1.Time{
+						Time: time.Now(),
+					},
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+				Spec: v1.ServiceSpec{
+					Ports: []v1.ServicePort{{
+						Port:     80,
+						Protocol: v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeLoadBalancer,
+				},
+				Status: v1.ServiceStatus{
+					LoadBalancer: v1.LoadBalancerStatus{
+						Ingress: []v1.LoadBalancerIngress{
+							{IP: "8.8.8.8"},
+						},
+					},
+				},
+			},
+			lbExists:             true,
+			expectOp:             deleteLoadBalancer,
+			expectDeleteAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: true,
+		},
+		{
+			desc:              "service without finalizer that wants LB",
+			enableFeatureGate: true,
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "basic-service1",
+					Namespace: "default",
+					SelfLink:  testapi.Default.SelfLink("services", "basic-service1"),
+				},
+				Spec: v1.ServiceSpec{
+					Ports: []v1.ServicePort{{
+						Port:     80,
+						Protocol: v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeLoadBalancer,
+				},
+			},
+			expectOp:             ensureLoadBalancer,
+			expectCreateAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: true,
+		},
+		{
+			desc:              "service with finalizer that wants LB",
+			enableFeatureGate: true,
+			service: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "basic-service1",
+					Namespace:  "default",
+					SelfLink:   testapi.Default.SelfLink("services", "basic-service1"),
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+				Spec: v1.ServiceSpec{
+					Ports: []v1.ServicePort{{
+						Port:     80,
+						Protocol: v1.ProtocolTCP,
+					}},
+					Type: v1.ServiceTypeLoadBalancer,
+				},
+			},
+			expectOp:             ensureLoadBalancer,
+			expectCreateAttempt:  true,
+			expectPatchStatus:    true,
+			expectPatchFinalizer: false,
+		},
 	}
 
-	for _, item := range table {
-		controller, cloud, client := newController()
-		key := fmt.Sprintf("%s/%s", item.service.Namespace, item.service.Name)
-		if _, err := client.CoreV1().Services(item.service.Namespace).Create(item.service); err != nil {
-			t.Errorf("Failed to prepare service %s for testing: %v", key, err)
-			continue
-		}
-		client.ClearActions()
-		err := controller.syncLoadBalancerIfNeeded(key, item.service)
-		if !item.expectErr && err != nil {
-			t.Errorf("unexpected error: %v", err)
-		} else if item.expectErr && err == nil {
-			t.Errorf("expected error creating %v, got nil", item.service)
-		}
-		actions := client.Actions()
-		if !item.expectCreateAttempt {
-			if len(cloud.Calls) > 0 {
-				t.Errorf("unexpected cloud provider calls: %v", cloud.Calls)
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceLoadBalancerFinalizer, tc.enableFeatureGate)()
+
+			controller, cloud, client := newController()
+			cloud.Exists = tc.lbExists
+			key := fmt.Sprintf("%s/%s", tc.service.Namespace, tc.service.Name)
+			if _, err := client.CoreV1().Services(tc.service.Namespace).Create(tc.service); err != nil {
+				t.Fatalf("Failed to prepare service %s for testing: %v", key, err)
+			}
+			client.ClearActions()
+
+			op, err := controller.syncLoadBalancerIfNeeded(tc.service, key)
+			if err != nil {
+				t.Errorf("Got error: %v, want nil", err)
+			}
+			if op != tc.expectOp {
+				t.Errorf("Got operation %v, want %v", op, tc.expectOp)
+			}
+			// Capture actions from test so it won't be messed up.
+			actions := client.Actions()
+
+			if !tc.expectCreateAttempt && !tc.expectDeleteAttempt {
+				if len(cloud.Calls) > 0 {
+					t.Errorf("Unexpected cloud provider calls: %v", cloud.Calls)
+				}
+				if len(actions) > 0 {
+					t.Errorf("Unexpected client actions: %v", actions)
+				}
+				return
+			}
+
+			if tc.expectCreateAttempt {
+				createCallFound := false
+				for _, call := range cloud.Calls {
+					if call == "create" {
+						createCallFound = true
+					}
+				}
+				if !createCallFound {
+					t.Errorf("Got no create call for load balancer, expected one")
+				}
+				// TODO(@MrHohn): Clean up the awkward pattern here.
+				var balancer *fakecloud.Balancer
+				for k := range cloud.Balancers {
+					if balancer == nil {
+						b := cloud.Balancers[k]
+						balancer = &b
+					} else {
+						t.Errorf("Got load balancer %v, expected one to be created", cloud.Balancers)
+						break
+					}
+				}
 				if balancer == nil {
-					b := cloud.Balancers[k]
-					balancer = &b
-				} else {
-					t.Errorf("expected one load balancer to be created, got %v", cloud.Balancers)
-					break
+					t.Errorf("Got no load balancer, expected one to be created")
+				} else if balancer.Name != controller.loadBalancerName(tc.service) ||
+					balancer.Region != region ||
+					balancer.Ports[0].Port != tc.service.Spec.Ports[0].Port {
+					t.Errorf("Created load balancer has incorrect parameters: %v", balancer)
 				}
 			}
-			if balancer == nil {
-				t.Errorf("expected one load balancer to be created, got none")
-			} else if balancer.Name != controller.loadBalancerName(item.service) ||
-				balancer.Region != region ||
-				balancer.Ports[0].Port != item.service.Spec.Ports[0].Port {
-				t.Errorf("created load balancer has incorrect parameters: %v", balancer)
+			if tc.expectDeleteAttempt {
+				deleteCallFound := false
+				for _, call := range cloud.Calls {
+					if call == "delete" {
+						deleteCallFound = true
+					}
+				}
+				if !deleteCallFound {
+					t.Errorf("Got no delete call for load balancer, expected one")
+				}
 			}
-			actionFound := false
+
+			expectNumPatches := 0
+			if tc.expectPatchStatus {
+				expectNumPatches++
+			}
+			if tc.expectPatchFinalizer {
+				expectNumPatches++
+			}
+			numPatches := 0
 			for _, action := range actions {
-				if action.GetVerb() == "patch" && action.GetResource().Resource == "services" {
-					actionFound = true
+				if action.Matches("patch", "services") {
+					numPatches++
 				}
 			}
-			if !actionFound {
-				t.Errorf("expected patch service to be sent to client, got these actions instead: %v", actions)
+			if numPatches != expectNumPatches {
+				t.Errorf("Expected %d patches, got %d instead. Actions: %v", numPatches, expectNumPatches, actions)
 			}
-		}
+		})
 	}
 }
 
@@ -339,7 +526,7 @@ func TestGetNodeConditionPredicate(t *testing.T) {
 	}
 }
 
-func TestProcessServiceUpdate(t *testing.T) {
+func TestProcessServiceCreateOrUpdate(t *testing.T) {
 	controller, _, client := newController()
 
 	//A pair of old and new loadbalancer IP address
@@ -420,19 +607,18 @@
 		if _, err := client.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
 			t.Fatalf("Failed to prepare service %s for testing: %v", tc.key, err)
 		}
-		svcCache := controller.cache.getOrCreate(tc.key)
-		obtErr := controller.processServiceUpdate(svcCache, newSvc, tc.key)
+		obtErr := controller.processServiceCreateOrUpdate(newSvc, tc.key)
 		if err := tc.expectedFn(newSvc, obtErr); err != nil {
-			t.Errorf("%v processServiceUpdate() %v", tc.testName, err)
+			t.Errorf("%v processServiceCreateOrUpdate() %v", tc.testName, err)
 		}
 	}
 }
 
-// TestConflictWhenProcessServiceUpdate tests if processServiceUpdate will
+// TestConflictWhenProcessServiceCreateOrUpdate tests if processServiceCreateOrUpdate will
 // retry creating the load balancer when the update operation returns a conflict
 // error.
-func TestConflictWhenProcessServiceUpdate(t *testing.T) {
+func TestConflictWhenProcessServiceCreateOrUpdate(t *testing.T) {
 	svcName := "conflict-lb"
 	svc := newService(svcName, types.UID("123"), v1.ServiceTypeLoadBalancer)
 	controller, _, client := newController()
@@ -441,23 +627,22 @@
 		return true, update.GetObject(), apierrors.NewConflict(action.GetResource().GroupResource(), svcName, errors.New("Object changed"))
 	})
 
-	svcCache := controller.cache.getOrCreate(svcName)
-	if err := controller.processServiceUpdate(svcCache, svc, svcName); err == nil {
-		t.Fatalf("controller.processServiceUpdate() = nil, want error")
+	if err := controller.processServiceCreateOrUpdate(svc, svcName); err == nil {
+		t.Fatalf("controller.processServiceCreateOrUpdate() = nil, want error")
 	}
 
-	retryMsg := "Error creating load balancer (will retry)"
+	errMsg := "Error syncing load balancer"
 	if gotEvent := func() bool {
 		events := controller.eventRecorder.(*record.FakeRecorder).Events
 		for len(events) > 0 {
 			e := <-events
-			if strings.Contains(e, retryMsg) {
+			if strings.Contains(e, errMsg) {
 				return true
 			}
 		}
 		return false
 	}(); !gotEvent {
-		t.Errorf("controller.processServiceUpdate() = can't find retry creating lb event, want event contains %q", retryMsg)
+		t.Errorf("controller.processServiceCreateOrUpdate() = can't find sync error event, want event contains %q", errMsg)
 	}
 }
 
@@ -620,7 +805,62 @@ func TestProcessServiceDeletion(t *testing.T) {
 
 }
 
-func TestDoesExternalLoadBalancerNeedsUpdate(t *testing.T) {
+func TestNeedsCleanup(t *testing.T) {
+	testCases := []struct {
+		desc               string
+		svc                *v1.Service
+		expectNeedsCleanup bool
+	}{
+		{
+			desc:               "service without finalizer without timestamp",
+			svc:                &v1.Service{},
+			expectNeedsCleanup: false,
+		},
+		{
+			desc: "service without finalizer with timestamp",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: &metav1.Time{
+						Time: time.Now(),
+					},
+				},
+			},
+			expectNeedsCleanup: false,
+		},
+		{
+			desc: "service with finalizer without timestamp",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+			},
+			expectNeedsCleanup: false,
+		},
+		{
+			desc: "service with finalizer with timestamp",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					DeletionTimestamp: &metav1.Time{
+						Time: time.Now(),
+					},
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer, "unrelated"},
+				},
+			},
+			expectNeedsCleanup: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			if gotNeedsCleanup := needsCleanup(tc.svc); gotNeedsCleanup != tc.expectNeedsCleanup {
+				t.Errorf("needsCleanup() = %t, want %t", gotNeedsCleanup, tc.expectNeedsCleanup)
+			}
+		})
+	}
+
+}
+
+func TestNeedsUpdate(t *testing.T) {
 
 	var oldSvc, newSvc *v1.Service
 
@@ -861,3 +1101,211 @@ func TestNodeSlicesEqualForLB(t *testing.T) {
 		t.Errorf("nodeSlicesEqualForLB() Expected=false Obtained=true")
 	}
 }
+
+// TODO(@MrHohn): Verify the end state when below issue is resolved:
+// https://github.com/kubernetes/client-go/issues/607
+func TestAddFinalizer(t *testing.T) {
+	testCases := []struct {
+		desc        string
+		svc         *v1.Service
+		expectPatch bool
+	}{
+		{
+			desc: "no-op add finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:       "test-patch-finalizer",
+					Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer},
+				},
+			},
+			expectPatch: false,
+		},
+		{
+			desc: "add finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+ Name: "test-patch-finalizer", + }, + }, + expectPatch: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + c := fake.NewSimpleClientset() + s := &ServiceController{ + kubeClient: c, + } + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil { + t.Fatalf("Failed to prepare service for testing: %v", err) + } + if err := s.addFinalizer(tc.svc); err != nil { + t.Fatalf("addFinalizer() = %v, want nil", err) + } + patchActionFound := false + for _, action := range c.Actions() { + if action.Matches("patch", "services") { + patchActionFound = true + } + } + if patchActionFound != tc.expectPatch { + t.Errorf("Got patchActionFound = %t, want %t", patchActionFound, tc.expectPatch) + } + }) + } +} + +// TODO(@MrHohn): Verify the end state when below issue is resolved: +// https://github.com/kubernetes/client-go/issues/607 +func TestRemoveFinalizer(t *testing.T) { + testCases := []struct { + desc string + svc *v1.Service + expectPatch bool + }{ + { + desc: "no-op remove finalizer", + svc: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-patch-finalizer", + }, + }, + expectPatch: false, + }, + { + desc: "remove finalizer", + svc: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-patch-finalizer", + Finalizers: []string{servicehelper.LoadBalancerCleanupFinalizer}, + }, + }, + expectPatch: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + c := fake.NewSimpleClientset() + s := &ServiceController{ + kubeClient: c, + } + if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil { + t.Fatalf("Failed to prepare service for testing: %v", err) + } + if err := s.removeFinalizer(tc.svc); err != nil { + t.Fatalf("removeFinalizer() = %v, want nil", err) + } + patchActionFound := false + for _, action := range c.Actions() { + if action.Matches("patch", "services") { + patchActionFound = true + } + } + if patchActionFound != tc.expectPatch { + t.Errorf("Got patchActionFound = %t, want %t", patchActionFound, tc.expectPatch) + } + }) + } +} + +// TODO(@MrHohn): Verify the end state when below issue is resolved: +// https://github.com/kubernetes/client-go/issues/607 +func TestPatchStatus(t *testing.T) { + testCases := []struct { + desc string + svc *v1.Service + newStatus *v1.LoadBalancerStatus + expectPatch bool + }{ + { + desc: "no-op add status", + svc: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-patch-status", + }, + Status: v1.ServiceStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + {IP: "8.8.8.8"}, + }, + }, + }, + }, + newStatus: &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + {IP: "8.8.8.8"}, + }, + }, + expectPatch: false, + }, + { + desc: "add status", + svc: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-patch-status", + }, + Status: v1.ServiceStatus{}, + }, + newStatus: &v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + {IP: "8.8.8.8"}, + }, + }, + expectPatch: true, + }, + { + desc: "no-op clear status", + svc: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-patch-status", + }, + Status: v1.ServiceStatus{}, + }, + newStatus: &v1.LoadBalancerStatus{}, + expectPatch: false, + }, + { + desc: "clear status", + svc: &v1.Service{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-patch-status", + }, + Status: v1.ServiceStatus{ + LoadBalancer: v1.LoadBalancerStatus{ + Ingress: []v1.LoadBalancerIngress{ + {IP: "8.8.8.8"}, + }, + }, + }, + }, + 
+			newStatus:   &v1.LoadBalancerStatus{},
+			expectPatch: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			c := fake.NewSimpleClientset()
+			s := &ServiceController{
+				kubeClient: c,
+			}
+			if _, err := s.kubeClient.CoreV1().Services(tc.svc.Namespace).Create(tc.svc); err != nil {
+				t.Fatalf("Failed to prepare service for testing: %v", err)
+			}
+			if err := s.patchStatus(tc.svc, &tc.svc.Status.LoadBalancer, tc.newStatus); err != nil {
+				t.Fatalf("patchStatus() = %v, want nil", err)
+			}
+			patchActionFound := false
+			for _, action := range c.Actions() {
+				if action.Matches("patch", "services") {
+					patchActionFound = true
+				}
+			}
+			if patchActionFound != tc.expectPatch {
+				t.Errorf("Got patchActionFound = %t, want %t", patchActionFound, tc.expectPatch)
+			}
+		})
+	}
+}
diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go b/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go
index 4d4073c13ab..d342e5a5fa4 100644
--- a/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go
+++ b/staging/src/k8s.io/cloud-provider/service/helpers/helper_test.go
@@ -20,7 +20,8 @@ import (
 	"strings"
 	"testing"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	utilnet "k8s.io/utils/net"
 )
 
@@ -219,3 +220,52 @@ func TestNeedsHealthCheck(t *testing.T) {
 		},
 	})
 }
+
+func TestHasLBFinalizer(t *testing.T) {
+	testCases := []struct {
+		desc         string
+		svc          *v1.Service
+		hasFinalizer bool
+	}{
+		{
+			desc:         "service without finalizer",
+			svc:          &v1.Service{},
+			hasFinalizer: false,
+		},
+		{
+			desc: "service with unrelated finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{"unrelated"},
+				},
+			},
+			hasFinalizer: false,
+		},
+		{
+			desc: "service with one finalizer",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{LoadBalancerCleanupFinalizer},
+				},
+			},
+			hasFinalizer: true,
+		},
+		{
+			desc: "service with multiple finalizers",
+			svc: &v1.Service{
+				ObjectMeta: metav1.ObjectMeta{
+					Finalizers: []string{LoadBalancerCleanupFinalizer, "unrelated"},
+				},
+			},
+			hasFinalizer: true,
+		},
+	}
+
+	for _, tc := range testCases {
+		t.Run(tc.desc, func(t *testing.T) {
+			if hasFinalizer := HasLBFinalizer(tc.svc); hasFinalizer != tc.hasFinalizer {
+				t.Errorf("HasLBFinalizer() = %t, want %t", hasFinalizer, tc.hasFinalizer)
+			}
+		})
+	}
+}

From 64198a43e780b30ef14d3e4e9d9fa8aedc03446e Mon Sep 17 00:00:00 2001
From: Zihong Zheng
Date: Thu, 23 May 2019 12:08:10 -0700
Subject: [PATCH 4/4] Auto-generated BUILD files

---
 pkg/controller/service/BUILD                            | 6 ++++++
 staging/src/k8s.io/cloud-provider/service/helpers/BUILD | 1 +
 2 files changed, 7 insertions(+)

diff --git a/pkg/controller/service/BUILD b/pkg/controller/service/BUILD
index 98b9daf21ea..883d5037667 100644
--- a/pkg/controller/service/BUILD
+++ b/pkg/controller/service/BUILD
@@ -19,6 +19,7 @@ go_library(
 		"//pkg/controller:go_default_library",
 		"//pkg/features:go_default_library",
 		"//pkg/util/metrics:go_default_library",
+		"//pkg/util/slice:go_default_library",
 		"//staging/src/k8s.io/api/core/v1:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/types:go_default_library",
 		"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
 		"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
 		"//staging/src/k8s.io/client-go/informers:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
 		"//staging/src/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
 		"//staging/src/k8s.io/client-go/listers/core/v1:go_default_library",
 		"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
 		"//staging/src/k8s.io/client-go/tools/record:go_default_library",
 		"//staging/src/k8s.io/client-go/util/workqueue:go_default_library",
 		"//staging/src/k8s.io/cloud-provider:go_default_library",
+		"//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library",
"//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library", "//vendor/k8s.io/klog:go_default_library", ], ) @@ -50,16 +52,20 @@ go_test( deps = [ "//pkg/api/testapi:go_default_library", "//pkg/controller:go_default_library", + "//pkg/features:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/api/errors:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", + "//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library", "//staging/src/k8s.io/client-go/informers:go_default_library", "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library", "//staging/src/k8s.io/client-go/testing:go_default_library", "//staging/src/k8s.io/client-go/tools/record:go_default_library", "//staging/src/k8s.io/cloud-provider/fake:go_default_library", + "//staging/src/k8s.io/cloud-provider/service/helpers:go_default_library", + "//staging/src/k8s.io/component-base/featuregate/testing:go_default_library", ], ) diff --git a/staging/src/k8s.io/cloud-provider/service/helpers/BUILD b/staging/src/k8s.io/cloud-provider/service/helpers/BUILD index dfd2e2a8d16..0761000dcca 100644 --- a/staging/src/k8s.io/cloud-provider/service/helpers/BUILD +++ b/staging/src/k8s.io/cloud-provider/service/helpers/BUILD @@ -32,6 +32,7 @@ go_test( embed = [":go_default_library"], deps = [ "//staging/src/k8s.io/api/core/v1:go_default_library", + "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//vendor/k8s.io/utils/net:go_default_library", ], )