Merge pull request #37870 from madhusudancs/fed-svc-no-cascading-delete-e2e-fix
Automatic merge from submit-queue

[Federation] Separate the cleanup phases for the federation service and its service shards, so that the shards can be cleaned up even after the service has been deleted elsewhere. Fixes the Federated Service e2e test.

This separation is necessary because the "Federated Service DNS should be able to discover a federated service" e2e test recently added a case that deletes the service from the federation but leaves the shards in the underlying clusters. With cleanup implemented the way it currently is in the AfterEach block, none of the underlying shards were cleaned up in that case; tracking the shards independently of the federation service lets the two cleanup phases run on their own.

cc @kubernetes/sig-cluster-federation @nikhiljindal
This change is contained in commit 2708f5c7dd.
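The gist of the change, as a minimal standalone sketch rather than the actual e2e code (Service, createService, deleteService and cleanupShards below are illustrative stand-ins for *v1.Service and the framework helpers createServiceOrFail, deleteServiceOrFail and cleanupServiceShardsAndProviderResources that appear in the diff): keep an independent copy of the created service in serviceShard, and run the federation-level deletion and the per-cluster shard cleanup as two separate, nil-guarded phases in the teardown.

// Minimal sketch of the separated cleanup (stand-ins, not the real e2e
// framework): keep an independent copy of the created service in
// serviceShard so that shard cleanup no longer depends on the
// federation-level service object, which a test case may already have
// deleted and nil-ed out.
package main

import "fmt"

// Service is a stand-in for *v1.Service.
type Service struct{ Name string }

var (
	service      *Service // federation-level service; a test may delete it and set this to nil
	serviceShard *Service // independent copy, used only to clean up per-cluster shards
)

// createService, deleteService and cleanupShards stand in for
// createServiceOrFail, deleteServiceOrFail and
// cleanupServiceShardsAndProviderResources in the diff below.
func createService(name string) *Service { return &Service{Name: name} }
func deleteService(s *Service)           { fmt.Println("deleting federation service", s.Name) }
func cleanupShards(s *Service)           { fmt.Println("cleaning up shards of", s.Name) }

func beforeEach() {
	service = createService("federated-service")
	shardCopy := *service // keep a copy, so shard cleanup survives `service = nil`
	serviceShard = &shardCopy
}

func afterEach() {
	// Phase 1: delete the federation-level service, unless the test already did.
	if service != nil {
		deleteService(service)
		service = nil
	}
	// Phase 2: always clean up the underlying shards, driven by the copy.
	if serviceShard != nil {
		cleanupShards(serviceShard)
		serviceShard = nil
	}
}

func main() {
	// Simulate the new test case: the service is deleted inside the test body.
	beforeEach()
	deleteService(service)
	service = nil
	afterEach() // shard cleanup still runs, thanks to serviceShard
}

In the real test the copy is taken with conversion.NewCloner().DeepCopy, falling back to a shallow copy if the deep copy fails, as the diff below shows.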
@@ -151,6 +151,7 @@ go_library(
         "//pkg/controller/petset:go_default_library",
         "//pkg/controller/replicaset:go_default_library",
         "//pkg/controller/replication:go_default_library",
+        "//pkg/conversion:go_default_library",
         "//pkg/dns/federation:go_default_library",
         "//pkg/fields:go_default_library",
         "//pkg/kubectl:go_default_library",
@@ -24,6 +24,7 @@ import (
 	"time"
 
 	"k8s.io/kubernetes/pkg/api/v1"
+	"k8s.io/kubernetes/pkg/conversion"
 	"k8s.io/kubernetes/test/e2e/framework"
 
 	. "github.com/onsi/ginkgo"
@@ -140,6 +141,7 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 
 		var (
 			service *v1.Service
+			serviceShard *v1.Service
 		)
 
 		BeforeEach(func() {
@@ -168,8 +170,37 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 			}
 
 			createBackendPodsOrFail(clusters, nsName, FederatedServicePodName)
 
 			service = createServiceOrFail(f.FederationClientset_1_5, nsName, FederatedServiceName)
-			waitForServiceShardsOrFail(nsName, service, clusters)
+			obj, err := conversion.NewCloner().DeepCopy(service)
+			// Cloning shouldn't fail. On the off-chance it does, we
+			// should shallow copy service to serviceShard before
+			// failing. If we don't do this we will never really
+			// get a chance to clean up the underlying services
+			// when the cloner fails for reasons not in our
+			// control. For example, cloner bug. That will cause
+			// the resources to leak, which in turn causes the
+			// test project to run out of quota and the entire
+			// suite starts failing. So we must try as hard as
+			// possible to cleanup the underlying services. So
+			// if DeepCopy fails, we are going to try with shallow
+			// copy as a last resort.
+			if err != nil {
+				serviceCopy := *service
+				serviceShard = &serviceCopy
+				framework.ExpectNoError(err, fmt.Sprintf("Error in deep copying service %q", service.Name))
+			}
+			var ok bool
+			serviceShard, ok = obj.(*v1.Service)
+			// Same argument as above about using shallow copy
+			// as a last resort.
+			if !ok {
+				serviceCopy := *service
+				serviceShard = &serviceCopy
+				framework.ExpectNoError(err, fmt.Sprintf("Unexpected service object copied %T", obj))
+			}
+
+			waitForServiceShardsOrFail(nsName, serviceShard, clusters)
 		})
 
 		AfterEach(func() {
@@ -180,14 +211,19 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 
 			if service != nil {
 				deleteServiceOrFail(f.FederationClientset_1_5, nsName, service.Name)
-
-				By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", service.Name, nsName))
-				cleanupServiceShardsAndProviderResources(nsName, service, clusters)
 				service = nil
 			} else {
 				By("No service to delete. Service is nil")
 			}
 
+			if serviceShard != nil {
+				By(fmt.Sprintf("Deleting service shards and their provider resources in underlying clusters for service %q in namespace %q", service.Name, nsName))
+				cleanupServiceShardsAndProviderResources(nsName, service, clusters)
+				serviceShard = nil
+			} else {
+				By("No service shards to delete. `serviceShard` is nil")
+			}
+
 			// Delete the kube-dns config map from all clusters.
 			for clusterName, cluster := range clusters {
 				By(fmt.Sprintf("Deleting kube dns config map from cluster: %s", clusterName))
@@ -218,6 +254,9 @@ var _ = framework.KubeDescribe("[Feature:Federation]", func() {
 			By("Deleting the service to verify that DNS rules still work")
 			err := f.FederationClientset_1_5.Services(nsName).Delete(FederatedServiceName, &v1.DeleteOptions{})
 			framework.ExpectNoError(err, "Error deleting service %q in namespace %q", service.Name, service.Namespace)
+			// Service is deleted, unset the test block-global service variable.
+			service = nil
+
 			for i, DNSName := range svcDNSNames {
 				discoverService(f, DNSName, true, "federated-service-e2e-discovery-pod-"+strconv.Itoa(i))
 			}
@@ -142,6 +142,7 @@ Federated Services DNS non-local federated service should be able to discover a
 Federated Services DNS should be able to discover a federated service,derekwaynecarr,1
 Federated Services Service creation should create matching services in underlying clusters,jbeda,1
 Federated Services Service creation should not be deleted from underlying clusters when it is deleted,sttts,0
+Federated Services Service creation should not be deleted from underlying clusters when it is deleted,madhusudancs,0
 Federated Services Service creation should succeed,rmmh,1
 Federated ingresses Federated Ingresses Ingress connectivity and DNS should be able to connect to a federated ingress via its load balancer,rmmh,1
 Federated ingresses Federated Ingresses should be created and deleted successfully,dchen1107,1
@@ -447,6 +448,7 @@ Services should be able to up and down services,bprashanth,0
 Services should check NodePort out-of-range,bprashanth,0
 Services should create endpoints for unready pods,maisem,0
 Services should only allow access from service loadbalancer source ranges,sttts,0
+Services should only allow access from service loadbalancer source ranges,madhusudancs,0
 Services should preserve source pod IP for traffic thru service cluster IP,Random-Liu,1
 Services should prevent NodePort collisions,bprashanth,0
 Services should provide secure master service,bprashanth,0
@@ -556,6 +558,9 @@ k8s.io/kubernetes/pkg/api/v1,vulpecula,1
 k8s.io/kubernetes/pkg/api/v1/endpoints,sttts,0
 k8s.io/kubernetes/pkg/api/v1/pod,sttts,0
 k8s.io/kubernetes/pkg/api/v1/service,sttts,0
+k8s.io/kubernetes/pkg/api/v1/endpoints,madhusudancs,0
+k8s.io/kubernetes/pkg/api/v1/pod,madhusudancs,0
+k8s.io/kubernetes/pkg/api/v1/service,madhusudancs,0
 k8s.io/kubernetes/pkg/api/validation,smarterclayton,1
 k8s.io/kubernetes/pkg/api/validation/path,luxas,1
 k8s.io/kubernetes/pkg/apimachinery,gmarek,1
@@ -791,6 +796,7 @@ k8s.io/kubernetes/pkg/registry/storage/storageclass,brendandburns,1
 k8s.io/kubernetes/pkg/registry/storage/storageclass/etcd,eparis,1
 k8s.io/kubernetes/pkg/runtime,wojtek-t,0
 k8s.io/kubernetes/pkg/runtime/schema,sttts,0
+k8s.io/kubernetes/pkg/runtime/schema,madhusudancs,0
 k8s.io/kubernetes/pkg/runtime/serializer,wojtek-t,0
 k8s.io/kubernetes/pkg/runtime/serializer/json,wojtek-t,0
 k8s.io/kubernetes/pkg/runtime/serializer/protobuf,wojtek-t,0