Demote static IPs ASAP for easier cleanup
This exposed bugs in the IP promotion/demotion logic.
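Background on the pattern, as a standalone sketch: a GCE static IP reserved by the test is not kube-managed, so the test must release it itself; but once the address is attached to the load balancer, deleting the reservation merely "demotes" it to an ephemeral IP, which the cloud releases on its own when the load balancer is torn down. The Go sketch below illustrates that demote-early-plus-guarded-defer flow in isolation. attachToLoadBalancer and releaseStaticIP are hypothetical stand-ins (the real test uses the createGCEStaticIP / deleteGCEStaticIP helpers visible in the diff), so treat this as an outline of the control flow rather than the actual e2e code.

package main

import (
	"fmt"
	"log"
	"math/rand"
	"time"
)

// Hypothetical stand-ins for the e2e helpers; only the control flow matters here.
func attachToLoadBalancer(name string) error { log.Printf("attached %s", name); return nil }
func releaseStaticIP(name string) error      { log.Printf("released %s", name); return nil }

func run() error {
	rand.Seed(time.Now().UTC().UnixNano())
	staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))

	// Guarded defer: fires only if the IP has not already been demoted below,
	// so an early return on error still releases the reservation.
	defer func() {
		if staticIPName != "" {
			if err := releaseStaticIP(staticIPName); err != nil {
				log.Printf("failed to release static IP %s: %v", staticIPName, err)
			}
		}
	}()

	if err := attachToLoadBalancer(staticIPName); err != nil {
		return err
	}

	// Demote ASAP: once the load balancer holds the address, deleting the
	// reservation turns it into an ephemeral IP that goes away with the load
	// balancer, so a later timeout cannot leak the reservation.
	if err := releaseStaticIP(staticIPName); err != nil {
		return err
	}
	staticIPName = "" // disarm the defer

	return nil
}

func main() {
	if err := run(); err != nil {
		log.Fatal(err)
	}
}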
@@ -486,15 +486,20 @@ var _ = Describe("Services", func() {
 		// Change the services to LoadBalancer.
 
 		requestedIP := ""
+		staticIPName := ""
 		if providerIs("gce", "gke") {
 			By("creating a static load balancer IP")
 			rand.Seed(time.Now().UTC().UnixNano())
-			staticIPName := fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
+			staticIPName = fmt.Sprintf("e2e-external-lb-test-%d", rand.Intn(65535))
 			requestedIP, err = createGCEStaticIP(staticIPName)
 			Expect(err).NotTo(HaveOccurred())
 			defer func() {
-				// Release GCE static IP - this is not kube-managed and will not be automatically released.
-				deleteGCEStaticIP(staticIPName)
+				if staticIPName != "" {
+					// Release GCE static IP - this is not kube-managed and will not be automatically released.
+					if err := deleteGCEStaticIP(staticIPName); err != nil {
+						Logf("failed to release static IP %s: %v", staticIPName, err)
+					}
+				}
 			}()
 			Logf("Allocated static load balancer IP: %s", requestedIP)
 		}
@@ -523,6 +528,23 @@ var _ = Describe("Services", func() {
 		tcpIngressIP := getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0])
 		Logf("TCP load balancer: %s", tcpIngressIP)
 
 		By("waiting for the UDP service " + serviceName + " to have a load balancer")
+		if providerIs("gce", "gke") {
+			// Do this as early as possible, which overrides the `defer` above.
+			// This is mostly out of fear of leaking the IP in a timeout case
+			// (as of this writing we're not 100% sure where the leaks are
+			// coming from, so this is first-aid rather than surgery).
+			By("demoting the static IP to ephemeral")
+			if staticIPName != "" {
+				// Deleting it after it is attached "demotes" it to an
+				// ephemeral IP, which can be auto-released.
+				if err := deleteGCEStaticIP(staticIPName); err != nil {
+					Failf("failed to release static IP %s: %v", staticIPName, err)
+				}
+				staticIPName = ""
+			}
+		}
+
+		By("waiting for the UDP service to have a load balancer")
 		// 2nd one should be faster since they ran in parallel.
 		udpService = jig.WaitForLoadBalancerOrFail(ns2, udpService.Name)
@@ -531,7 +553,7 @@ var _ = Describe("Services", func() {
 			Failf("UDP Spec.Ports[0].NodePort changed (%d -> %d) when not expected", udpNodePort, udpService.Spec.Ports[0].NodePort)
 		}
 		udpIngressIP := getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0])
-		Logf("UDP load balancer: %s", tcpIngressIP)
+		Logf("UDP load balancer: %s", udpIngressIP)
 
 		By("verifying that TCP and UDP use different load balancers")
 		if tcpIngressIP == udpIngressIP {
@@ -614,7 +636,6 @@ var _ = Describe("Services", func() {
 		if getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]) != tcpIngressIP {
 			Failf("TCP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", tcpIngressIP, getIngressPoint(&tcpService.Status.LoadBalancer.Ingress[0]))
 		}
-		Logf("service port (TCP and UDP): %d", svcPort)
 
 		By("changing the UDP service's port")
 		udpService = jig.UpdateServiceOrFail(ns2, udpService.Name, func(s *api.Service) {
@@ -631,6 +652,8 @@ var _ = Describe("Services", func() {
 			Failf("UDP Status.LoadBalancer.Ingress changed (%s -> %s) when not expected", udpIngressIP, getIngressPoint(&udpService.Status.LoadBalancer.Ingress[0]))
 		}
 
+		Logf("service port (TCP and UDP): %d", svcPort)
+
 		By("hitting the TCP service's NodePort")
 		jig.TestReachableHTTP(nodeIP, tcpNodePort, kubeProxyLagTimeout)
 