Clean up and fix networking test timeouts for large clusters

wojtekt
2020-04-02 16:57:38 +02:00
parent 4c5a963463
commit 268b51d023
7 changed files with 54 additions and 34 deletions
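
The recurring change below swaps fixed load-balancer timeout constants (e2eservice.LoadBalancerPropagationTimeoutDefault, e2eservice.LoadBalancerPollTimeout) for a new helper, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), which derives the timeout from the cluster being tested. The helper itself is not shown in these hunks; the following is a minimal sketch of what the call sites imply, assuming a node-count threshold and an extended timeout for large clusters (the threshold and timeout values here are illustrative, not necessarily the committed ones):

package service

import (
	"time"

	clientset "k8s.io/client-go/kubernetes"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
)

const (
	// LoadBalancerPropagationTimeoutDefault is the fixed default the tests
	// used before this change (value assumed here).
	LoadBalancerPropagationTimeoutDefault = 10 * time.Minute

	// Illustrative values: the node count above which a cluster is treated
	// as "large", and the extended timeout such clusters get.
	largeClusterMinNodesNumber          = 100
	loadBalancerPropagationTimeoutLarge = time.Hour
)

// GetServiceLoadBalancerPropagationTimeout returns how long load-balancer
// changes may take to propagate, scaled up for large clusters.
func GetServiceLoadBalancerPropagationTimeout(cs clientset.Interface) time.Duration {
	if nodes, err := e2enode.GetReadySchedulableNodes(cs); err == nil && len(nodes.Items) > largeClusterMinNodesNumber {
		return loadBalancerPropagationTimeoutLarge
	}
	return LoadBalancerPropagationTimeoutDefault
}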

View File

@@ -167,7 +167,7 @@ var _ = SIGDescribe("Firewall rule", func() {
// Send requests from outside of the cluster because internal traffic is whitelisted
ginkgo.By("Accessing the external service ip from outside, all non-master nodes should be reached")
-err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerPropagationTimeoutDefault, nodesSet)
+err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet)
framework.ExpectNoError(err)
// Check if there are overlapping tags on the firewall that extend beyond just the vms in our cluster
@@ -188,12 +188,12 @@ var _ = SIGDescribe("Firewall rule", func() {
nodesSet.Insert(nodesNames[0])
gce.SetInstanceTags(cloudConfig, nodesNames[0], zone, removedTags)
// Make sure traffic is recovered before exit
-err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerPropagationTimeoutDefault, nodesSet)
+err = testHitNodesFromOutside(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet)
framework.ExpectNoError(err)
}()
ginkgo.By("Accessing serivce through the external ip and examine got no response from the node without tags")
-err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.LoadBalancerPropagationTimeoutDefault, nodesSet, 15)
+err = testHitNodesFromOutsideWithCount(svcExternalIP, firewallTestHTTPPort, e2eservice.GetServiceLoadBalancerPropagationTimeout(cs), nodesSet, 15)
framework.ExpectNoError(err)
})

View File

@@ -172,7 +172,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
}, map[string]string{})
ginkgo.By(fmt.Sprintf("waiting for Ingress %s to get instance group annotation", name))
-pollErr := wait.Poll(2*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
+propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet)
+pollErr := wait.Poll(2*time.Second, propagationTimeout, func() (bool, error) {
ing, err := f.ClientSet.NetworkingV1beta1().Ingresses(ns).Get(context.TODO(), name, metav1.GetOptions{})
framework.ExpectNoError(err)
annotations := ing.Annotations
@@ -287,6 +288,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
ginkgo.It("should be able to switch between IG and NEG modes", func() {
var err error
+propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet)
ginkgo.By("Create a basic HTTP ingress using NEG")
jig.CreateIngress(filepath.Join(e2eingress.IngressManifestPath, "neg"), ns, map[string]string{}, map[string]string{})
jig.WaitForIngress(true)
@@ -301,7 +303,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
-err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(5*time.Second, propagationTimeout, func() (bool, error) {
if err := gceController.BackendServiceUsingIG(jig.GetServicePorts(false)); err != nil {
framework.Logf("ginkgo.Failed to verify IG backend service: %v", err)
return false, nil
@@ -319,7 +321,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
_, err = f.ClientSet.CoreV1().Services(ns).Update(context.TODO(), &svc, metav1.UpdateOptions{})
framework.ExpectNoError(err)
}
-err = wait.Poll(5*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(5*time.Second, propagationTimeout, func() (bool, error) {
if err := gceController.BackendServiceUsingNEG(jig.GetServicePorts(false)); err != nil {
framework.Logf("ginkgo.Failed to verify NEG backend service: %v", err)
return false, nil
@@ -406,7 +408,8 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
_, err = f.ClientSet.AppsV1().Deployments(ns).UpdateScale(context.TODO(), name, scale, metav1.UpdateOptions{})
framework.ExpectNoError(err)
-err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
+propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet)
+err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
if err != nil {
return false, nil
@@ -423,7 +426,7 @@ var _ = SIGDescribe("Loadbalancing: L7", func() {
deploy.Spec.Template.Spec.TerminationGracePeriodSeconds = &gracePeriod
_, err = f.ClientSet.AppsV1().Deployments(ns).Update(context.TODO(), deploy, metav1.UpdateOptions{})
framework.ExpectNoError(err)
-err = wait.Poll(10*time.Second, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.Poll(10*time.Second, propagationTimeout, func() (bool, error) {
res, err := jig.GetDistinctResponseFromIngress()
framework.ExpectNoError(err)
deploy, err := f.ClientSet.AppsV1().Deployments(ns).Get(context.TODO(), name, metav1.GetOptions{})
@@ -832,12 +835,14 @@ func executeStaticIPHttpsOnlyTest(f *framework.Framework, jig *e2eingress.TestJi
e2eingress.IngressAllowHTTPKey: "false",
}, map[string]string{})
+propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet)
ginkgo.By("waiting for Ingress to come up with ip: " + ip)
httpClient := e2eingress.BuildInsecureClient(e2eingress.IngressReqTimeout)
framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("https://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, false))
framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("https://%s/", ip), "", propagationTimeout, jig.PollInterval, httpClient, false))
ginkgo.By("should reject HTTP traffic")
framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("http://%s/", ip), "", e2eservice.LoadBalancerPollTimeout, jig.PollInterval, httpClient, true))
framework.ExpectNoError(e2eingress.PollURL(fmt.Sprintf("http://%s/", ip), "", propagationTimeout, jig.PollInterval, httpClient, true))
}
func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *e2eingress.TestJig, staticIPName string) {
@@ -850,14 +855,15 @@ func executeBacksideBacksideHTTPSTest(f *framework.Framework, jig *e2eingress.Te
}
}()
framework.ExpectNoError(err, "ginkgo.Failed to create re-encryption ingress")
+propagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(f.ClientSet)
ginkgo.By(fmt.Sprintf("Waiting for ingress %s to come up", ingCreated.Name))
-ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, e2eservice.LoadBalancerPollTimeout)
+ingIP, err := jig.WaitForIngressAddress(f.ClientSet, f.Namespace.Name, ingCreated.Name, propagationTimeout)
framework.ExpectNoError(err, "ginkgo.Failed to wait for ingress IP")
ginkgo.By(fmt.Sprintf("Polling on address %s and verify the backend is serving HTTPS", ingIP))
timeoutClient := &http.Client{Timeout: e2eingress.IngressReqTimeout}
-err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, e2eservice.LoadBalancerPollTimeout, func() (bool, error) {
+err = wait.PollImmediate(e2eservice.LoadBalancerPollInterval, propagationTimeout, func() (bool, error) {
resp, err := e2eingress.SimpleGET(timeoutClient, fmt.Sprintf("http://%s", ingIP), "")
if err != nil {
framework.Logf("SimpleGET failed: %v", err)

View File

@@ -108,13 +108,13 @@ type portsByPodName map[string][]int
// number of identical responses observed in a row. If affinity is not expected,
// the test keeps observing until different responses are seen. The function
// returns false only in case of unexpected errors.
-func checkAffinity(execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool {
+func checkAffinity(cs clientset.Interface, execPod *v1.Pod, serviceIP string, servicePort int, shouldHold bool) bool {
serviceIPPort := net.JoinHostPort(serviceIP, strconv.Itoa(servicePort))
curl := fmt.Sprintf(`curl -q -s --connect-timeout 2 http://%s/`, serviceIPPort)
cmd := fmt.Sprintf("for i in $(seq 0 %d); do echo; %s ; done", AffinityConfirmCount, curl)
timeout := AffinityTimeout
if execPod == nil {
-timeout = e2eservice.LoadBalancerPollTimeout
+timeout = e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
}
var tracker affinityTracker
// interval considering a maximum of 2 seconds per connection
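
checkAffinity gains the clientset parameter only so the execPod == nil branch, which sends requests from outside the cluster against a load balancer, can use the cluster-size-aware timeout; in-cluster checks keep the short AffinityTimeout. The later hunks in this file show both call modes:

// In-cluster check: an exec pod is provided, so AffinityTimeout applies.
framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true)
// Load-balancer check: nil exec pod, so the propagation timeout applies.
framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, true), true)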
@@ -2345,7 +2345,8 @@ var _ = SIGDescribe("Services", func() {
ginkgo.By("health check should be reconciled")
pollInterval := framework.Poll * 10
-if pollErr := wait.PollImmediate(pollInterval, e2eservice.LoadBalancerPropagationTimeoutDefault, func() (bool, error) {
+loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
+if pollErr := wait.PollImmediate(pollInterval, loadBalancerPropagationTimeout, func() (bool, error) {
hc, err := gceCloud.GetHTTPHealthCheck(hcName)
if err != nil {
framework.Logf("ginkgo.Failed to get HttpHealthCheck(%q): %v", hcName, err)
@@ -3004,8 +3005,9 @@ var _ = SIGDescribe("ESIPP [Slow] [DisabledForLargeClusters]", func() {
cmd := fmt.Sprintf(`curl -q -s --connect-timeout 30 %v`, path)
var srcIP string
+loadBalancerPropagationTimeout := e2eservice.GetServiceLoadBalancerPropagationTimeout(cs)
ginkgo.By(fmt.Sprintf("Hitting external lb %v from pod %v on node %v", ingressIP, pausePod.Name, pausePod.Spec.NodeName))
-if pollErr := wait.PollImmediate(framework.Poll, e2eservice.LoadBalancerPropagationTimeoutDefault, func() (bool, error) {
+if pollErr := wait.PollImmediate(framework.Poll, loadBalancerPropagationTimeout, func() (bool, error) {
stdout, err := framework.RunHostCmd(pausePod.Namespace, pausePod.Name, cmd)
if err != nil {
framework.Logf("got err: %v, retry until timeout", err)
@@ -3210,7 +3212,7 @@ func execAffinityTestForSessionAffinityTimeout(f *framework.Framework, cs client
framework.ExpectNoError(err)
// the service should be sticky until the timeout expires
-framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
+framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true)
// but it should return different hostnames after the timeout expires
// try several times to avoid the probability that we hit the same pod twice
hosts := sets.NewString()
@@ -3277,19 +3279,19 @@ func execAffinityTestForNonLBServiceWithOptionalTransition(f *framework.Framewor
framework.ExpectNoError(err)
if !isTransitionTest {
-framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
+framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true)
}
if isTransitionTest {
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, false), true)
+framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, false), true)
_, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(execPod, svcIP, servicePort, true), true)
+framework.ExpectEqual(checkAffinity(cs, execPod, svcIP, servicePort, true), true)
}
}
@@ -3327,19 +3329,19 @@ func execAffinityTestForLBServiceWithOptionalTransition(f *framework.Framework,
port := int(svc.Spec.Ports[0].Port)
if !isTransitionTest {
-framework.ExpectEqual(checkAffinity(nil, ingressIP, port, true), true)
+framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, true), true)
}
if isTransitionTest {
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityNone
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(nil, ingressIP, port, false), true)
+framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, false), true)
svc, err = jig.UpdateService(func(svc *v1.Service) {
svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
})
framework.ExpectNoError(err)
-framework.ExpectEqual(checkAffinity(nil, ingressIP, port, true), true)
+framework.ExpectEqual(checkAffinity(cs, nil, ingressIP, port, true), true)
}
}