e2e: block all master addresses

This way we can be sure that the kubelet can't communicate with the
master, even if it falls back to the internal/external IP (which seems
to be the case with DNS).

Issue #56787
Justin SB
2018-11-05 22:19:05 -05:00
parent 5656ac754d
commit 0400871df9
5 changed files with 56 additions and 29 deletions
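
Every call site in the diffs below follows the same shape: fetch every known master address, register a deferred loop that unblocks each address, then block each address before running the rest of the test. A minimal sketch of that pattern, with a hypothetical wrapper name (partitionNodeFromMaster does not exist in the e2e framework; GetAllMasterAddresses, BlockNetwork and UnblockNetwork are the real helpers touched or used below):

package example

import (
	clientset "k8s.io/client-go/kubernetes"

	"k8s.io/kubernetes/test/e2e/framework"
)

// partitionNodeFromMaster is a hypothetical illustration of the pattern each
// test adopts in this commit: the unblock loop is registered with defer
// *before* any rule is inserted, so cleanup runs even if blocking fails
// halfway through the address list.
func partitionNodeFromMaster(c clientset.Interface, host string, testBody func()) {
	masterAddresses := framework.GetAllMasterAddresses(c)
	defer func() {
		// Remove the rule for every address, mirroring the defer blocks below.
		for _, masterAddress := range masterAddresses {
			framework.UnblockNetwork(host, masterAddress)
		}
	}()
	for _, masterAddress := range masterAddresses {
		framework.BlockNetwork(host, masterAddress)
	}
	testBody() // run assertions while the node is partitioned from the master
}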


@@ -198,10 +198,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
 			host, err := framework.GetNodeExternalIP(&node)
 			framework.ExpectNoError(err)
-			master := framework.GetMasterAddress(c)
+			masterAddresses := framework.GetAllMasterAddresses(c)
 			defer func() {
 				By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
-				framework.UnblockNetwork(host, master)
+				for _, masterAddress := range masterAddresses {
+					framework.UnblockNetwork(host, masterAddress)
+				}

 				if CurrentGinkgoTestDescription().Failed {
 					return
@@ -214,7 +216,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 				}
 			}()
-			framework.BlockNetwork(host, master)
+			for _, masterAddress := range masterAddresses {
+				framework.BlockNetwork(host, masterAddress)
+			}

 			By("Expect to observe node and pod status change from Ready to NotReady after network partition")
 			expectNodeReadiness(false, newNode)
@@ -576,10 +580,12 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 			By(fmt.Sprintf("Block traffic from node %s to the master", node.Name))
 			host, err := framework.GetNodeExternalIP(&node)
 			framework.ExpectNoError(err)
-			master := framework.GetMasterAddress(c)
+			masterAddresses := framework.GetAllMasterAddresses(c)
 			defer func() {
 				By(fmt.Sprintf("Unblock traffic from node %s to the master", node.Name))
-				framework.UnblockNetwork(host, master)
+				for _, masterAddress := range masterAddresses {
+					framework.UnblockNetwork(host, masterAddress)
+				}

 				if CurrentGinkgoTestDescription().Failed {
 					return
@@ -589,7 +595,9 @@ var _ = SIGDescribe("Network Partition [Disruptive] [Slow]", func() {
 				expectNodeReadiness(true, newNode)
 			}()
-			framework.BlockNetwork(host, master)
+			for _, masterAddress := range masterAddresses {
+				framework.BlockNetwork(host, masterAddress)
+			}

 			By("Expect to observe node and pod status change from Ready to NotReady after network partition")
 			expectNodeReadiness(false, newNode)


@@ -952,7 +952,7 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 	if err != nil {
 		Failf("Error getting node external ip : %v", err)
 	}
-	master := GetMasterAddress(c)
+	masterAddresses := GetAllMasterAddresses(c)
 	By(fmt.Sprintf("block network traffic from node %s to the master", node.Name))
 	defer func() {
 		// This code will execute even if setting the iptables rule failed.
@@ -960,14 +960,18 @@ func TestUnderTemporaryNetworkFailure(c clientset.Interface, ns string, node *v1
 		// had been inserted. (yes, we could look at the error code and ssh error
 		// separately, but I prefer to stay on the safe side).
 		By(fmt.Sprintf("Unblock network traffic from node %s to the master", node.Name))
-		UnblockNetwork(host, master)
+		for _, masterAddress := range masterAddresses {
+			UnblockNetwork(host, masterAddress)
+		}
 	}()

 	Logf("Waiting %v to ensure node %s is ready before beginning test...", resizeNodeReadyTimeout, node.Name)
 	if !WaitForNodeToBe(c, node.Name, v1.NodeReady, true, resizeNodeReadyTimeout) {
 		Failf("Node %s did not become ready within %v", node.Name, resizeNodeReadyTimeout)
 	}

-	BlockNetwork(host, master)
+	for _, masterAddress := range masterAddresses {
+		BlockNetwork(host, masterAddress)
+	}
 	Logf("Waiting %v for node %s to be not ready after simulated network failure", resizeNodeNotReadyTimeout, node.Name)
 	if !WaitForNodeToBe(c, node.Name, v1.NodeReady, false, resizeNodeNotReadyTimeout) {


@@ -4934,19 +4934,28 @@ func getMaster(c clientset.Interface) Address {
 	return master
 }

-// GetMasterAddress returns the hostname/external IP/internal IP as appropriate for e2e tests on a particular provider
-// which is the address of the interface used for communication with the kubelet.
-func GetMasterAddress(c clientset.Interface) string {
+// GetAllMasterAddresses returns all IP addresses on which the kubelet can reach the master.
+// It may return internal and external IPs, even if we expect for
+// e.g. internal IPs to be used (issue #56787), so that we can be
+// sure to block the master fully during tests.
+func GetAllMasterAddresses(c clientset.Interface) []string {
 	master := getMaster(c)
+
+	ips := sets.NewString()
 	switch TestContext.Provider {
 	case "gce", "gke":
-		return master.externalIP
+		if master.externalIP != "" {
+			ips.Insert(master.externalIP)
+		}
+		if master.internalIP != "" {
+			ips.Insert(master.internalIP)
+		}
 	case "aws":
-		return awsMasterIP
+		ips.Insert(awsMasterIP)
 	default:
 		Failf("This test is not supported for provider %s and should be disabled", TestContext.Provider)
 	}
-	return ""
+	return ips.List()
 }

 // GetNodeExternalIP returns node external IP concatenated with port 22 for ssh
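
GetAllMasterAddresses above collects the addresses in a string set from k8s.io/apimachinery/pkg/util/sets, so the returned slice is de-duplicated and sorted; that matters if, for example, the internal and external IP ever turn out to be the same. A small standalone illustration of that behavior (the IPs are made up):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	ips := sets.NewString()
	ips.Insert("10.128.0.2")  // internal IP
	ips.Insert("35.192.0.10") // external IP
	ips.Insert("10.128.0.2")  // inserting the same address again is a no-op
	fmt.Println(ips.List())   // sorted, de-duplicated: [10.128.0.2 35.192.0.10]
}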


@@ -173,16 +173,18 @@ var _ = SIGDescribe("Firewall rule", func() {
 		By("Checking well known ports on master and nodes are not exposed externally")
 		nodeAddrs := framework.NodeAddresses(nodes, v1.NodeExternalIP)
 		Expect(len(nodeAddrs)).NotTo(BeZero())
-		masterAddr := framework.GetMasterAddress(cs)
-		flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
-		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, gce.FirewallTestTcpTimeout)
-		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
-		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
-		Expect(flag).To(BeTrue())
-		flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout)
-		Expect(flag).To(BeTrue())
+		masterAddresses := framework.GetAllMasterAddresses(cs)
+		for _, masterAddr := range masterAddresses {
+			flag, _ := framework.TestNotReachableHTTPTimeout(masterAddr, ports.InsecureKubeControllerManagerPort, gce.FirewallTestTcpTimeout)
+			Expect(flag).To(BeTrue())
+			flag, _ = framework.TestNotReachableHTTPTimeout(masterAddr, ports.SchedulerPort, gce.FirewallTestTcpTimeout)
+			Expect(flag).To(BeTrue())
+			flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletPort, gce.FirewallTestTcpTimeout)
+			Expect(flag).To(BeTrue())
+			flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.KubeletReadOnlyPort, gce.FirewallTestTcpTimeout)
+			Expect(flag).To(BeTrue())
+			flag, _ = framework.TestNotReachableHTTPTimeout(nodeAddrs[0], ports.ProxyStatusPort, gce.FirewallTestTcpTimeout)
+			Expect(flag).To(BeTrue())
+		}
 	})
 })


@@ -128,12 +128,14 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 			//	host, err = framework.GetNodeInternalIP(&node)
 			// }
 			framework.ExpectNoError(err)
-			master := framework.GetMasterAddress(cs)
+			masterAddresses := framework.GetAllMasterAddresses(cs)
 			taint := newUnreachableNoExecuteTaint()
 			defer func() {
 				By(fmt.Sprintf("Unblocking traffic from node %s to the master", node.Name))
-				framework.UnblockNetwork(host, master)
+				for _, masterAddress := range masterAddresses {
+					framework.UnblockNetwork(host, masterAddress)
+				}

 				if CurrentGinkgoTestDescription().Failed {
 					framework.Failf("Current e2e test has failed, so return from here.")
@@ -147,7 +149,9 @@ var _ = SIGDescribe("TaintBasedEvictions [Serial]", func() {
 				framework.ExpectNoError(err)
 			}()
-			framework.BlockNetwork(host, master)
+			for _, masterAddress := range masterAddresses {
+				framework.BlockNetwork(host, masterAddress)
+			}

 			By(fmt.Sprintf("Expecting to see node %q becomes NotReady", nodeName))
 			if !framework.WaitForNodeToBeNotReady(cs, nodeName, time.Minute*3) {