proxy/iptables: add unit test Test_HealthCheckNodePortWhenTerminating for ensuring health check node port fails when all local endpoints are terminating
Signed-off-by: Andrew Sy Kim <kim.andrewsy@gmail.com>
This commit is contained in:
parent 14cc201b58
commit ed4fe07375
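
The behavior exercised below, in rough terms: only endpoints that are local to the node and still Ready count toward a service's health check node port, so once every local endpoint is terminating (Ready=false) the count drops to zero and the health check starts failing. The snippet that follows is a minimal, hypothetical sketch of that counting rule written for this description only; localReadyEndpointCount is not a function introduced by this change, and the direct use of the discovery v1beta1 types here is an assumption.

package endpointsketch // illustration only, not part of this commit

import (
	discovery "k8s.io/api/discovery/v1beta1"
)

// localReadyEndpointCount is a hypothetical helper: it counts the endpoints
// of a slice that are scheduled on the given node and still Ready. The test
// below expects the health check node port bookkeeping to follow this rule:
// one tracked service while the count is non-zero, none once every local
// endpoint is terminating (Ready=false).
func localReadyEndpointCount(eps []discovery.Endpoint, hostname string) int {
	count := 0
	for _, ep := range eps {
		// only endpoints local to this node feed the health check node port
		if ep.Topology["kubernetes.io/hostname"] != hostname {
			continue
		}
		// terminating endpoints report Ready=false and are not counted
		if ep.Conditions.Ready != nil && *ep.Conditions.Ready {
			count++
		}
	}
	return count
}
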
@@ -3160,6 +3160,119 @@ COMMIT
	assert.NotEqual(t, expectedIPTables, fp.iptablesData.String())
}

// Test_HealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
func Test_HealthCheckNodePortWhenTerminating(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt, true)
	fp.OnServiceSynced()
	fp.OnEndpointsSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.20.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}, { // not ready endpoints should be ignored
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			Topology:   map[string]string{"kubernetes.io/hostname": testHostname},
		}},
	}

	fp.OnEndpointSliceAdd(endpointSlice)
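	// with ready, local endpoints present, exactly one service should be reported for health checking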
	result := fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 1 {
		t.Errorf("unexpected number of health check node ports, expected 1 but got: %d", len(result.HCEndpointsLocalIPSize))
	}

	// set all endpoints to terminating
	endpointSliceTerminating := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses: []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(false),
			},
			Topology: map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses: []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			Topology: map[string]string{"kubernetes.io/hostname": testHostname},
		}, {
			Addresses: []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			Topology: map[string]string{"kubernetes.io/hostname": testHostname},
		}, { // not ready endpoints should be ignored
			Addresses: []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(false),
				Terminating: utilpointer.BoolPtr(true),
			},
			Topology: map[string]string{"kubernetes.io/hostname": testHostname},
		}},
	}

	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
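	// with every local endpoint terminating, no service should remain eligible for health checking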
	result = fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 0 {
		t.Errorf("unexpected number of health check node ports, expected 0 but got: %d", len(result.HCEndpointsLocalIPSize))
	}
}

func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{