proxy/userspace: respect minSyncInterval and simplify locking
The userspace proxy has no rate limiting, so when many services are in use it hammers iptables every time a service or endpoint change occurs. Instead, build up a map of changed services and process all of those changes at once, rather than on each incoming event. This also ensures that no long-running processing happens in the same call chain as the OnService* calls, since that blocks other handlers attached to the proxy's parent ServiceConfig object for long periods of time.

Locking can now be simplified too: the only accesses to the proxy's serviceMap happen from syncProxyRules(), so instead of locking in many functions, just lock once in syncProxyRules() like the other proxies do.

https://bugzilla.redhat.com/show_bug.cgi?id=1590589
https://bugzilla.redhat.com/show_bug.cgi?id=1689690
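The change-batching pattern the message describes can be sketched as follows. This is an illustrative reconstruction, not the verbatim proxier code: the changeTracker type and its mutex are hypothetical stand-ins, while serviceChange, the serviceChanges map, and the collapse rules mirror what TestOnServiceAddChangeMap in the diff below asserts.

package userspace

import (
	"sync"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/types"
)

// serviceChange pairs the oldest known previous state of a service with
// the most recent desired state, so a burst of events collapses into at
// most one pending change per service.
type serviceChange struct {
	previous *v1.Service // nil: the service did not exist before
	current  *v1.Service // nil: the service has been deleted
}

// changeTracker is a hypothetical stand-in for the relevant fields of the
// userspace Proxier.
type changeTracker struct {
	mu             sync.Mutex
	serviceChanges map[types.NamespacedName]*serviceChange
}

func newChangeTracker() *changeTracker {
	return &changeTracker{serviceChanges: make(map[types.NamespacedName]*serviceChange)}
}

// serviceChange queues one add/update/delete event. Handlers return
// immediately; iptables is only touched later, by the sync pass.
func (t *changeTracker) serviceChange(previous, current *v1.Service) {
	svc := current
	if svc == nil {
		svc = previous
	}
	name := types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}

	t.mu.Lock()
	defer t.mu.Unlock()
	if change, ok := t.serviceChanges[name]; ok {
		// Collapse into the pending change, keeping the oldest previous
		// state: add+update=add, update+update=update, update+del=del.
		change.current = current
		if change.previous == nil && change.current == nil {
			delete(t.serviceChanges, name) // add+del cancels out entirely
		}
	} else {
		t.serviceChanges[name] = &serviceChange{previous: previous, current: current}
	}
}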
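On the consuming side, the sync pass drains the whole batch at once; in the proxier this work lives in syncProxyRules(), the one place that now touches serviceMap, which is why a single lock there can replace the per-method locking used before. Again a sketch against the hypothetical changeTracker above:

// syncPass applies every pending change in one batch. The apply callback
// stands in for updating serviceMap and reprogramming iptables, which now
// happens once per pass instead of once per event.
func (t *changeTracker) syncPass(apply func(previous, current *v1.Service)) {
	t.mu.Lock()
	changes := t.serviceChanges
	t.serviceChanges = make(map[types.NamespacedName]*serviceChange)
	t.mu.Unlock()

	for _, change := range changes {
		apply(change.previous, change.current)
	}
}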
@@ -24,6 +24,7 @@ import (
 	"net/http/httptest"
 	"net/url"
 	"os"
+	"reflect"
 	"strconv"
 	"sync/atomic"
 	"testing"
@@ -33,6 +34,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/kubernetes/pkg/proxy"
 	ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
 	"k8s.io/utils/exec"
@@ -86,6 +88,16 @@ func waitForClosedPortUDP(p *Proxier, proxyPort int) error {
 	return fmt.Errorf("port %d still open", proxyPort)
 }
 
+func waitForServiceInfo(p *Proxier, service proxy.ServicePortName) (*ServiceInfo, bool) {
+	var svcInfo *ServiceInfo
+	var exists bool
+	wait.PollImmediate(50*time.Millisecond, 3*time.Second, func() (bool, error) {
+		svcInfo, exists = p.getServiceInfo(service)
+		return exists, nil
+	})
+	return svcInfo, exists
+}
+
 // udpEchoServer is a simple echo server in UDP, intended for testing the proxy.
 type udpEchoServer struct {
 	net.PacketConn
@@ -225,6 +237,15 @@ func waitForNumProxyClients(t *testing.T, s *ServiceInfo, want int, timeout time
 	t.Errorf("expected %d ProxyClients live, got %d", want, got)
 }
 
+func startProxier(p *Proxier, t *testing.T) {
+	go func() {
+		p.SyncLoop()
+	}()
+	waitForNumProxyLoops(t, p, 0)
+	p.OnServiceSynced()
+	p.OnEndpointsSynced()
+}
+
 func TestTCPProxy(t *testing.T) {
 	lb := NewLoadBalancerRR()
 	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
@@ -242,7 +263,7 @@ func TestTCPProxy(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
@@ -270,7 +291,7 @@ func TestUDPProxy(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
@@ -298,7 +319,7 @@ func TestUDPProxyTimeout(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
@@ -338,7 +359,7 @@ func TestMultiPortProxy(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfoP, err := p.addServiceOnPort(serviceP, "TCP", 0, time.Second)
@@ -368,7 +389,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	p.OnServiceAdd(&v1.Service{
@@ -384,7 +405,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
 		}}},
 	})
 	waitForNumProxyLoops(t, p, 2)
-	svcInfo, exists := p.getServiceInfo(serviceP)
+	svcInfo, exists := waitForServiceInfo(p, serviceP)
 	if !exists {
 		t.Fatalf("can't find serviceInfo for %s", serviceP)
 	}
@@ -392,7 +413,7 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
 		t.Errorf("unexpected serviceInfo for %s: %#v", serviceP, svcInfo)
 	}
 
-	svcInfo, exists = p.getServiceInfo(serviceQ)
+	svcInfo, exists = waitForServiceInfo(p, serviceQ)
 	if !exists {
 		t.Fatalf("can't find serviceInfo for %s", serviceQ)
 	}
@@ -408,7 +429,9 @@ func TestMultiPortOnServiceAdd(t *testing.T) {
 
 // Helper: Stops the proxy for the named service.
 func stopProxyByName(proxier *Proxier, service proxy.ServicePortName) error {
-	info, found := proxier.getServiceInfo(service)
+	proxier.mu.Lock()
+	defer proxier.mu.Unlock()
+	info, found := proxier.serviceMap[service]
 	if !found {
 		return fmt.Errorf("unknown service: %s", service)
 	}
@@ -432,7 +455,7 @@ func TestTCPProxyStop(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
@@ -477,7 +500,7 @@ func TestUDPProxyStop(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
@@ -501,9 +524,9 @@
 
 func TestTCPProxyUpdateDelete(t *testing.T) {
 	lb := NewLoadBalancerRR()
-	service := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
+	servicePortName := proxy.ServicePortName{NamespacedName: types.NamespacedName{Namespace: "testnamespace", Name: "echo"}, Port: "p"}
 	lb.OnEndpointsAdd(&v1.Endpoints{
-		ObjectMeta: metav1.ObjectMeta{Namespace: service.Namespace, Name: service.Name},
+		ObjectMeta: metav1.ObjectMeta{Namespace: servicePortName.Namespace, Name: servicePortName.Name},
 		Subsets: []v1.EndpointSubset{{
 			Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
 			Ports:     []v1.EndpointPort{{Name: "p", Port: tcpServerPort}},
@@ -516,29 +539,22 @@ func TestTCPProxyUpdateDelete(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
+	defer p.shutdown()
 
-	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
-	if err != nil {
-		t.Fatalf("error adding new service: %#v", err)
-	}
-	conn, err := net.Dial("tcp", joinHostPort("", svcInfo.proxyPort))
-	if err != nil {
-		t.Fatalf("error connecting to proxy: %v", err)
-	}
-	conn.Close()
-	waitForNumProxyLoops(t, p, 1)
-
-	p.OnServiceDelete(&v1.Service{
-		ObjectMeta: metav1.ObjectMeta{Name: service.Name, Namespace: service.Namespace},
+	service := &v1.Service{
+		ObjectMeta: metav1.ObjectMeta{Name: servicePortName.Name, Namespace: servicePortName.Namespace},
 		Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
 			Name:     "p",
-			Port:     int32(svcInfo.proxyPort),
+			Port:     9997,
 			Protocol: "TCP",
 		}}},
-	})
-	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
+	}
+
+	p.OnServiceAdd(service)
+	waitForNumProxyLoops(t, p, 1)
+	p.OnServiceDelete(service)
+	if err := waitForClosedPortTCP(p, int(service.Spec.Ports[0].Port)); err != nil {
 		t.Fatalf(err.Error())
 	}
 	waitForNumProxyLoops(t, p, 0)
@@ -561,7 +577,7 @@ func TestUDPProxyUpdateDelete(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
@@ -607,7 +623,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
@@ -644,7 +660,7 @@ func TestTCPProxyUpdateDeleteUpdate(t *testing.T) {
 			Protocol: "TCP",
 		}}},
 	})
-	svcInfo, exists := p.getServiceInfo(service)
+	svcInfo, exists := waitForServiceInfo(p, service)
 	if !exists {
 		t.Fatalf("can't find serviceInfo for %s", service)
 	}
@@ -670,7 +686,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
@@ -707,7 +723,7 @@ func TestUDPProxyUpdateDeleteUpdate(t *testing.T) {
 			Protocol: "UDP",
 		}}},
 	})
-	svcInfo, exists := p.getServiceInfo(service)
+	svcInfo, exists := waitForServiceInfo(p, service)
 	if !exists {
 		t.Fatalf("can't find serviceInfo")
 	}
@@ -732,7 +748,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
@@ -754,7 +770,7 @@ func TestTCPProxyUpdatePort(t *testing.T) {
 	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
 		t.Fatalf(err.Error())
 	}
-	svcInfo, exists := p.getServiceInfo(service)
+	svcInfo, exists := waitForServiceInfo(p, service)
 	if !exists {
 		t.Fatalf("can't find serviceInfo")
 	}
@@ -781,7 +797,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "UDP", 0, time.Second)
@@ -802,7 +818,7 @@ func TestUDPProxyUpdatePort(t *testing.T) {
 	if err := waitForClosedPortUDP(p, svcInfo.proxyPort); err != nil {
 		t.Fatalf(err.Error())
 	}
-	svcInfo, exists := p.getServiceInfo(service)
+	svcInfo, exists := waitForServiceInfo(p, service)
 	if !exists {
 		t.Fatalf("can't find serviceInfo")
 	}
@@ -827,7 +843,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
@@ -853,7 +869,7 @@ func TestProxyUpdatePublicIPs(t *testing.T) {
 	if err := waitForClosedPortTCP(p, svcInfo.proxyPort); err != nil {
 		t.Fatalf(err.Error())
 	}
-	svcInfo, exists := p.getServiceInfo(service)
+	svcInfo, exists := waitForServiceInfo(p, service)
 	if !exists {
 		t.Fatalf("can't find serviceInfo")
 	}
@@ -881,7 +897,7 @@ func TestProxyUpdatePortal(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	waitForNumProxyLoops(t, p, 0)
+	startProxier(p, t)
 	defer p.shutdown()
 
 	svcInfo, err := p.addServiceOnPort(service, "TCP", 0, time.Second)
@@ -909,7 +925,16 @@ func TestProxyUpdatePortal(t *testing.T) {
 		}}},
 	}
 	p.OnServiceUpdate(svcv0, svcv1)
-	_, exists := p.getServiceInfo(service)
+
+	// Wait for the service to be removed because it had an empty ClusterIP
+	var exists bool
+	for i := 0; i < 50; i++ {
+		_, exists = p.getServiceInfo(service)
+		if !exists {
+			break
+		}
+		time.Sleep(50 * time.Millisecond)
+	}
 	if exists {
 		t.Fatalf("service with empty ClusterIP should not be included in the proxy")
 	}
@@ -938,7 +963,7 @@ func TestProxyUpdatePortal(t *testing.T) {
 	}
 	p.OnServiceUpdate(svcv2, svcv3)
 	lb.OnEndpointsAdd(endpoint)
-	svcInfo, exists = p.getServiceInfo(service)
+	svcInfo, exists = waitForServiceInfo(p, service)
 	if !exists {
 		t.Fatalf("service with ClusterIP set not found in the proxy")
 	}
@@ -946,6 +971,172 @@ func TestProxyUpdatePortal(t *testing.T) {
 	waitForNumProxyLoops(t, p, 1)
 }
 
+type fakeRunner struct{}
+
+// assert fakeRunner implements asyncRunnerInterface
+var _ asyncRunnerInterface = &fakeRunner{}
+
+func (f fakeRunner) Run() {
+}
+
+func (f fakeRunner) Loop(stop <-chan struct{}) {
+}
+
+func TestOnServiceAddChangeMap(t *testing.T) {
+	fexec := makeFakeExec()
+
+	// Use long minSyncPeriod so we can test that immediate syncs work
+	p, err := createProxier(NewLoadBalancerRR(), net.ParseIP("0.0.0.0"), ipttest.NewFake(), fexec, net.ParseIP("127.0.0.1"), nil, time.Minute, time.Minute, udpIdleTimeoutForTest, newProxySocket)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Fake out sync runner
+	p.syncRunner = fakeRunner{}
+
+	serviceMeta := metav1.ObjectMeta{Namespace: "testnamespace", Name: "testname"}
+	service := &v1.Service{
+		ObjectMeta: serviceMeta,
+		Spec: v1.ServiceSpec{ClusterIP: "1.2.3.4", Ports: []v1.ServicePort{{
+			Name:     "p",
+			Port:     99,
+			Protocol: "TCP",
+		}}},
+	}
+
+	serviceUpdate := &v1.Service{
+		ObjectMeta: serviceMeta,
+		Spec: v1.ServiceSpec{ClusterIP: "1.2.3.5", Ports: []v1.ServicePort{{
+			Name:     "p",
+			Port:     100,
+			Protocol: "TCP",
+		}}},
+	}
+
+	serviceUpdate2 := &v1.Service{
+		ObjectMeta: serviceMeta,
+		Spec: v1.ServiceSpec{ClusterIP: "1.2.3.6", Ports: []v1.ServicePort{{
+			Name:     "p",
+			Port:     101,
+			Protocol: "TCP",
+		}}},
+	}
+
+	type onServiceTest struct {
+		detail         string
+		changes        []serviceChange
+		expectedChange *serviceChange
+	}
+
+	tests := []onServiceTest{
+		{
+			detail: "add",
+			changes: []serviceChange{
+				{current: service},
+			},
+			expectedChange: &serviceChange{
+				current: service,
+			},
+		},
+		{
+			detail: "add+update=add",
+			changes: []serviceChange{
+				{current: service},
+				{
+					previous: service,
+					current:  serviceUpdate,
+				},
+			},
+			expectedChange: &serviceChange{
+				current: serviceUpdate,
+			},
+		},
+		{
+			detail: "add+del=none",
+			changes: []serviceChange{
+				{current: service},
+				{previous: service},
+			},
+		},
+		{
+			detail: "update+update=update",
+			changes: []serviceChange{
+				{
+					previous: service,
+					current:  serviceUpdate,
+				},
+				{
+					previous: serviceUpdate,
+					current:  serviceUpdate2,
+				},
+			},
+			expectedChange: &serviceChange{
+				previous: service,
+				current:  serviceUpdate2,
+			},
+		},
+		{
+			detail: "update+del=del",
+			changes: []serviceChange{
+				{
+					previous: service,
+					current:  serviceUpdate,
+				},
+				{previous: serviceUpdate},
+			},
+			// change collapsing always keeps the oldest service
+			// info since correct unmerging depends on the least
+			// recent update, not the most current.
+			expectedChange: &serviceChange{
+				previous: service,
+			},
+		},
+		{
+			detail: "del+add=update",
+			changes: []serviceChange{
+				{previous: service},
+				{current: serviceUpdate},
+			},
+			expectedChange: &serviceChange{
+				previous: service,
+				current:  serviceUpdate,
+			},
+		},
+	}
+
+	for _, test := range tests {
+		for _, change := range test.changes {
+			p.serviceChange(change.previous, change.current, test.detail)
+		}
+
+		if test.expectedChange != nil {
+			if len(p.serviceChanges) != 1 {
+				t.Fatalf("[%s] expected 1 service change but found %d", test.detail, len(p.serviceChanges))
+			}
+			expectedService := test.expectedChange.current
+			if expectedService == nil {
+				expectedService = test.expectedChange.previous
+			}
+			svcName := types.NamespacedName{Namespace: expectedService.Namespace, Name: expectedService.Name}
+
+			change, ok := p.serviceChanges[svcName]
+			if !ok {
+				t.Fatalf("[%s] did not find service change for %v", test.detail, svcName)
+			}
+			if !reflect.DeepEqual(change.previous, test.expectedChange.previous) {
+				t.Fatalf("[%s] change previous service and expected previous service don't match\nchange: %+v\nexp: %+v", test.detail, change.previous, test.expectedChange.previous)
+			}
+			if !reflect.DeepEqual(change.current, test.expectedChange.current) {
+				t.Fatalf("[%s] change current service and expected current service don't match\nchange: %+v\nexp: %+v", test.detail, change.current, test.expectedChange.current)
+			}
+		} else {
+			if len(p.serviceChanges) != 0 {
+				t.Fatalf("[%s] expected no service changes but found %d", test.detail, len(p.serviceChanges))
+			}
+		}
+	}
+}
+
+func makeFakeExec() *fakeexec.FakeExec {
+	fcmd := fakeexec.FakeCmd{
+		CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
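To make the "update+del=del" case in the test table above concrete: after the first event the pending change is {previous: service, current: serviceUpdate}; the delete then clears current but leaves previous as the original service, because unwinding the change correctly requires the least recent state, the one actually in effect before the batch, not the intermediate update that was never synced.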