
The test was sorting the iptables output so as to not depend on the order that services get processed in, but this meant it wasn't checking the relative ordering of rules (and in fact, the ordering of the rules in the "expected" string was wrong, in a way that would break things if the rules had actually been generated in that order). Add a more complicated sorting function that sorts services alphabetically while preserving the ordering of rules within each service.
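
A minimal sketch of the grouping idea, for illustration only: the serviceName helper and the sample rules below are assumptions, not part of this change, and the real implementation is sortIPTablesRules in the file below (it moves matching lines between slices rather than sorting in place). The key point is that a *stable* sort keyed on the service name leaves services alphabetical while preserving each service's internal rule order.

package main

import (
	"fmt"
	"regexp"
	"sort"
)

// serviceName extracts the "ns/svc:port" token from a rule's comment.
// Hypothetical helper, used only for this sketch.
func serviceName(rule string) string {
	m := regexp.MustCompile(`--comment "?([^ "]*)`).FindStringSubmatch(rule)
	if m == nil {
		return ""
	}
	return m[1]
}

func main() {
	// Sample rules (abbreviated); ns1/svc1 has two rules whose relative
	// order must survive the sort.
	rules := []string{
		`-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" ...`,
		`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...`,
		`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" ...`,
	}
	// Stable sort: services end up alphabetical, but the two ns1/svc1
	// rules keep their original relative order.
	sort.SliceStable(rules, func(i, j int) bool {
		return serviceName(rules[i]) < serviceName(rules[j])
	})
	for _, r := range rules {
		fmt.Println(r)
	}
}
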
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package iptables

import (
	"bytes"
	"fmt"
	"net"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/metrics"

	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
	utilproxytest "k8s.io/kubernetes/pkg/proxy/util/testing"
	"k8s.io/kubernetes/pkg/util/async"
	"k8s.io/kubernetes/pkg/util/conntrack"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
	netutils "k8s.io/utils/net"
	utilpointer "k8s.io/utils/pointer"
)

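// checkAllLines parses the iptables-save output for table and asserts that
// each chain header line it contains matches the corresponding entry in
// expectedLines.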
func checkAllLines(t *testing.T, table utiliptables.Table, save []byte, expectedLines map[utiliptables.Chain]string) {
	chainLines := utiliptables.GetChainLines(table, save)
	for chain, lineBytes := range chainLines {
		line := string(lineBytes)
		if expected, exists := expectedLines[chain]; exists {
			if expected != line {
				t.Errorf("getChainLines expected chain line not present. For chain: %s Expected: %s Got: %s", chain, expected, line)
			}
		} else {
			t.Errorf("getChainLines returned unexpected chain: %s", chain)
		}
	}
}

func TestGetChainLines(t *testing.T) {
	iptablesSave := `# Generated by iptables-save v1.4.7 on Wed Oct 29 14:56:01 2014
*nat
:PREROUTING ACCEPT [2136997:197881818]
:POSTROUTING ACCEPT [4284525:258542680]
:OUTPUT ACCEPT [5901660:357267963]
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
COMMIT
# Completed on Wed Oct 29 14:56:01 2014`
	expected := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting:  ":PREROUTING ACCEPT [2136997:197881818]",
		utiliptables.ChainPostrouting: ":POSTROUTING ACCEPT [4284525:258542680]",
		utiliptables.ChainOutput:      ":OUTPUT ACCEPT [5901660:357267963]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(iptablesSave), expected)
}

func TestGetChainLinesMultipleTables(t *testing.T) {
	iptablesSave := `# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
*nat
:PREROUTING ACCEPT [2:138]
:INPUT ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
:POSTROUTING ACCEPT [0:0]
:DOCKER - [0:0]
:KUBE-NODEPORT-CONTAINER - [0:0]
:KUBE-NODEPORT-HOST - [0:0]
:KUBE-PORTALS-CONTAINER - [0:0]
:KUBE-PORTALS-HOST - [0:0]
:KUBE-SVC-1111111111111111 - [0:0]
:KUBE-SVC-2222222222222222 - [0:0]
:KUBE-SVC-3333333333333333 - [0:0]
:KUBE-SVC-4444444444444444 - [0:0]
:KUBE-SVC-5555555555555555 - [0:0]
:KUBE-SVC-6666666666666666 - [0:0]
-A PREROUTING -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-CONTAINER
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
-A PREROUTING -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-CONTAINER
-A OUTPUT -m comment --comment "handle ClusterIPs; NOTE: this must be before the NodePort rules" -j KUBE-PORTALS-HOST
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
-A OUTPUT -m addrtype --dst-type LOCAL -m comment --comment "handle service NodePorts; NOTE: this must be the last rule in the chain" -j KUBE-NODEPORT-HOST
-A POSTROUTING -s 10.246.1.0/24 ! -o cbr0 -j MASQUERADE
-A POSTROUTING -s 10.0.2.15/32 -d 10.0.2.15/32 -m comment --comment "handle pod connecting to self" -j MASQUERADE
-A KUBE-PORTALS-CONTAINER -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-CONTAINER -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-PORTALS-HOST -d 10.247.0.1/32 -p tcp -m comment --comment "portal for default/kubernetes:" -m state --state NEW -m tcp --dport 443 -j KUBE-SVC-5555555555555555
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p udp -m comment --comment "portal for kube-system/kube-dns:dns" -m state --state NEW -m udp --dport 53 -j KUBE-SVC-6666666666666666
-A KUBE-PORTALS-HOST -d 10.247.0.10/32 -p tcp -m comment --comment "portal for kube-system/kube-dns:dns-tcp" -m state --state NEW -m tcp --dport 53 -j KUBE-SVC-2222222222222222
-A KUBE-SVC-1111111111111111 -p udp -m comment --comment "kube-system/kube-dns:dns" -m recent --set --name KUBE-SVC-1111111111111111 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-2222222222222222 -m comment --comment "kube-system/kube-dns:dns-tcp" -j KUBE-SVC-3333333333333333
-A KUBE-SVC-3333333333333333 -p tcp -m comment --comment "kube-system/kube-dns:dns-tcp" -m recent --set --name KUBE-SVC-3333333333333333 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.246.1.2:53
-A KUBE-SVC-4444444444444444 -p tcp -m comment --comment "default/kubernetes:" -m recent --set --name KUBE-SVC-4444444444444444 --mask 255.255.255.255 --rsource -j DNAT --to-destination 10.245.1.2:443
-A KUBE-SVC-5555555555555555 -m comment --comment "default/kubernetes:" -j KUBE-SVC-4444444444444444
-A KUBE-SVC-6666666666666666 -m comment --comment "kube-system/kube-dns:dns" -j KUBE-SVC-1111111111111111
COMMIT
# Completed on Fri Aug 7 14:47:37 2015
# Generated by iptables-save v1.4.21 on Fri Aug 7 14:47:37 2015
*filter
:INPUT ACCEPT [17514:83115836]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [8909:688225]
:DOCKER - [0:0]
-A FORWARD -o cbr0 -j DOCKER
-A FORWARD -o cbr0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A FORWARD -i cbr0 ! -o cbr0 -j ACCEPT
-A FORWARD -i cbr0 -o cbr0 -j ACCEPT
COMMIT
`
	expected := map[utiliptables.Chain]string{
		utiliptables.ChainPrerouting:                    ":PREROUTING ACCEPT [2:138]",
		utiliptables.Chain("INPUT"):                     ":INPUT ACCEPT [0:0]",
		utiliptables.Chain("OUTPUT"):                    ":OUTPUT ACCEPT [0:0]",
		utiliptables.ChainPostrouting:                   ":POSTROUTING ACCEPT [0:0]",
		utiliptables.Chain("DOCKER"):                    ":DOCKER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-CONTAINER"):   ":KUBE-NODEPORT-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-NODEPORT-HOST"):        ":KUBE-NODEPORT-HOST - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-CONTAINER"):    ":KUBE-PORTALS-CONTAINER - [0:0]",
		utiliptables.Chain("KUBE-PORTALS-HOST"):         ":KUBE-PORTALS-HOST - [0:0]",
		utiliptables.Chain("KUBE-SVC-1111111111111111"): ":KUBE-SVC-1111111111111111 - [0:0]",
		utiliptables.Chain("KUBE-SVC-2222222222222222"): ":KUBE-SVC-2222222222222222 - [0:0]",
		utiliptables.Chain("KUBE-SVC-3333333333333333"): ":KUBE-SVC-3333333333333333 - [0:0]",
		utiliptables.Chain("KUBE-SVC-4444444444444444"): ":KUBE-SVC-4444444444444444 - [0:0]",
		utiliptables.Chain("KUBE-SVC-5555555555555555"): ":KUBE-SVC-5555555555555555 - [0:0]",
		utiliptables.Chain("KUBE-SVC-6666666666666666"): ":KUBE-SVC-6666666666666666 - [0:0]",
	}
	checkAllLines(t, utiliptables.TableNAT, []byte(iptablesSave), expected)
}

func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V4 UDP",
			svcName:     "v4-udp",
			svcIP:       "10.96.1.1",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "10.240.0.3:80",
		},
		{
			description: "V4 TCP",
			svcName:     "v4-tcp",
			svcIP:       "10.96.2.2",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "10.240.0.4:80",
		},
		{
			description: "V4 SCTP",
			svcName:     "v4-sctp",
			svcIP:       "10.96.3.3",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "10.240.0.5:80",
		},
		{
			description:  "V4 UDP, nothing to delete, benign error",
			svcName:      "v4-udp-nothing-to-delete",
			svcIP:        "10.96.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: conntrack.NoConnectionToDelete,
		},
		{
			description:  "V4 UDP, unexpected error, should be glogged",
			svcName:      "v4-udp-simulated-error",
			svcIP:        "10.96.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: "simulated error",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP.
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				simErr = fmt.Errorf(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)

		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := netutils.ParseIPSloppy(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}

func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V6 UDP",
			svcName:     "v6-udp",
			svcIP:       "fd00:1234::20",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "[2001:db8::2]:80",
		},
		{
			description: "V6 TCP",
			svcName:     "v6-tcp",
			svcIP:       "fd00:1234::30",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "[2001:db8::3]:80",
		},
		{
			description: "V6 SCTP",
			svcName:     "v6-sctp",
			svcIP:       "fd00:1234::40",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "[2001:db8::4]:80",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP.
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				simErr = fmt.Errorf(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewIPv6Fake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)

		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := netutils.ParseIPSloppy(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}

// fakeCloseable implements netutils.Closeable
type fakeCloseable struct{}

// Close fakes out the close() used by syncProxyRules to release a local port.
func (f *fakeCloseable) Close() error {
	return nil
}

// fakePortOpener implements portOpener.
type fakePortOpener struct {
	openPorts []*netutils.LocalPort
}

// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *netutils.LocalPort) (netutils.Closeable, error) {
	f.openPorts = append(f.openPorts, lp)
	return &fakeCloseable{}, nil
}

const testHostname = "test-hostname"

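// NewFakeProxier returns a Proxier backed by fake iptables, exec, and
// networking implementations, suitable for driving syncProxyRules in
// unit tests.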
func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	ipfamily := v1.IPv4Protocol
	if ipt.IsIPv6() {
		ipfamily = v1.IPv6Protocol
	}
	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/24", ipt)
	p := &Proxier{
		exec:                     &fakeexec.FakeExec{},
		serviceMap:               make(proxy.ServiceMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
		iptables:                 ipt,
		masqueradeMark:           "0x4000",
		localDetector:            detectLocal,
		hostname:                 testHostname,
		portsMap:                 make(map[netutils.LocalPort]netutils.Closeable),
		portMapper:               &fakePortOpener{[]*netutils.LocalPort{}},
		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             bytes.NewBuffer(nil),
		filterRules:              bytes.NewBuffer(nil),
		natChains:                bytes.NewBuffer(nil),
		natRules:                 bytes.NewBuffer(nil),
		nodePortAddresses:        make([]string, 0),
		networkInterfacer:        utilproxytest.NewFakeNetwork(),
	}
	p.setInitialized(true)
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}

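// hasSessionAffinityRule returns true if any rule uses the "recent" match
// module, which the proxier uses to implement session affinity.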
func hasSessionAffinityRule(rules []iptablestest.Rule) bool {
	for _, r := range rules {
		if _, ok := r[iptablestest.Recent]; ok {
			return true
		}
	}
	return false
}

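// hasJump returns true if any rule jumps to destChain, additionally
// requiring a match on destIP and/or destPort when they are non-empty
// or non-zero.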
func hasJump(rules []iptablestest.Rule, destChain, destIP string, destPort int) bool {
	destPortStr := strconv.Itoa(destPort)
	match := false
	for _, r := range rules {
		if r[iptablestest.Jump] == destChain {
			match = true
			if destIP != "" {
				if strings.Contains(r[iptablestest.Destination], destIP) && (strings.Contains(r[iptablestest.DPort], destPortStr) || r[iptablestest.DPort] == "") {
					return true
				}
				match = false
			}
			if destPort != 0 {
				if strings.Contains(r[iptablestest.DPort], destPortStr) && (strings.Contains(r[iptablestest.Destination], destIP) || r[iptablestest.Destination] == "") {
					return true
				}
				match = false
			}
		}
	}
	return match
}

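// hasSrcType returns true if any rule matches on the given addrtype
// source type.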
func hasSrcType(rules []iptablestest.Rule, srcType string) bool {
	for _, r := range rules {
		if r[iptablestest.SrcType] != srcType {
			continue
		}

		return true
	}

	return false
}

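// hasMasqRandomFully returns true if any MASQUERADE rule uses --random-fully.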
func hasMasqRandomFully(rules []iptablestest.Rule) bool {
	for _, r := range rules {
		if r[iptablestest.Masquerade] == "--random-fully" {
			return true
		}
	}
	return false
}

func TestHasJump(t *testing.T) {
	testCases := map[string]struct {
		rules     []iptablestest.Rule
		destChain string
		destIP    string
		destPort  int
		expected  bool
	}{
		"case 1": {
			// Match the 1st rule (both dest IP and dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "--dport ": "80", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "KUBE-MARK-MASQ"},
			},
			destChain: "REJECT",
			destIP:    "10.20.30.41",
			destPort:  80,
			expected:  true,
		},
		"case 2": {
			// Match the 2nd rule (dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "",
			destPort:  3001,
			expected:  true,
		},
		"case 3": {
			// Match both dest IP and dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "1.2.3.4",
			destPort:  80,
			expected:  true,
		},
		"case 4": {
			// Match dest IP but doesn't match dest Port
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "1.2.3.4",
			destPort:  8080,
			expected:  false,
		},
		"case 5": {
			// Match dest Port but doesn't match dest IP
			rules: []iptablestest.Rule{
				{"-d ": "1.2.3.4/32", "--dport ": "80", "-p ": "tcp", "-j ": "KUBE-XLB-GF53O3C2HZEXL2XN"},
			},
			destChain: "KUBE-XLB-GF53O3C2HZEXL2XN",
			destIP:    "10.20.30.40",
			destPort:  80,
			expected:  false,
		},
		"case 6": {
			// Match the 2nd rule (dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"-d ": "1.2.3.4/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "1.2.3.4",
			destPort:  8080,
			expected:  true,
		},
		"case 7": {
			// Match the 2nd rule (dest Port)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "1.2.3.4",
			destPort:  3001,
			expected:  true,
		},
		"case 8": {
			// Match the 1st rule (dest IP)
			rules: []iptablestest.Rule{
				{"-d ": "10.20.30.41/32", "-p ": "tcp", "-j ": "REJECT"},
				{"--dport ": "3001", "-p ": "tcp", "-j ": "REJECT"},
			},
			destChain: "REJECT",
			destIP:    "10.20.30.41",
			destPort:  8080,
			expected:  true,
		},
		"case 9": {
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-LWSOSDSHMKPJHHJV"},
			},
			destChain: "KUBE-SEP-LWSOSDSHMKPJHHJV",
			destIP:    "",
			destPort:  0,
			expected:  true,
		},
		"case 10": {
			rules: []iptablestest.Rule{
				{"-j ": "KUBE-SEP-FOO"},
			},
			destChain: "KUBE-SEP-BAR",
			destIP:    "",
			destPort:  0,
			expected:  false,
		},
	}

	for k, tc := range testCases {
		if got := hasJump(tc.rules, tc.destChain, tc.destIP, tc.destPort); got != tc.expected {
			t.Errorf("%v: expected %v, got %v", k, tc.expected, got)
		}
	}
}

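// hasDNAT returns true if any rule DNATs to the given endpoint.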
func hasDNAT(rules []iptablestest.Rule, endpoint string) bool {
	for _, r := range rules {
		if r[iptablestest.ToDest] == endpoint {
			return true
		}
	}
	return false
}

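// errorf logs the given rules for debugging and then fails the test with msg.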
func errorf(msg string, rules []iptablestest.Rule, t *testing.T) {
	for _, r := range rules {
		t.Logf("%q", r)
	}
	t.Errorf("%v", msg)
}

// findAllMatches takes an array of lines and a pattern with one parenthesized group, and
// returns a sorted array of all of the unique matches of the parenthesized group.
func findAllMatches(lines []string, pattern string) []string {
	regex := regexp.MustCompile(pattern)
	allMatches := sets.NewString()
	for _, line := range lines {
		match := regex.FindStringSubmatch(line)
		if len(match) == 2 {
			allMatches.Insert(match[1])
		}
	}
	return allMatches.List()
}

// moveMatchingLines moves lines that match pattern from input to output
func moveMatchingLines(pattern string, input, output []string) ([]string, []string) {
	var newIn []string
	regex := regexp.MustCompile(pattern)
	for _, line := range input {
		if regex.FindString(line) != "" {
			output = append(output, line)
		} else {
			newIn = append(newIn, line)
		}
	}
	return newIn, output
}

// sortIPTablesRules sorts `iptables-restore` output so as to not depend on the order that
// Services get processed in, while preserving the relative ordering of related rules.
func sortIPTablesRules(ruleData string) (string, error) {
	tables := strings.Split(strings.TrimPrefix(ruleData, "\n"), "COMMIT\n")
	if len(tables) != 3 || tables[2] != "" {
		return "", fmt.Errorf("wrong number of tables (%d) in ruleData\n%s", len(tables)-1, ruleData)
	}
	tables = tables[:2]
	var output []string

	for _, table := range tables {
		lines := strings.Split(strings.Trim(table, "\n"), "\n")

		// Move "*TABLENAME" line
		lines, output = moveMatchingLines(`^\*`, lines, output)

		// findAllMatches() returns a sorted list of unique matches. So for
		// each of the following, we find all the matches for the regex, then
		// for each unique match (in sorted order), move all of the lines that
		// contain that match.

		// Move and sort ":CHAINNAME" lines
		for _, chainName := range findAllMatches(lines, `^(:[^ ]*) `) {
			lines, output = moveMatchingLines(chainName, lines, output)
		}

		// Move KUBE-NODEPORTS rules for each service, sorted by service name
		for _, nextNodePortService := range findAllMatches(lines, `-A KUBE-NODEPORTS.*--comment "?([^ ]*)`) {
			lines, output = moveMatchingLines(fmt.Sprintf(`^-A KUBE-NODEPORTS.*%s`, nextNodePortService), lines, output)
		}

		// Move KUBE-SERVICES rules for each service, sorted by service name
		for _, nextService := range findAllMatches(lines, `-A KUBE-SERVICES.*--comment "?([^ ]*)`) {
			lines, output = moveMatchingLines(fmt.Sprintf(`^-A KUBE-SERVICES.*%s`, nextService), lines, output)
		}

		// Move remaining chains, sorted by chain name
		for _, nextChain := range findAllMatches(lines, `(-A KUBE-[^ ]* )`) {
			lines, output = moveMatchingLines(nextChain, lines, output)
		}

		// There should not be anything left, but if there is, just move it over now
		// and it will show up in the diff later.
		_, output = moveMatchingLines(".", lines, output)

		// The "COMMIT" line got eaten by strings.Split() above, so put it back
		output = append(output, "COMMIT")
	}

	// Input ended with a "\n", so make sure the output does too
	output = append(output, "")

	return strings.Join(output, "\n"), nil
}

// assertIPTablesRulesEqual asserts that the generated rules in result match the rules in
// expected, ignoring irrelevant ordering differences.
func assertIPTablesRulesEqual(t *testing.T, expected, result string) {
	expected, err := sortIPTablesRules(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
	result, err = sortIPTablesRules(result)
	if err != nil {
		t.Fatalf("%s", err)
	}

	assert.Equal(t, expected, result)
}

func Test_sortIPTablesRules(t *testing.T) {
	for _, tc := range []struct {
		name   string
		input  string
		output string
		error  string
	}{
		{
			name: "basic test using each match type",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 1.2.3.4/28 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
			output: `
*filter
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-SERVICES - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 1.2.3.4/28 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
COMMIT
`,
		},
		{
			name: "not enough tables",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
`,
			error: "wrong number of tables (1)",
		},
		{
			name: "too many tables",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*mangle
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
`,
			error: "wrong number of tables (3)",
		},
		{
			name: "correctly match same service name in different styles of comments",
			input: `
*filter
COMMIT
*nat
:KUBE-SERVICES - [0:0]
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
COMMIT
`,
			output: `
*filter
COMMIT
*nat
:KUBE-SERVICES - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
COMMIT
`,
		},
		{
			name: "unexpected junk lines are preserved",
			input: `
*filter
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-AAAAA - [0:0]
:KUBE-ZZZZZ - [0:0]
:WHY-IS-THIS-CHAIN-HERE - [0:0]
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
COMMIT
`,
			output: `
*filter
COMMIT
*nat
:KUBE-AAAAA - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-ZZZZZ - [0:0]
:WHY-IS-THIS-CHAIN-HERE - [0:0]
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
COMMIT
`,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			out, err := sortIPTablesRules(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				} else {
					assert.Equal(t, strings.TrimPrefix(tc.output, "\n"), out)
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// TestOverallIPTablesRulesWithMultipleServices creates 4 types of services: ClusterIP,
|
|
// LoadBalancer, ExternalIP and NodePort and verifies if the NAT table rules created
|
|
// are exactly the same as what is expected. This test provides an overall view of how
|
|
// the NAT table rules look like with the different jumps.
|
|
func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) {
|
|
ipt := iptablestest.NewFake()
|
|
fp := NewFakeProxier(ipt)
|
|
metrics.RegisterMetrics()
|
|
tcpProtocol := v1.ProtocolTCP
|
|
|
|
makeServiceMap(fp,
|
|
// create ClusterIP service
|
|
makeTestService("ns1", "svc1", func(svc *v1.Service) {
|
|
svc.Spec.ClusterIP = "10.20.30.41"
|
|
svc.Spec.Ports = []v1.ServicePort{{
|
|
Name: "p80",
|
|
Port: 80,
|
|
Protocol: v1.ProtocolTCP,
|
|
}}
|
|
}),
|
|
// create LoadBalancer service
|
|
makeTestService("ns2", "svc2", func(svc *v1.Service) {
|
|
svc.Spec.Type = "LoadBalancer"
|
|
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
|
|
svc.Spec.ClusterIP = "10.20.30.42"
|
|
svc.Spec.Ports = []v1.ServicePort{{
|
|
Name: "p80",
|
|
Port: 80,
|
|
Protocol: v1.ProtocolTCP,
|
|
NodePort: 3001,
|
|
}}
|
|
svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
|
|
IP: "1.2.3.4",
|
|
}}
|
|
// Also ensure that invalid LoadBalancerSourceRanges will not result
|
|
// in a crash.
|
|
svc.Spec.ExternalIPs = []string{"1.2.3.4"}
|
|
svc.Spec.LoadBalancerSourceRanges = []string{" 1.2.3.4/28"}
|
|
svc.Spec.HealthCheckNodePort = 30000
|
|
}),
|
|
// create NodePort service
|
|
makeTestService("ns3", "svc3", func(svc *v1.Service) {
|
|
svc.Spec.Type = "NodePort"
|
|
svc.Spec.ClusterIP = "10.20.30.43"
|
|
svc.Spec.Ports = []v1.ServicePort{{
|
|
Name: "p80",
|
|
Port: 80,
|
|
Protocol: v1.ProtocolTCP,
|
|
NodePort: 3001,
|
|
}}
|
|
}),
|
|
// create ExternalIP service
|
|
makeTestService("ns4", "svc4", func(svc *v1.Service) {
|
|
svc.Spec.Type = "NodePort"
|
|
svc.Spec.ClusterIP = "10.20.30.44"
|
|
svc.Spec.ExternalIPs = []string{"50.60.70.81"}
|
|
svc.Spec.Ports = []v1.ServicePort{{
|
|
Name: "p80",
|
|
Port: 80,
|
|
Protocol: v1.ProtocolTCP,
|
|
TargetPort: intstr.FromInt(80),
|
|
}}
|
|
}),
|
|
)
|
|
populateEndpointSlices(fp,
|
|
// create ClusterIP service endpoints
|
|
makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.180.0.1"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr("p80"),
|
|
Port: utilpointer.Int32(80),
|
|
Protocol: &tcpProtocol,
|
|
}}
|
|
}),
|
|
// create LoadBalancer endpoints
|
|
makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.180.0.2"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr("p80"),
|
|
Port: utilpointer.Int32(80),
|
|
Protocol: &tcpProtocol,
|
|
}}
|
|
}),
|
|
// create NodePort service endpoints
|
|
makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.180.0.3"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr("p80"),
|
|
Port: utilpointer.Int32(80),
|
|
Protocol: &tcpProtocol,
|
|
}}
|
|
}),
|
|
// create ExternalIP service endpoints
|
|
makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.180.0.4"},
|
|
}, {
|
|
Addresses: []string{"10.180.0.5"},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr("p80"),
|
|
Port: utilpointer.Int32(80),
|
|
Protocol: &tcpProtocol,
|
|
}}
|
|
}),
|
|
)
|
|
|
|
fp.syncProxyRules()
|
|
|
|
expected := `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4/32 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 10.20.30.42/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 1.2.3.4/28 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 10.20.30.43/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 10.20.30.44/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())

	nNatRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// 42 here is test specific and corresponds to the number of -A lines after `*nat` in `expected`.
	if nNatRules != 42.0 {
		t.Fatalf("Wrong number of nat rules: expected 42 received %f", nNatRules)
	}
}

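// TestClusterIPReject verifies that a ClusterIP service with no endpoints gets a
// REJECT rule in KUBE-SERVICES and no rules in its per-service chain.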
func TestClusterIPReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	fp.syncProxyRules()

	svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP))))
	svcRules := ipt.GetRules(svcChain)
	if len(svcRules) != 0 {
		errorf(fmt.Sprintf("Unexpected rule for chain %v service %v without endpoints", svcChain, svcPortName), svcRules, t)
	}
	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}

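// TestClusterIPEndpointsJump verifies the full jump path for a ClusterIP service
// with one endpoint: KUBE-SERVICES jumps to the service chain, which jumps to
// KUBE-MARK-MASQ for off-cluster sources and to the endpoint chain, which DNATs
// to the endpoint.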
func TestClusterIPEndpointsJump(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	epStr := fmt.Sprintf("%s:%d", epIP, svcPort)
	svcChain := string(servicePortChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP))))
	epChain := string(servicePortEndpointChainName(svcPortName.String(), strings.ToLower(string(v1.ProtocolTCP)), epStr))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, svcChain, svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from KUBE-SERVICES to %v chain", svcChain), kubeSvcRules, t)
	}

	svcRules := ipt.GetRules(svcChain)
	if !hasJump(svcRules, string(KubeMarkMasqChain), svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from %v to KUBE-MARK-MASQ chain", svcChain), svcRules, t)
	}

	if !hasJump(svcRules, epChain, "", 0) {
		errorf(fmt.Sprintf("Failed to jump to ep chain %v", epChain), svcRules, t)
	}
	epRules := ipt.GetRules(epChain)
	if !hasDNAT(epRules, epStr) {
		errorf(fmt.Sprintf("Endpoint chain %v lacks DNAT to %v", epChain, epStr), epRules, t)
	}
}

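// TestLoadBalancer verifies that traffic to a load balancer ingress IP jumps to
// the firewall chain, which in turn jumps to the service chain and KUBE-MARK-MASQ.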
func TestLoadBalancer(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.ExternalIPs = []string{svcLBIP}
			svc.Spec.LoadBalancerSourceRanges = []string{" 1.2.3.4/28"}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}

	fwRules := ipt.GetRules(fwChain)
	if !hasJump(fwRules, svcChain, "", 0) || !hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to svc chain %v", fwChain, svcChain), fwRules, t)
	}
}

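// TestNodePort verifies that KUBE-NODEPORTS jumps to the service chain for the
// node port and that non-local node port traffic is masqueraded.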
func TestNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
	fp.nodePortAddresses = []string{}

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))

	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, svcChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeNodePortRules, t)
	}
	expectedNodePortNonLocalTrafficMasqueradeRule := `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ`
	svcRules := ipt.GetRules(svcChain)
	if !strings.Contains(fp.iptablesData.String(), expectedNodePortNonLocalTrafficMasqueradeRule) {
		errorf(fmt.Sprintf("Didn't find the masquerade rule for node port non-local traffic in svc chain %v", svcChain), svcRules, t)
	}
}

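// TestHealthCheckNodePort verifies that an ACCEPT rule for the health check
// node port is installed in KUBE-NODEPORTS.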
func TestHealthCheckNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.42"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)

	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(16, 32)}}
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth1", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf1, addrs1)
	fp.nodePortAddresses = []string{"127.0.0.1/16"}

	fp.syncProxyRules()

	kubeNodePortsRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortsRules, iptablestest.Accept, "", svcHealthCheckNodePort) {
		errorf("Failed to find Accept rule", kubeNodePortsRules, t)
	}
}

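// TestMasqueradeRule verifies that the -j MASQUERADE rule in KUBE-POSTROUTING
// uses --random-fully exactly when the underlying iptables supports it.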
func TestMasqueradeRule(t *testing.T) {
	for _, testcase := range []bool{false, true} {
		ipt := iptablestest.NewFake().SetHasRandomFully(testcase)
		fp := NewFakeProxier(ipt)
		fp.syncProxyRules()

		postRoutingRules := ipt.GetRules(string(kubePostroutingChain))
		if !hasJump(postRoutingRules, "MASQUERADE", "", 0) {
			errorf(fmt.Sprintf("Failed to find -j MASQUERADE in %s chain", kubePostroutingChain), postRoutingRules, t)
		}
		if hasMasqRandomFully(postRoutingRules) != testcase {
			probs := map[bool]string{false: "found", true: "did not find"}
			errorf(fmt.Sprintf("%s --random-fully in -j MASQUERADE rule in %s chain when HasRandomFully()==%v", probs[testcase], kubePostroutingChain, testcase), postRoutingRules, t)
		}
	}
}

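// TestExternalIPsReject verifies that traffic to the external IP of a service
// with no endpoints is rejected.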
func TestExternalIPsReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "ClusterIP"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)

	fp.syncProxyRules()

	kubeSvcRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(kubeSvcRules, iptablestest.Reject, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for externalIP %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}

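// TestOnlyLocalExternalIPs verifies that, with externalTrafficPolicy=Local, the
// lb chain for an external IP only jumps to endpoint chains for local endpoints.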
func TestOnlyLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	// epIP1 has no NodeName, so it is non-local; epIP2 is on this host.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))

	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, lbChain, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to xlb chain %v", lbChain), kubeSvcRules, t)
	}

	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
}

// TestNonLocalExternalIPs verifies that the masquerade rule is added to the
// service chain, so that packets to external IPs get SNATed, when
// externalTrafficPolicy is Cluster and the traffic is not local.
func TestNonLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcExternalIPs := "50.60.70.81"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()
	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))
	svcChain := string(servicePortChainName(svcPortName.String(), proto))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, svcChain, svcExternalIPs, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to svc chain %v", svcChain), kubeSvcRules, t)
	}

	svcRules := ipt.GetRules(svcChain)
	if len(svcRules) != 4 {
		t.Errorf("expected svcChain %v to have 4 rules, got %v", svcChain, len(svcRules))
	}
	if !hasJump(svcRules, string(KubeMarkMasqChain), svcIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump from %v to KUBE-MARK-MASQ chain", svcChain), svcRules, t)
	}
	expectedExternalIPNonLocalTrafficMasqueradeRule := `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 50.60.70.81/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`

	if !strings.Contains(fp.iptablesData.String(), expectedExternalIPNonLocalTrafficMasqueradeRule) {
		errorf(fmt.Sprintf("Didn't find the masquerade rule for external-ip non-local traffic in svc chain %v", svcChain), svcRules, t)
	}

	lbRules := ipt.GetRules(lbChain)
	if len(lbRules) != 0 {
		t.Errorf("expected svclbChain %v to have 0 rules, got %v", lbChain, len(lbRules))
	}
}

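// TestNodePortReject verifies that node port traffic for a service with no
// endpoints is rejected.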
func TestNodePortReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	fp.syncProxyRules()

	kubeSvcRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(kubeSvcRules, iptablestest.Reject, svcIP, svcNodePort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for service %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}

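// TestLoadBalancerReject verifies that load balancer traffic for a service with
// no endpoints is rejected via KUBE-EXTERNAL-SERVICES rather than KUBE-SERVICES.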
func TestLoadBalancerReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)

	fp.syncProxyRules()

	kubeSvcExtRules := ipt.GetRules(string(kubeExternalServicesChain))
	if !hasJump(kubeSvcExtRules, iptablestest.Reject, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find a %v rule for LoadBalancer %v with no endpoints", iptablestest.Reject, svcPortName), kubeSvcExtRules, t)
	}

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if hasJump(kubeSvcRules, iptablestest.Reject, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Found a %v rule for LoadBalancer %v with no endpoints in kubeServicesChain", iptablestest.Reject, svcPortName), kubeSvcRules, t)
	}
}

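// TestOnlyLocalLoadBalancing verifies that, with externalTrafficPolicy=Local,
// load balancer traffic skips masquerading in the firewall chain, only reaches
// local endpoints, and honors session affinity.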
func TestOnlyLocalLoadBalancing(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)

	// epIP1 has no NodeName, so it is non-local; epIP2 is on this host.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	fwChain := string(serviceFirewallChainName(svcPortName.String(), proto))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))

	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))

	kubeSvcRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeSvcRules, fwChain, svcLBIP, svcPort) {
		errorf(fmt.Sprintf("Failed to find jump to firewall chain %v", fwChain), kubeSvcRules, t)
	}

	fwRules := ipt.GetRules(fwChain)
	if !hasJump(fwRules, lbChain, "", 0) {
		errorf(fmt.Sprintf("Failed to find jump from firewall chain %v to lb chain %v", fwChain, lbChain), fwRules, t)
	}
	if hasJump(fwRules, string(KubeMarkMasqChain), "", 0) {
		errorf(fmt.Sprintf("Found jump from fw chain %v to MASQUERADE", fwChain), fwRules, t)
	}

	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
	if !hasSessionAffinityRule(lbRules) {
		errorf(fmt.Sprintf("Didn't find session affinity rule from lb chain %v", lbChain), lbRules, t)
	}
}

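// The following two tests run the shared onlyLocalNodePorts checks without and
// with a cluster CIDR configured.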
func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	// With no cluster CIDR configured, local traffic cannot be detected by
	// source CIDR, so use a no-op local detector.
	fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
	onlyLocalNodePorts(t, fp, ipt)
}

func TestOnlyLocalNodePorts(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	onlyLocalNodePorts(t, fp, ipt)
}

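// onlyLocalNodePorts verifies that, with externalTrafficPolicy=Local, node port
// traffic is routed through the lb chain and only local endpoints receive it.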
func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables) {
	svcIP := "10.20.30.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)

	// epIP1 has no NodeName, so it is non-local; epIP2 is on this host.
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	epStrNonLocal := fmt.Sprintf("%s:%d", epIP1, svcPort)
	epStrLocal := fmt.Sprintf("%s:%d", epIP2, svcPort)
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	itf := net.Interface{Index: 0, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{&net.IPNet{IP: netutils.ParseIPSloppy("10.20.30.51"), Mask: net.CIDRMask(24, 32)}}
	fp.networkInterfacer.(*utilproxytest.FakeNetwork).AddInterfaceAddr(&itf, addrs)
	fp.nodePortAddresses = []string{"10.20.30.0/24"}

	fp.syncProxyRules()

	proto := strings.ToLower(string(v1.ProtocolTCP))
	lbChain := string(serviceLBChainName(svcPortName.String(), proto))

	nonLocalEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrNonLocal))
	localEpChain := string(servicePortEndpointChainName(svcPortName.String(), proto, epStrLocal))

	kubeNodePortRules := ipt.GetRules(string(kubeNodePortsChain))
	if !hasJump(kubeNodePortRules, lbChain, "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to lb chain %v", lbChain), kubeNodePortRules, t)
	}
	if !hasJump(kubeNodePortRules, string(KubeMarkMasqChain), "", svcNodePort) {
		errorf(fmt.Sprintf("Failed to find jump to %s chain for node port %d", KubeMarkMasqChain, svcNodePort), kubeNodePortRules, t)
	}

	kubeServiceRules := ipt.GetRules(string(kubeServicesChain))
	if !hasJump(kubeServiceRules, string(kubeNodePortsChain), "10.20.30.51", 0) {
		errorf(fmt.Sprintf("Failed to find jump to KUBE-NODEPORTS chain %v", string(kubeNodePortsChain)), kubeServiceRules, t)
	}

	svcChain := string(servicePortChainName(svcPortName.String(), proto))
	lbRules := ipt.GetRules(lbChain)
	if hasJump(lbRules, nonLocalEpChain, "", 0) {
		errorf(fmt.Sprintf("Found jump from lb chain %v to non-local ep %v", lbChain, epStrNonLocal), lbRules, t)
	}
	if !hasJump(lbRules, svcChain, "", 0) || !hasSrcType(lbRules, "LOCAL") {
		errorf(fmt.Sprintf("Did not find jump from lb chain %v to svc %v with src-type LOCAL", lbChain, svcChain), lbRules, t)
	}
	if !hasJump(lbRules, localEpChain, "", 0) {
		errorf(fmt.Sprintf("Didn't find jump from lb chain %v to local ep %v", lbChain, epStrLocal), lbRules, t)
	}
}

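// TestComputeProbability checks computeProbability against known values and
// verifies that the result is strictly monotonic in the number of endpoints.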
func TestComputeProbability(t *testing.T) {
	expectedProbabilities := map[int]string{
		1:      "1.0000000000",
		2:      "0.5000000000",
		10:     "0.1000000000",
		100:    "0.0100000000",
		1000:   "0.0010000000",
		10000:  "0.0001000000",
		100000: "0.0000100000",
		100001: "0.0000099999",
	}

	for num, expected := range expectedProbabilities {
		actual := computeProbability(num)
		if actual != expected {
			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
		}
	}

	prevProbability := float64(0)
	for i := 100000; i > 1; i-- {
		currProbability, err := strconv.ParseFloat(computeProbability(i), 64)
		if err != nil {
			t.Fatalf("Error parsing float probability for %d: %v", i, err)
		}
		if currProbability <= prevProbability {
			t.Fatalf("Probability unexpectedly <= previous probability for %d: (%0.10f <= %0.10f)", i, currProbability, prevProbability)
		}
		prevProbability = currProbability
	}
}

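// makeTestService returns a Service with the given name and namespace,
// customized by svcFunc.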
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
		Spec:   v1.ServiceSpec{},
		Status: v1.ServiceStatus{},
	}
	svcFunc(svc)
	return svc
}

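// addTestPort appends a ServicePort with the given attributes to array and
// returns the result.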
func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
	svcPort := v1.ServicePort{
		Name:       name,
		Protocol:   protocol,
		Port:       port,
		NodePort:   nodeport,
		TargetPort: intstr.FromInt(targetPort),
	}
	return append(array, svcPort)
}

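// TestBuildServiceMapAddRemove verifies serviceMap updates across service adds,
// updates, and deletes, including health check node ports and stale UDP
// cluster IPs.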
func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	services := []*v1.Service{
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.16.55.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
		}),
		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
			svc.Spec.ClusterIP = "172.16.55.10"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
		}),
		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.11"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.4"},
				},
			}
		}),
		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.16.55.12"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "10.1.2.3"},
				},
			}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.HealthCheckNodePort = 345
		}),
	}

	for i := range services {
		fp.OnServiceAdd(services[i])
	}
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 10 {
		t.Errorf("expected service map length 10, got %v", fp.serviceMap)
	}

	// The only-local-loadbalancer ones get added
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
	} else {
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
		}
	}

	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Remove some stuff
	// oneService is a modification of services[0] with its first port removed.
	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
	})

	fp.OnServiceUpdate(services[0], oneService)
	fp.OnServiceDelete(services[1])
	fp.OnServiceDelete(services[2])
	fp.OnServiceDelete(services[3])

	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", fp.serviceMap)
	}

	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
	}

	// All services but one were deleted. While you'd expect only the ClusterIPs
	// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of its ServicePorts was deleted.
	expectedStaleUDPServices := []string{"172.16.55.10", "172.16.55.4", "172.16.55.11", "172.16.55.12"}
	if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
	}
	for _, ip := range expectedStaleUDPServices {
		if !result.UDPStaleClusterIP.Has(ip) {
			t.Errorf("expected stale UDP service %s", ip)
		}
	}
}

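// TestBuildServiceMapServiceHeadless verifies that headless services are
// omitted from the service map.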
func TestBuildServiceMapServiceHeadless(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = v1.ClusterIPNone
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
		}),
		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = v1.ClusterIPNone
		}),
	)

	// Headless services should be ignored
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %d", len(fp.serviceMap))
	}

	// No proxied services, so no healthchecks
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts))
	}

	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}

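// TestBuildServiceMapServiceTypeExternalName verifies that ExternalName
// services are omitted from the service map.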
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeExternalName
			svc.Spec.ClusterIP = "172.16.55.4" // Should be ignored
			svc.Spec.ExternalName = "foo2.bar.com"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
		}),
	)

	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %v", fp.serviceMap)
	}
	// No proxied services, so no healthchecks
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP)
	}
}

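// TestBuildServiceMapServiceUpdate verifies service map, health check port, and
// stale-UDP bookkeeping as a service changes between ClusterIP and LoadBalancer.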
func TestBuildServiceMapServiceUpdate(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
	})
	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeLoadBalancer
		svc.Spec.ClusterIP = "172.16.55.4"
		svc.Spec.LoadBalancerIP = "5.6.7.8"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{
				{IP: "10.1.2.3"},
			},
		}
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		svc.Spec.HealthCheckNodePort = 345
	})

	fp.OnServiceAdd(servicev1)

	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Change service to load-balancer
	fp.OnServiceUpdate(servicev1, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}

	// No change; make sure the service map stays the same and there are
	// no health-check changes
	fp.OnServiceUpdate(servicev2, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}

	// And back to ClusterIP
	fp.OnServiceUpdate(servicev2, servicev1)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// No ports were removed, so nothing stale
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}

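// populateEndpointSlices feeds the given EndpointSlices to the proxier as add
// events.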
func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
	for i := range allEndpointSlices {
		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
	}
}

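// makeTestEndpointSlice returns an EndpointSlice for the given service and
// slice number, customized by epsFunc.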
func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
	eps := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
			Namespace: namespace,
			Labels:    map[string]string{discovery.LabelServiceName: name},
		},
	}
	epsFunc(eps)
	return eps
}

func makeNSN(namespace, name string) types.NamespacedName {
	return types.NamespacedName{Namespace: namespace, Name: name}
}

func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
	return proxy.ServicePortName{
		NamespacedName: makeNSN(ns, name),
		Port:           port,
		Protocol:       protocol,
	}
}

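// makeServiceMap feeds the given services to the proxier as add events and
// marks services as synced.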
func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
	for i := range allServices {
		proxier.OnServiceAdd(allServices[i])
	}

	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.servicesSynced = true
}

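// compareEndpointsMaps fails the test if newMap does not match the expected
// endpoints map for test case tci.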
func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) {
	if len(newMap) != len(expected) {
		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
	}
	for x := range expected {
		if len(newMap[x]) != len(expected[x]) {
			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
		} else {
			for i := range expected[x] {
				newEp, ok := newMap[x][i].(*endpointsInfo)
				if !ok {
					t.Errorf("Failed to cast endpointsInfo")
					continue
				}
				if newEp.Endpoint != expected[x][i].Endpoint ||
					newEp.IsLocal != expected[x][i].IsLocal ||
					newEp.protocol != expected[x][i].protocol ||
					newEp.chainName != expected[x][i].chainName {
					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
				}
			}
		}
	}
}

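// Test_updateEndpointsMap verifies EndpointsMap updates across EndpointSlice
// changes, including stale endpoints, stale service names, and health check
// counts.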
func Test_updateEndpointsMap(t *testing.T) {
|
|
var nodeName = testHostname
|
|
udpProtocol := v1.ProtocolUDP
|
|
|
|
emptyEndpointSlices := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
|
|
}
|
|
subset1 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
subset2 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.2"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
namedPortLocal := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1,
|
|
func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}),
|
|
}
|
|
namedPort := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subset1),
|
|
}
|
|
namedPortRenamed := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1,
|
|
func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11-2"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}),
|
|
}
|
|
namedPortRenumbered := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1,
|
|
func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(22),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}),
|
|
}
|
|
namedPortsLocalNoLocal := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1,
|
|
func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
}, {
|
|
Addresses: []string{"1.1.1.2"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}),
|
|
}
|
|
multipleSubsets := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subset1),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, subset2),
|
|
}
|
|
subsetLocal := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.2"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
multipleSubsetsWithLocal := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subset1),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
|
|
}
|
|
subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
subset3 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.3"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p13"),
|
|
Port: utilpointer.Int32(13),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, subset3),
|
|
}
|
|
subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
}, {
|
|
Addresses: []string{"1.1.1.2"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.3"},
|
|
}, {
|
|
Addresses: []string{"1.1.1.4"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p13"),
|
|
Port: utilpointer.Int32(13),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p14"),
|
|
Port: utilpointer.Int32(14),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"2.2.2.1"},
|
|
}, {
|
|
Addresses: []string{"2.2.2.2"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p21"),
|
|
Port: utilpointer.Int32(21),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p22"),
|
|
Port: utilpointer.Int32(22),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
|
|
makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
|
|
}
|
|
complexSubset1 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"2.2.2.2"},
|
|
NodeName: &nodeName,
|
|
}, {
|
|
Addresses: []string{"2.2.2.22"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p22"),
|
|
Port: utilpointer.Int32(22),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset2 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"2.2.2.3"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p23"),
|
|
Port: utilpointer.Int32(23),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset3 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"4.4.4.4"},
|
|
NodeName: &nodeName,
|
|
}, {
|
|
Addresses: []string{"4.4.4.5"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p44"),
|
|
Port: utilpointer.Int32(44),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset4 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"4.4.4.6"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p45"),
|
|
Port: utilpointer.Int32(45),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset5 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.1"},
|
|
}, {
|
|
Addresses: []string{"1.1.1.11"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset6 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"1.1.1.2"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p122"),
|
|
Port: utilpointer.Int32(122),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset7 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"3.3.3.3"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p33"),
|
|
Port: utilpointer.Int32(33),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset8 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"4.4.4.4"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p44"),
|
|
Port: utilpointer.Int32(44),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexBefore := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subset1),
|
|
nil,
|
|
makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
|
|
makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
|
|
nil,
|
|
makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
|
|
makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
|
|
}
|
|
complexAfter := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
|
|
nil,
|
|
nil,
|
|
makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
|
|
makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
|
|
nil,
|
|
}
|
|
|
|
testCases := []struct {
|
|
// previousEndpoints and currentEndpoints are used to call appropriate
|
|
// handlers OnEndpoints* (based on whether corresponding values are nil
|
|
// or non-nil) and must be of equal length.
		previousEndpoints         []*discovery.EndpointSlice
		currentEndpoints          []*discovery.EndpointSlice
		oldEndpoints              map[proxy.ServicePortName][]*endpointsInfo
		expectedResult            map[proxy.ServicePortName][]*endpointsInfo
		expectedStaleEndpoints    []proxy.ServiceEndpoint
		expectedStaleServiceNames map[proxy.ServicePortName]bool
		expectedHealthchecks      map[types.NamespacedName]int
	}{{
		// Case[0]: nothing
		oldEndpoints:              map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult:            map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[1]: no change, named port, local
		previousEndpoints: namedPortLocal,
		currentEndpoints:  namedPortLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[2]: no change, multiple subsets
		previousEndpoints: multipleSubsets,
		currentEndpoints:  multipleSubsets,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[3]: no change, multiple subsets, multiple ports, local
		previousEndpoints: multipleSubsetsMultiplePortsLocal,
		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
		previousEndpoints: multipleSubsetsIPsPorts,
		currentEndpoints:  multipleSubsetsIPsPorts,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 2,
			makeNSN("ns2", "ep2"): 1,
		},
	}, {
		// Case[5]: add an Endpoints
		previousEndpoints: []*discovery.EndpointSlice{nil},
		currentEndpoints:  namedPortLocal,
		oldEndpoints:      map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[6]: remove an Endpoints
		previousEndpoints: namedPortLocal,
		currentEndpoints:  []*discovery.EndpointSlice{nil},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "1.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[7]: add an IP and port
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortsLocalNoLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[8]: remove an IP and port
		previousEndpoints: namedPortsLocalNoLocal,
		currentEndpoints:  namedPort,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "1.1.1.2:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}, {
			Endpoint:        "1.1.1.1:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}, {
			Endpoint:        "1.1.1.2:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[9]: add a subset
		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
		currentEndpoints:  multipleSubsetsWithLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[10]: remove a subset
		previousEndpoints: multipleSubsets,
		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "1.1.1.2:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[11]: rename a port
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortRenamed,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "1.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[12]: renumber a port
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortRenumbered,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "1.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[13]: complex add and remove
		previousEndpoints: complexBefore,
		currentEndpoints:  complexAfter,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "2.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "3.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "4.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "2.2.2.2:22",
			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
		}, {
			Endpoint:        "2.2.2.22:22",
			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
		}, {
			Endpoint:        "2.2.2.3:23",
			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
		}, {
			Endpoint:        "4.4.4.5:44",
			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
		}, {
			Endpoint:        "4.4.4.6:45",
			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns4", "ep4"): 1,
		},
	}, {
		// Case[14]: change from 0 endpoint address to 1 unnamed port
		previousEndpoints: emptyEndpointSlices,
		currentEndpoints:  namedPort,
		oldEndpoints:      map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "1.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{},
	},
	}
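
	// The driver below maps each (previous, current) pair onto the proxier's
	// EndpointSlice handlers: a nil previous slice becomes an Add, a nil
	// current slice becomes a Delete, and a non-nil pair becomes an Update.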
	for tci, tc := range testCases {
		ipt := iptablestest.NewFake()
		fp := NewFakeProxier(ipt)
		fp.hostname = nodeName

		// First check that after adding all previous versions of endpoints,
		// fp.endpointsMap matches tc.oldEndpoints.
		for i := range tc.previousEndpoints {
			if tc.previousEndpoints[i] != nil {
				fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
			}
		}
		fp.endpointsMap.Update(fp.endpointsChanges)
		compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints)

		// Now let's call the appropriate handlers to get to the state we want to be in.
		if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
			t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
		}

		for i := range tc.previousEndpoints {
			prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
			switch {
			case prev == nil:
				fp.OnEndpointSliceAdd(curr)
			case curr == nil:
				fp.OnEndpointSliceDelete(prev)
			default:
				fp.OnEndpointSliceUpdate(prev, curr)
			}
		}
		result := fp.endpointsMap.Update(fp.endpointsChanges)
		newMap := fp.endpointsMap
		compareEndpointsMaps(t, tci, newMap, tc.expectedResult)
		if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) {
			t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints)
		}
		for _, x := range tc.expectedStaleEndpoints {
			found := false
			for _, stale := range result.StaleEndpoints {
				if stale == x {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints)
			}
		}
		if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) {
			t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames)
		}
		for svcName := range tc.expectedStaleServiceNames {
			found := false
			for _, stale := range result.StaleServiceNames {
				if stale == svcName {
					found = true
				}
			}
			if !found {
				t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames)
			}
		}
		if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) {
			t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize)
		}
	}
}

// The majority of EndpointSlice specific tests are not iptables specific and focus on
// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the
// iptables proxier supports translating EndpointSlices to iptables output.
func TestEndpointSliceE2E(t *testing.T) {
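	// With three ready endpoints, uniform balancing is encoded as a chain of
	// statistic-mode rules: the first SEP matches with probability 1/3, the
	// second with 1/2 of the remaining 2/3 (again 1/3), and the last rule
	// catches the final 1/3 unconditionally. The fourth endpoint is not
	// ready, so its SEP chain exists but is never jumped to.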
	expectedIPTablesWithSlice := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.20.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node2"),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node3"),
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr("node4"),
		}},
	}

	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assert.Equal(t, expectedIPTablesWithSlice, fp.iptablesData.String())

	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}

func TestHealthCheckNodePortE2E(t *testing.T) {
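	// With externalTrafficPolicy=Local the proxier emits a health check
	// ACCEPT rule on port 30000 and routes node port traffic through a
	// KUBE-XLB chain whose balancing rule targets only the endpoint local
	// to this node (10.0.1.1).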
	expectedIPTables := `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -j KUBE-XLB-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.20.1.1",
			Selector:              map[string]string{"foo": "bar"},
			Ports:                 []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), NodePort: 30010, Protocol: v1.ProtocolTCP}},
			Type:                  "LoadBalancer",
			HealthCheckNodePort:   30000,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
		},
	}
	fp.OnServiceAdd(svc)

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node2"),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node3"),
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr("node4"),
		}},
	}
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assert.Equal(t, expectedIPTables, fp.iptablesData.String())

	fp.OnServiceDelete(svc)
	fp.syncProxyRules()
	assert.NotEqual(t, expectedIPTables, fp.iptablesData.String())
}

// Test_HealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
func Test_HealthCheckNodePortWhenTerminating(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.20.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, { // not ready endpoints should be ignored
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}},
	}

	fp.OnEndpointSliceAdd(endpointSlice)
	result := fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 1 {
		t.Errorf("unexpected number of health check node ports, expected 1 but got: %d", len(result.HCEndpointsLocalIPSize))
	}

	// set all endpoints to terminating
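	// Endpoints that are no longer Ready, whether merely unready or
	// terminating but still serving, do not count toward the health check
	// node port total.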
	endpointSliceTerminating := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses: []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(false),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, {
			Addresses: []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, {
			Addresses: []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, { // not ready endpoints should be ignored
			Addresses: []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(false),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}},
	}

	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
	result = fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 0 {
		t.Errorf("unexpected number of health check node ports, expected 0 but got: %d", len(result.HCEndpointsLocalIPSize))
	}
}

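// UDP has no connection teardown, so when a UDP service gains endpoints,
// kube-proxy must flush the conntrack entries for each of its stale
// destinations (ClusterIP, ExternalIP, LoadBalancer IP and NodePort);
// otherwise existing flows would keep hitting the old entries.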
func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	cmdOutput := "1 flow entries have been deleted"
	cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, nil }

	// Delete ClusterIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete ExternalIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete LoadBalancerIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete NodePort entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	svcIP := "10.20.30.41"
	extIP := "1.1.1.1"
	lbIngressIP := "2.2.2.2"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolUDP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{extIP}
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolUDP,
				NodePort: int32(nodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: lbIngressIP,
			}}
		}),
	)

	fp.syncProxyRules()
	if fexec.CommandCalls != 0 {
		t.Fatalf("Creating a service without endpoints must not clear conntrack entries")
	}

	epIP := "10.180.0.1"
	udpProtocol := v1.ProtocolUDP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &udpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	if fexec.CommandCalls != 4 {
		t.Fatalf("Updating a UDP service with new endpoints must clear UDP entries 4 times: ClusterIP, NodePort, ExternalIP and LB")
	}

	// the order is not guaranteed, so we have to compare the strings in any order
	expectedCommands := []string{
		// Delete ClusterIP conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", svcIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete ExternalIP conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", extIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete LoadBalancerIP conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", lbIngressIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete NodePort conntrack entries
		fmt.Sprintf("conntrack -D -p %s --dport %d", strings.ToLower(string(v1.ProtocolUDP)), nodePort),
	}
	actualCommands := []string{
		strings.Join(fcmd.CombinedOutputLog[0], " "),
		strings.Join(fcmd.CombinedOutputLog[1], " "),
		strings.Join(fcmd.CombinedOutputLog[2], " "),
		strings.Join(fcmd.CombinedOutputLog[3], " "),
	}
	sort.Strings(expectedCommands)
	sort.Strings(actualCommands)

	if !reflect.DeepEqual(expectedCommands, actualCommands) {
		t.Errorf("Expected commands: %v, but executed %v", expectedCommands, actualCommands)
	}
}

func TestProxierMetricsIptablesTotalRules(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	tcpProtocol := v1.ProtocolTCP

	metrics.RegisterMetrics()
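	// IptablesRulesTotal is a gauge vector labeled by table; after each sync
	// it should equal the number of rules the proxier wrote to that table,
	// which the comments below enumerate rule by rule.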

	svcIP := "10.20.30.41"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	fp.syncProxyRules()

	nFilterRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j REJECT
	// -A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 31201 -j REJECT
	// -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT

	if nFilterRules != 6.0 {
		t.Fatalf("Wrong number of filter rules: expected 6, received %f", nFilterRules)
	}

	nNatRules, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}

	// -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
	// -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
	// -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
	// -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
	// -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
	if nNatRules != 5.0 {
		t.Fatalf("Wrong number of nat rules: expected 5, received %f", nNatRules)
	}

	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.0.0.2"},
			}, {
				Addresses: []string{"10.0.0.5"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()
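
	// Once endpoints exist, the two "has no endpoints" REJECT rules disappear,
	// leaving only the four KUBE-FORWARD rules in the filter table.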
	nFilterRules, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// -A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	// -A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
	if nFilterRules != 4.0 {
		t.Fatalf("Wrong number of filter rules: expected 4, received %f", nFilterRules)
	}
	nNatRules, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	// -A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
	// -A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
	// -A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
	// -A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
	// -A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41/32 --dport 80 -j KUBE-SVC-OJWW7NSBVZTDHXNW
	// -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 31201 -j KUBE-MARK-MASQ
	// -A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 31201 -j KUBE-SVC-OJWW7NSBVZTDHXNW
	// -A KUBE-SVC-OJWW7NSBVZTDHXNW -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-AMT2SNW3YUNHJFJG
	// -A KUBE-SEP-AMT2SNW3YUNHJFJG -m comment --comment ns1/svc1:p80 -s 10.0.0.2/32 -j KUBE-MARK-MASQ
	// -A KUBE-SEP-AMT2SNW3YUNHJFJG -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.2:80
	// -A KUBE-SVC-OJWW7NSBVZTDHXNW -m comment --comment ns1/svc1:p80 -j KUBE-SEP-OUFLBLJVR33W4FIZ
	// -A KUBE-SEP-OUFLBLJVR33W4FIZ -m comment --comment ns1/svc1:p80 -s 10.0.0.5/32 -j KUBE-MARK-MASQ
	// -A KUBE-SEP-OUFLBLJVR33W4FIZ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.5:80
	// -A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
	if nNatRules != 15.0 {
		t.Fatalf("Wrong number of nat rules: expected 15, received %f", nNatRules)
	}
}

// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.

// This test ensures that the iptables proxier supports translating Endpoints to
// iptables output when internalTrafficPolicy is specified.
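// With internalTrafficPolicy=Local, only endpoints whose NodeName matches the
// proxier's hostname are programmed; with Cluster, or with the feature gate
// disabled, all ready endpoints are used.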
|
|
func TestInternalTrafficPolicyE2E(t *testing.T) {
|
|
type endpoint struct {
|
|
ip string
|
|
hostname string
|
|
}
|
|
|
|
cluster := v1.ServiceInternalTrafficPolicyCluster
|
|
local := v1.ServiceInternalTrafficPolicyLocal
|
|
|
|
clusterExpectedIPTables := `*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
|
|
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
|
|
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
|
|
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`
|
|
|
|
testCases := []struct {
|
|
name string
|
|
internalTrafficPolicy *v1.ServiceInternalTrafficPolicyType
|
|
featureGateOn bool
|
|
endpoints []endpoint
|
|
expectEndpointRule bool
|
|
expectedIPTablesWithSlice string
|
|
}{
|
|
{
|
|
name: "internalTrafficPolicy is cluster",
|
|
internalTrafficPolicy: &cluster,
|
|
featureGateOn: true,
|
|
endpoints: []endpoint{
|
|
{"10.0.1.1", testHostname},
|
|
{"10.0.1.2", "host1"},
|
|
{"10.0.1.3", "host2"},
|
|
},
|
|
expectEndpointRule: true,
|
|
expectedIPTablesWithSlice: clusterExpectedIPTables,
|
|
},
|
|
{
|
|
name: "internalTrafficPolicy is local and there is non-zero local endpoints",
|
|
internalTrafficPolicy: &local,
|
|
featureGateOn: true,
|
|
endpoints: []endpoint{
|
|
{"10.0.1.1", testHostname},
|
|
{"10.0.1.2", "host1"},
|
|
{"10.0.1.3", "host2"},
|
|
},
|
|
expectEndpointRule: true,
|
|
expectedIPTablesWithSlice: `*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`,
|
|
},
|
|
{
|
|
name: "internalTrafficPolicy is local and there is zero local endpoint",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: false,
			expectedIPTablesWithSlice: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
name: "internalTrafficPolicy is local and there is non-zero local endpoint with feature gate off",
			internalTrafficPolicy: &local,
			featureGateOn:         false,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        false,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
	}

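	// Each case runs with the ServiceInternalTrafficPolicy feature gate set as
	// specified; the deferred calls restore the original gate value when the
	// test function returns.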
	for _, tc := range testCases {
		defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, tc.featureGateOn)()
		ipt := iptablestest.NewFake()
		fp := NewFakeProxier(ipt)
		fp.OnServiceSynced()
		fp.OnEndpointSlicesSynced()

		serviceName := "svc1"
		namespaceName := "ns1"

		svc := &v1.Service{
			ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
			Spec: v1.ServiceSpec{
				ClusterIP: "172.20.1.1",
				Selector:  map[string]string{"foo": "bar"},
				Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
			},
		}
		if tc.internalTrafficPolicy != nil {
			svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
		}

		fp.OnServiceAdd(svc)

		tcpProtocol := v1.ProtocolTCP
		endpointSlice := &discovery.EndpointSlice{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("%s-1", serviceName),
				Namespace: namespaceName,
				Labels:    map[string]string{discovery.LabelServiceName: serviceName},
			},
			Ports: []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(""),
				Port:     utilpointer.Int32Ptr(80),
				Protocol: &tcpProtocol,
			}},
			AddressType: discovery.AddressTypeIPv4,
		}
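		// Build the slice's endpoints from the test case, marking every endpoint ready.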
		for _, ep := range tc.endpoints {
			endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
				Addresses:  []string{ep.ip},
				Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
				NodeName:   utilpointer.StringPtr(ep.hostname),
			})
		}

		fp.OnEndpointSliceAdd(endpointSlice)
		fp.syncProxyRules()
		assert.Equal(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())

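		// When an endpoint rule was expected, deleting the slice and resyncing
		// must change the generated iptables output.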
		if tc.expectEndpointRule {
			fp.OnEndpointSliceDelete(endpointSlice)
			fp.syncProxyRules()
			assert.NotEqual(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
		}
	}
}

// Test_EndpointSliceWithTerminatingEndpoints tests that when there are local ready and
// ready + terminating endpoints, only the ready endpoints are used, and that local
// serving + terminating endpoints are used as a fallback only when the
// ProxyTerminatingEndpoints feature gate is enabled.
func Test_EndpointSliceWithTerminatingEndpoints(t *testing.T) {
	tcpProtocol := v1.ProtocolTCP
	testcases := []struct {
		name                   string
		terminatingFeatureGate bool
		service                *v1.Service
		endpointslice          *discovery.EndpointSlice
		expectedIPTables       string
	}{
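		// Within a KUBE-SVC/KUBE-XLB chain, endpoints are balanced with sequential
		// "-m statistic --mode random" rules, so with n endpoints the k-th rule
		// matches with probability 1/(n-k) and the last rule jumps unconditionally
		// (e.g. 0.3333333333, then 0.5000000000, then a plain jump below).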
		{
			name:                   "feature gate ProxyTerminatingEndpoints enabled, ready endpoints exist",
			terminatingFeatureGate: true,
			service: &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
				Spec: v1.ServiceSpec{
					ClusterIP:             "172.20.1.1",
					Type:                  v1.ServiceTypeNodePort,
					ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
					Selector:              map[string]string{"foo": "bar"},
					Ports: []v1.ServicePort{
						{
							Name:       "",
							TargetPort: intstr.FromInt(80),
							Port:       80,
							Protocol:   v1.ProtocolTCP,
						},
					},
				},
			},
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since it's not local
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 1 for ns1/svc1" -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                   "feature gate ProxyTerminatingEndpoints disabled, ready endpoints exist",
			terminatingFeatureGate: false,
			service: &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
				Spec: v1.ServiceSpec{
					ClusterIP:             "172.20.1.1",
					Type:                  v1.ServiceTypeNodePort,
					ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
					Selector:              map[string]string{"foo": "bar"},
					Ports: []v1.ServicePort{
						{
							Name:       "",
							TargetPort: intstr.FromInt(80),
							Port:       80,
							Protocol:   v1.ProtocolTCP,
						},
					},
				},
			},
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since it's not local
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 1 for ns1/svc1" -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                   "feature gate ProxyTerminatingEndpoints enabled, only terminating endpoints exist",
			terminatingFeatureGate: true,
			service: &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
				Spec: v1.ServiceSpec{
					ClusterIP:             "172.20.1.1",
					Type:                  v1.ServiceTypeNodePort,
					ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
					Selector:              map[string]string{"foo": "bar"},
					Ports: []v1.ServicePort{
						{
							Name:       "",
							TargetPort: intstr.FromInt(80),
							Port:       80,
							Protocol:   v1.ProtocolTCP,
						},
					},
				},
			},
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// this endpoint should be used since there are only ready terminating endpoints
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be used since there are only ready terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should not be used since it is terminating and not serving
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for node ports since it's not local
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 0 for ns1/svc1" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Balancing rule 1 for ns1/svc1" -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
name: "with ProxyTerminatingEndpoints disabled, only terminating endpoints exist",
			terminatingFeatureGate: false,
			service: &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
				Spec: v1.ServiceSpec{
					ClusterIP:             "172.20.1.1",
					Type:                  v1.ServiceTypeNodePort,
					ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
					Selector:              map[string]string{"foo": "bar"},
					Ports: []v1.ServicePort{
						{
							Name:       "",
							TargetPort: intstr.FromInt(80),
							Port:       80,
							Protocol:   v1.ProtocolTCP,
						},
					},
				},
			},
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
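					// All local endpoints below are terminating; the only ready
					// endpoint (10.0.1.5) is remote. With ProxyTerminatingEndpoints
					// disabled there are no usable local endpoints, so the XLB chain
					// ends in KUBE-MARK-DROP.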
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: `*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod source rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack pod destination rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
:KUBE-SEP-VLJB2F747S6W7EX4 - [0:0]
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.20.1.1/32 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -s 10.0.1.4/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-VLJB2F747S6W7EX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.4:80
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5/32 -j KUBE-MARK-MASQ
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/24 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {

			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, testcase.terminatingFeatureGate)()

			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			fp.OnServiceAdd(testcase.service)

			fp.OnEndpointSliceAdd(testcase.endpointslice)
			fp.syncProxyRules()
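			// Log the generated rules to make a failing comparison easier to debug.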
			t.Log(fp.iptablesData.String())
			assert.Equal(t, testcase.expectedIPTables, fp.iptablesData.String())

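			// Deleting the only endpoint slice and resyncing must change the rules.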
			fp.OnEndpointSliceDelete(testcase.endpointslice)
			fp.syncProxyRules()
			assert.NotEqual(t, testcase.expectedIPTables, fp.iptablesData.String())
		})
	}
}