
All of the tests used a localDetector that considered the pod IP range to be 10.0.0.0/24, but lots of the tests used pod IPs in 10.180.0.0/16 or 10.0.1.0/24, meaning the generated iptables rules were somewhat inconsistent. Fix this by expanding the localDetector's pod IP range to 10.0.0.0/8. (Changing the pod IPs to all be in 10.0.0.0/24 instead would be a much larger change since it would result in the SEP chain names changing.) Meanwhile, the different tests were also horribly inconsistent about what values they used for other IPs, and some of them even used the same IPs (or ports) for different things in the same test case. Fix these all up and create a consistent set of IP assignments:

    Pod IPs:             10.0.0.0/8
    Service ClusterIPs:  172.30.0.0/16
    Node IPs:            192.168.0.0/24
    Local Node IP:       192.168.0.2
    Service ExternalIPs: 192.168.99.0/24
    LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
    Non-cluster IPs:     203.0.113.0/24
    LB Source Range:     203.0.113.0/25
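To illustrate why widening the detector CIDR fixes the inconsistency (a standalone sketch using only the standard library, not part of this change): pod IPs like 10.180.0.1 fall outside the old 10.0.0.0/24 range but inside the new 10.0.0.0/8 range, so the generated "! -s <podCIDR>" masquerade rules now classify every test pod IP the same way.

package main

import (
	"fmt"
	"net"
)

func main() {
	podIP := net.ParseIP("10.180.0.1") // a typical pod IP from the existing tests
	for _, cidr := range []string{"10.0.0.0/24", "10.0.0.0/8"} {
		_, ipnet, _ := net.ParseCIDR(cidr)
		fmt.Printf("%-12s contains %s: %v\n", cidr, podIP, ipnet.Contains(podIP))
	}
	// Prints:
	//   10.0.0.0/24  contains 10.180.0.1: false
	//   10.0.0.0/8   contains 10.180.0.1: true
}

The test file itself follows.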
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package iptables

import (
	"bytes"
	"fmt"
	"net"
	"reflect"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/metrics"

	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
	utilproxytest "k8s.io/kubernetes/pkg/proxy/util/testing"
	"k8s.io/kubernetes/pkg/util/async"
	"k8s.io/kubernetes/pkg/util/conntrack"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
	netutils "k8s.io/utils/net"
	utilpointer "k8s.io/utils/pointer"
)

func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V4 UDP",
			svcName:     "v4-udp",
			svcIP:       "172.30.1.1",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "10.240.0.3:80",
		},
		{
			description: "V4 TCP",
			svcName:     "v4-tcp",
			svcIP:       "172.30.2.2",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "10.240.0.4:80",
		},
		{
			description: "V4 SCTP",
			svcName:     "v4-sctp",
			svcIP:       "172.30.3.3",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "10.240.0.5:80",
		},
		{
			description:  "V4 UDP, nothing to delete, benign error",
			svcName:      "v4-udp-nothing-to-delete",
			svcIP:        "172.30.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: conntrack.NoConnectionToDelete,
		},
		{
			description:  "V4 UDP, unexpected error, should be glogged",
			svcName:      "v4-udp-simulated-error",
			svcIP:        "172.30.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: "simulated error",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				simErr = fmt.Errorf(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)

		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := netutils.ParseIPSloppy(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}

func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V6 UDP",
			svcName:     "v6-udp",
			svcIP:       "fd00:1234::20",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "[2001:db8::2]:80",
		},
		{
			description: "V6 TCP",
			svcName:     "v6-tcp",
			svcIP:       "fd00:1234::30",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "[2001:db8::3]:80",
		},
		{
			description: "V6 SCTP",
			svcName:     "v6-sctp",
			svcIP:       "fd00:1234::40",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "[2001:db8::4]:80",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				simErr = fmt.Errorf(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewIPv6Fake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)

		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := netutils.ParseIPSloppy(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}

// fakeCloseable implements utilproxy.Closeable
type fakeCloseable struct{}

// Close fakes out the close() used by syncProxyRules to release a local port.
func (f *fakeCloseable) Close() error {
	return nil
}

// fakePortOpener implements portOpener.
type fakePortOpener struct {
	openPorts []*netutils.LocalPort
}

// OpenLocalPort fakes out the listen() and bind() used by syncProxyRules
// to lock a local port.
func (f *fakePortOpener) OpenLocalPort(lp *netutils.LocalPort) (netutils.Closeable, error) {
	f.openPorts = append(f.openPorts, lp)
	return &fakeCloseable{}, nil
}

// Conventions for tests using NewFakeProxier:
//
// Pod IPs:             10.0.0.0/8
// Service ClusterIPs:  172.30.0.0/16
// Node IPs:            192.168.0.0/24
// Local Node IP:       192.168.0.2
// Service ExternalIPs: 192.168.99.0/24
// LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
// Non-cluster IPs:     203.0.113.0/24
// LB Source Range:     203.0.113.0/25

const testHostname = "test-hostname"
const testNodeIP = "192.168.0.2"

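// NewFakeProxier returns a Proxier wired up with fake exec, networking, and
// port-opening implementations, following the conventions above. It is IPv4
// or IPv6 depending on whether ipt is an IPv4 or IPv6 iptables interface.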
func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	ipfamily := v1.IPv4Protocol
	if ipt.IsIPv6() {
		ipfamily = v1.IPv6Protocol
	}
	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/8", ipt)

	networkInterfacer := utilproxytest.NewFakeNetwork()
	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy("::1/128"), Mask: net.CIDRMask(128, 128)},
	}
	networkInterfacer.AddInterfaceAddr(&itf, addrs)
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
	}
	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)

	p := &Proxier{
		exec:                     &fakeexec.FakeExec{},
		serviceMap:               make(proxy.ServiceMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
		iptables:                 ipt,
		masqueradeMark:           "0x4000",
		localDetector:            detectLocal,
		hostname:                 testHostname,
		portsMap:                 make(map[netutils.LocalPort]netutils.Closeable),
		portMapper:               &fakePortOpener{[]*netutils.LocalPort{}},
		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             utilproxy.LineBuffer{},
		filterRules:              utilproxy.LineBuffer{},
		natChains:                utilproxy.LineBuffer{},
		natRules:                 utilproxy.LineBuffer{},
		nodePortAddresses:        make([]string, 0),
		networkInterfacer:        networkInterfacer,
	}
	p.setInitialized(true)
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}

// parseIPTablesData takes iptables-save output and returns a map of table name to array of lines.
func parseIPTablesData(ruleData string) (map[string][]string, error) {
	// Split ruleData at the "COMMIT" lines; given valid input, this will result in
	// one element for each table plus an extra empty element (since the ruleData
	// should end with a "COMMIT" line).
	rawTables := strings.Split(strings.TrimPrefix(ruleData, "\n"), "COMMIT\n")
	nTables := len(rawTables) - 1
	if nTables < 2 || rawTables[nTables] != "" {
		return nil, fmt.Errorf("bad ruleData (%d tables)\n%s", nTables, ruleData)
	}

	tables := make(map[string][]string, nTables)
	for i, table := range rawTables[:nTables] {
		lines := strings.Split(strings.Trim(table, "\n"), "\n")
		// The first line should be, eg, "*nat" or "*filter"
		if lines[0][0] != '*' {
			return nil, fmt.Errorf("bad ruleData (table %d starts with %q)", i+1, lines[0])
		}
		// add back the "COMMIT" line that got eaten by the strings.Split above
		lines = append(lines, "COMMIT")
		tables[lines[0][1:]] = lines
	}

	if tables["nat"] == nil {
		return nil, fmt.Errorf("bad ruleData (no %q table)", "nat")
	}
	if tables["filter"] == nil {
		return nil, fmt.Errorf("bad ruleData (no %q table)", "filter")
	}
	return tables, nil
}

func Test_parseIPTablesData(t *testing.T) {
	for _, tc := range []struct {
		name   string
		input  string
		output map[string][]string
		error  string
	}{
		{
			name: "basic test",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
			output: map[string][]string{
				"filter": {
					`*filter`,
					`:KUBE-SERVICES - [0:0]`,
					`:KUBE-EXTERNAL-SERVICES - [0:0]`,
					`:KUBE-FORWARD - [0:0]`,
					`:KUBE-NODEPORTS - [0:0]`,
					`-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
					`-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP`,
					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT`,
					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT`,
					`COMMIT`,
				},
				"nat": {
					`*nat`,
					`:KUBE-SERVICES - [0:0]`,
					`:KUBE-NODEPORTS - [0:0]`,
					`:KUBE-POSTROUTING - [0:0]`,
					`:KUBE-MARK-MASQ - [0:0]`,
					`:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]`,
					`:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]`,
					`-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN`,
					`-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000`,
					`-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE`,
					`-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000`,
					`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`,
					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ`,
					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80`,
					`-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS`,
					`COMMIT`,
				},
			},
		},
		{
			name: "not enough tables",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
`,
			error: "bad ruleData (1 tables)",
		},
		{
			name: "trailing junk",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
junk
`,
			error: "bad ruleData (2 tables)",
		},
		{
			name: "bad start line",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
`,
			error: `bad ruleData (table 2 starts with ":KUBE-SERVICES - [0:0]")`,
		},
		{
			name: "no nat",
			input: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*mangle
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
`,
			error: `bad ruleData (no "nat" table)`,
		},
		{
			name: "no filter",
			input: `
*mangle
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
`,
			error: `bad ruleData (no "filter" table)`,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			out, err := parseIPTablesData(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				} else {
					assert.Equal(t, tc.output, out)
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

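// countRules returns the number of rules (lines beginning with "-") in the named
// table of the given iptables-save output, or -1 if the output cannot be parsed.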
func countRules(tableName string, ruleData string) int {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		klog.ErrorS(err, "error parsing iptables rules")
		return -1
	}

	rules := 0
	for _, line := range tables[tableName] {
		if line[0] == '-' {
			rules++
		}
	}
	return rules
}

// findAllMatches takes an array of lines and a pattern with one parenthesized group, and
// returns a sorted array of all of the unique matches of the parenthesized group.
func findAllMatches(lines []string, pattern string) []string {
	regex := regexp.MustCompile(pattern)
	allMatches := sets.NewString()
	for _, line := range lines {
		match := regex.FindStringSubmatch(line)
		if len(match) == 2 {
			allMatches.Insert(match[1])
		}
	}
	return allMatches.List()
}

// moveMatchingLines moves lines that match pattern from input to output
func moveMatchingLines(pattern string, input, output []string) ([]string, []string) {
	var newIn []string
	regex := regexp.MustCompile(pattern)
	for _, line := range input {
		if regex.FindString(line) != "" {
			output = append(output, line)
		} else {
			newIn = append(newIn, line)
		}
	}
	return newIn, output
}

// checkIPTablesRuleJumps checks that every `-j` in the given rules jumps to a chain
// that we created and added rules to
func checkIPTablesRuleJumps(ruleData string) error {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		return err
	}

	for tableName, lines := range tables {
		// Find all of the lines like ":KUBE-SERVICES", indicating chains that
		// iptables-restore would create when loading the data.
		createdChains := sets.NewString(findAllMatches(lines, `^:([^ ]*)`)...)
		// Find all of the lines like "-X KUBE-SERVICES ..." indicating chains
		// that we are deleting because they are no longer used, and remove
		// those chains from createdChains.
		createdChains = createdChains.Delete(findAllMatches(lines, `-X ([^ ]*)`)...)

		// Find all of the lines like "-A KUBE-SERVICES ..." indicating chains
		// that we are adding at least one rule to.
		filledChains := sets.NewString(findAllMatches(lines, `-A ([^ ]*)`)...)

		// Find all of the chains that are jumped to by some rule so we can make
		// sure we only jump to valid chains.
		jumpedChains := sets.NewString(findAllMatches(lines, `-j ([^ ]*)`)...)
		// Ignore jumps to chains that we expect to exist even if kube-proxy
		// didn't create them itself.
		jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")
		jumpedChains.Delete(string(KubeMarkDropChain))

		// Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
		// that we are jumping to a chain that was not created.
		missingChains := jumpedChains.Difference(createdChains)
		missingChains = missingChains.Union(filledChains.Difference(createdChains))
		if len(missingChains) > 0 {
			return fmt.Errorf("some chains in %s are used but were not created: %v", tableName, missingChains.List())
		}

		// Find cases where we have "-A FOO ... -j BAR", but no "-A BAR ...",
		// meaning that we are jumping to a chain that we didn't write out any
		// rules for, which is normally a bug. (Except that KUBE-SERVICES always
		// jumps to KUBE-NODEPORTS, even when there are no NodePort rules.)
		emptyChains := jumpedChains.Difference(filledChains)
		emptyChains.Delete(string(kubeNodePortsChain))
		if len(emptyChains) > 0 {
			return fmt.Errorf("some chains in %s are jumped to but have no rules: %v", tableName, emptyChains.List())
		}

		// Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
		// that we are creating an empty chain but not using it for anything.
		extraChains := createdChains.Difference(jumpedChains)
		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(KubeMarkMasqChain))
		if len(extraChains) > 0 {
			return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.List())
		}
	}

	return nil
}

func Test_checkIPTablesRuleJumps(t *testing.T) {
	for _, tc := range []struct {
		name  string
		input string
		error string
	}{
		{
			name: "valid",
			input: `
*filter
COMMIT
*nat
:KUBE-MARK-MASQ - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
COMMIT
`,
			error: "",
		},
		{
			name: "can't jump to chain that wasn't created",
			input: `
*filter
COMMIT
*nat
:KUBE-SERVICES - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
COMMIT
`,
			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't jump to chain that has no rules",
			input: `
*filter
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
COMMIT
`,
			error: "some chains in nat are jumped to but have no rules: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't add rules to a chain that wasn't created",
			input: `
*filter
COMMIT
*nat
:KUBE-MARK-MASQ - [0:0]
:KUBE-SERVICES - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
COMMIT
`,
			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't create chain and then not use it",
			input: `
*filter
COMMIT
*nat
:KUBE-MARK-MASQ - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
COMMIT
`,
			error: "some chains in nat are created but not used: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := checkIPTablesRuleJumps(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// sortIPTablesRules sorts `iptables-restore` output so as to not depend on the order that
// Services get processed in, while preserving the relative ordering of related rules.
func sortIPTablesRules(ruleData string) (string, error) {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		return "", err
	}

	tableNames := make([]string, 0, len(tables))
	for tableName := range tables {
		tableNames = append(tableNames, tableName)
	}
	sort.Strings(tableNames)

	var output []string
	for _, name := range tableNames {
		lines := tables[name]

		// Move "*TABLENAME" line
		lines, output = moveMatchingLines(`^\*`, lines, output)

		// findAllMatches() returns a sorted list of unique matches. So for
		// each of the following, we find all the matches for the regex, then
		// for each unique match (in sorted order), move all of the lines that
		// contain that match.

		// Move and sort ":CHAINNAME" lines
		for _, chainName := range findAllMatches(lines, `^(:[^ ]*) `) {
			lines, output = moveMatchingLines(chainName, lines, output)
		}

		// Move KUBE-NODEPORTS rules for each service, sorted by service name
		for _, nextNodePortService := range findAllMatches(lines, `-A KUBE-NODEPORTS.*--comment "?([^ ]*)`) {
			lines, output = moveMatchingLines(fmt.Sprintf(`^-A KUBE-NODEPORTS.*%s`, nextNodePortService), lines, output)
		}

		// Move KUBE-SERVICES rules for each service, sorted by service name
		for _, nextService := range findAllMatches(lines, `-A KUBE-SERVICES.*--comment "?([^ ]*)`) {
			lines, output = moveMatchingLines(fmt.Sprintf(`^-A KUBE-SERVICES.*%s`, nextService), lines, output)
		}

		// Move remaining chains, sorted by chain name
		for _, nextChain := range findAllMatches(lines, `(-A KUBE-[^ ]* )`) {
			lines, output = moveMatchingLines(nextChain, lines, output)
		}

		// Some tests have deletions...
		for _, nextChain := range findAllMatches(lines, `(-X KUBE-.*)`) {
			lines, output = moveMatchingLines(nextChain, lines, output)
		}

		// Move the "COMMIT" line and anything else left. (There shouldn't be anything
		// else, but if there is, it will show up in the diff later.)
		_, output = moveMatchingLines(".", lines, output)
	}

	// Input ended with a "\n", so make sure the output does too
	output = append(output, "")

	return strings.Join(output, "\n"), nil
}

func Test_sortIPTablesRules(t *testing.T) {
|
|
for _, tc := range []struct {
|
|
name string
|
|
input string
|
|
output string
|
|
error string
|
|
}{
|
|
{
|
|
name: "basic test using each match type",
|
|
input: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
|
|
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
|
|
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
|
|
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
|
|
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
|
|
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
|
|
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
|
|
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`,
|
|
output: `
|
|
*filter
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
|
|
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
|
|
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
|
|
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
|
|
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
|
|
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
|
|
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
|
|
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
|
|
COMMIT
|
|
`,
|
|
},
|
|
{
|
|
name: "not enough tables",
|
|
input: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
`,
|
|
error: "bad ruleData (1 tables)",
|
|
},
|
|
{
|
|
name: "extra tables",
|
|
input: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*mangle
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
`,
|
|
output: `
|
|
*filter
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*mangle
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
`,
|
|
},
|
|
{
|
|
name: "correctly match same service name in different styles of comments",
|
|
input: `
|
|
*filter
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
|
|
-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
|
|
-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
|
|
COMMIT
|
|
`,
|
|
output: `
|
|
*filter
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
|
|
-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
|
|
-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
|
|
COMMIT
|
|
`,
|
|
},
		{
			name: "unexpected junk lines are preserved",
			input: `
*filter
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-AAAAA - [0:0]
:KUBE-ZZZZZ - [0:0]
:WHY-IS-THIS-CHAIN-HERE - [0:0]
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
COMMIT
`,
			output: `
*filter
COMMIT
*nat
:KUBE-AAAAA - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SERVICES - [0:0]
:KUBE-ZZZZZ - [0:0]
:WHY-IS-THIS-CHAIN-HERE - [0:0]
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
COMMIT
`,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			out, err := sortIPTablesRules(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				} else {
					assert.Equal(t, strings.TrimPrefix(tc.output, "\n"), out)
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// assertIPTablesRulesEqual asserts that the generated rules in result match the rules in
// expected, ignoring irrelevant ordering differences.
func assertIPTablesRulesEqual(t *testing.T, expected, result string) {
	expected, err := sortIPTablesRules(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
	result, err = sortIPTablesRules(result)
	if err != nil {
		t.Fatalf("%s", err)
	}

	assert.Equal(t, expected, result)

	err = checkIPTablesRuleJumps(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
}

// assertIPTablesRulesNotEqual asserts that the generated rules in result DON'T match the
// rules in expected, ignoring irrelevant ordering differences.
func assertIPTablesRulesNotEqual(t *testing.T, expected, result string) {
	expected, err := sortIPTablesRules(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
	result, err = sortIPTablesRules(result)
	if err != nil {
		t.Fatalf("%s", err)
	}

	assert.NotEqual(t, expected, result)

	err = checkIPTablesRuleJumps(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
	err = checkIPTablesRuleJumps(result)
	if err != nil {
		t.Fatalf("%s", err)
	}
}

// TestOverallIPTablesRulesWithMultipleServices creates 4 types of services: ClusterIP,
// LoadBalancer, ExternalIP and NodePort, and verifies that the generated NAT table rules
// exactly match what is expected. This test provides an overall view of how the NAT
// table rules look with the different jumps.
func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	metrics.RegisterMetrics()
	tcpProtocol := v1.ProtocolTCP

	makeServiceMap(fp,
		// create ClusterIP service
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		// create LoadBalancer service
		makeTestService("ns2", "svc2", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = "172.30.0.42"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3001,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "1.2.3.4",
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
			svc.Spec.HealthCheckNodePort = 30000
		}),
		// create LoadBalancer service with Cluster traffic policy and no source ranges
		makeTestService("ns2b", "svc2b", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3002,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "5.6.7.8",
			}}
			svc.Spec.HealthCheckNodePort = 30000
		}),
		// create NodePort service
		makeTestService("ns3", "svc3", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3003,
			}}
		}),
		// create ExternalIP service
		makeTestService("ns4", "svc4", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "172.30.0.44"
			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       "p80",
				Port:       80,
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(80),
			}}
		}),
	)
	populateEndpointSlices(fp,
		// create ClusterIP service endpoints
		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.1"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create Local LoadBalancer endpoints. Note that since we aren't setting
		// the endpoint's NodeName, it will be considered non-local and ignored.
		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.2"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create Cluster LoadBalancer endpoints
		makeTestEndpointSlice("ns2b", "svc2b", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.3"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create NodePort service endpoints
		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.3"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create ExternalIP service endpoints
		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.4"},
			}, {
				Addresses: []string{"10.180.0.5"},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-XLB-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
:KUBE-SVC-PAZTZYUUMV5KCDZL - [0:0]
:KUBE-FW-PAZTZYUUMV5KCDZL - [0:0]
:KUBE-SEP-QDCEFMBQEGWIV4VT - [0:0]
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SVC-PAZTZYUUMV5KCDZL -m comment --comment "ns2b/svc2b:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns2b/svc2b:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-PAZTZYUUMV5KCDZL
-A KUBE-SERVICES -m comment --comment "ns2b/svc2b:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-PAZTZYUUMV5KCDZL
-A KUBE-FW-PAZTZYUUMV5KCDZL -m comment --comment "ns2b/svc2b:p80 loadbalancer IP" -j KUBE-MARK-MASQ
-A KUBE-FW-PAZTZYUUMV5KCDZL -m comment --comment "ns2b/svc2b:p80 loadbalancer IP" -j KUBE-SVC-PAZTZYUUMV5KCDZL
-A KUBE-FW-PAZTZYUUMV5KCDZL -m comment --comment "ns2b/svc2b:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-SVC-PAZTZYUUMV5KCDZL -m comment --comment ns2b/svc2b:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns2b/svc2b:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-PAZTZYUUMV5KCDZL
-A KUBE-SVC-PAZTZYUUMV5KCDZL -m comment --comment ns2b/svc2b:p80 -j KUBE-SEP-QDCEFMBQEGWIV4VT
-A KUBE-SEP-QDCEFMBQEGWIV4VT -m comment --comment ns2b/svc2b:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
-A KUBE-SEP-QDCEFMBQEGWIV4VT -m comment --comment ns2b/svc2b:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-SVC-X27LE4BHSL4DOUIK
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-MARK-MASQ
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())

	natRulesMetric, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nNatRules := int(natRulesMetric)

	expectedNatRules := countRules("nat", fp.iptablesData.String())

	if nNatRules != expectedNatRules {
		t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
	}
}
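
// TestClusterIPReject verifies that a ClusterIP service with no endpoints gets a REJECT
// rule for its cluster IP in the filter table and no DNAT rules in the nat table.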
func TestClusterIPReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
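
// TestClusterIPEndpointsJump verifies that a ClusterIP service with one endpoint gets a
// service chain that jumps to the endpoint's SEP chain, which masquerades hairpin
// traffic and DNATs to the endpoint.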
func TestClusterIPEndpointsJump(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
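
// TestLoadBalancer verifies the rules generated for a LoadBalancer service: a KUBE-FW
// firewall chain that admits only the configured LoadBalancerSourceRanges, plus the
// nodeport and endpoint chains.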
func TestLoadBalancer(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-MASQ
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
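
// TestNodePort verifies the rules generated for a NodePort service, and that the
// proxier actually opens the node port via its port mapper.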
func TestNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
	assert.Equal(t, []*netutils.LocalPort{
		{
			Description: "nodePort for ns1/svc1:p80",
			IP:          "",
			IPFamily:    netutils.IPv4,
			Port:        svcNodePort,
			Protocol:    netutils.TCP,
		},
	}, fp.portMapper.(*fakePortOpener).openPorts)
	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
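
// TestHealthCheckNodePort verifies that a Local-traffic-policy LoadBalancer service
// with no endpoints still gets an ACCEPT rule for its health check node port, alongside
// REJECT rules for the service ports themselves.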
func TestHealthCheckNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.nodePortAddresses = []string{"127.0.0.0/8"}

	svcIP := "172.30.0.42"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j REJECT
-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 127.0.0.1 -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
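
// TestMasqueradeRule verifies that the KUBE-POSTROUTING MASQUERADE rule is emitted with
// --random-fully exactly when the underlying iptables supports it.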
func TestMasqueradeRule(t *testing.T) {
	for _, testcase := range []bool{false, true} {
		ipt := iptablestest.NewFake().SetHasRandomFully(testcase)
		fp := NewFakeProxier(ipt)
		fp.syncProxyRules()

		expectedFmt := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
		var expected string
		if testcase {
			expected = fmt.Sprintf(expectedFmt, " --random-fully")
		} else {
			expected = fmt.Sprintf(expectedFmt, "")
		}
		assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
	}
}
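
// TestExternalIPsReject verifies that a service with an external IP but no endpoints
// gets REJECT rules for both the cluster IP and the external IP.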
func TestExternalIPsReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcExternalIPs := "192.168.99.11"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "ClusterIP"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
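
// TestOnlyLocalExternalIPs verifies that external IP traffic for a service with Local
// traffic policy jumps to the XLB chain, which delivers only to local endpoints.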
func TestOnlyLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcExternalIPs := "192.168.99.11"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-XLB-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-XLB-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}

// TestNonLocalExternalIPs tests that we add a masquerade rule to the service chain in
// order to SNAT packets to external IPs when externalTrafficPolicy is Cluster and the
// traffic is not local.
func TestNonLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcExternalIPs := "192.168.99.11"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
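
// TestNodePortReject verifies that a NodePort service with no endpoints gets REJECT
// rules for both the cluster IP and the node port.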
func TestNodePortReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
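
// TestLoadBalancerReject verifies that a Local-traffic-policy LoadBalancer service with
// no endpoints gets REJECT rules for its cluster IP, load balancer IP, and node port.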
func TestLoadBalancerReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT
-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
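
// TestOnlyLocalLoadBalancing verifies the rules generated for a LoadBalancer service
// with Local external traffic policy and ClientIP session affinity: load balancer
// traffic jumps to the XLB chain, and affinity is implemented with "-m recent" rules.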
func TestOnlyLocalLoadBalancing(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)

	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-XLB-XPGD46QRK7WJZT7O - [0:0]
:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-XLB-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --rcheck --seconds 10800 --reap -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --set -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}
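
// TestOnlyLocalNodePortsNoClusterCIDR is TestOnlyLocalNodePorts with no cluster CIDR
// configured, so there is no "! -s <clusterCIDR>" masquerade rule and no rule
// redirecting pod traffic from the XLB chain back to the service chain.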
func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
	fp.nodePortAddresses = []string{"192.168.0.0/24"}

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-XLB-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 192.168.0.2 -j KUBE-NODEPORTS
COMMIT
`
	onlyLocalNodePorts(t, fp, ipt, expected)
}
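
// TestOnlyLocalNodePorts verifies that node port traffic for a service with Local
// traffic policy jumps to the XLB chain and is only delivered to local endpoints.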
func TestOnlyLocalNodePorts(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.nodePortAddresses = []string{"192.168.0.0/24"}

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-XLB-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-XLB-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-XLB-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 192.168.0.2 -j KUBE-NODEPORTS
COMMIT
`
	onlyLocalNodePorts(t, fp, ipt, expected)
}
|
|
|
|
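// onlyLocalNodePorts is the shared body of the two tests above: it creates a
// NodePort service with ExternalTrafficPolicy: Local and two endpoints (one
// remote, one on testHostname), syncs the proxier, and asserts that the
// generated iptables rules match the expected output.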
func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables, expected string) {
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)

	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}

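// TestComputeProbability checks a handful of exact expected strings and then
// verifies that computeProbability(i) strictly increases as i decreases from
// 100000 to 2.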
func TestComputeProbability(t *testing.T) {
	expectedProbabilities := map[int]string{
		1:      "1.0000000000",
		2:      "0.5000000000",
		10:     "0.1000000000",
		100:    "0.0100000000",
		1000:   "0.0010000000",
		10000:  "0.0001000000",
		100000: "0.0000100000",
		100001: "0.0000099999",
	}

	for num, expected := range expectedProbabilities {
		actual := computeProbability(num)
		if actual != expected {
			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
		}
	}

	prevProbability := float64(0)
	for i := 100000; i > 1; i-- {
		currProbability, err := strconv.ParseFloat(computeProbability(i), 64)
		if err != nil {
			t.Fatalf("Error parsing float probability for %d: %v", i, err)
		}
		if currProbability <= prevProbability {
			t.Fatalf("Probability unexpectedly <= previous probability for %d: (%0.10f <= %0.10f)", i, currProbability, prevProbability)
		}
		prevProbability = currProbability
	}
}

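// makeTestService returns a Service with the given namespace and name,
// customized by svcFunc.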
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
		Spec:   v1.ServiceSpec{},
		Status: v1.ServiceStatus{},
	}
	svcFunc(svc)
	return svc
}

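// addTestPort appends a ServicePort built from the given values (with
// targetPort wrapped as an intstr) and returns the extended slice.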
func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
	svcPort := v1.ServicePort{
		Name:       name,
		Protocol:   protocol,
		Port:       port,
		NodePort:   nodeport,
		TargetPort: intstr.FromInt(targetPort),
	}
	return append(array, svcPort)
}

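// TestBuildServiceMapAddRemove verifies that adding services yields one
// service map entry per ServicePort, that only the Local-policy load
// balancer contributes a health check node port, and that updates and
// deletes report the stale UDP cluster IPs of everything removed.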
func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	services := []*v1.Service{
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.55.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
		}),
		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
			svc.Spec.ClusterIP = "172.30.55.10"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
		}),
		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.30.55.11"
			svc.Spec.LoadBalancerIP = "1.2.3.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "1.2.3.4"},
				},
			}
		}),
		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.30.55.12"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "5.6.7.8"},
				},
			}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.HealthCheckNodePort = 345
		}),
	}

	for i := range services {
		fp.OnServiceAdd(services[i])
	}
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 10 {
		t.Errorf("expected service map length 10, got %v", fp.serviceMap)
	}

	// The only-local-loadbalancer ones get added
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
	} else {
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
		}
	}

	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Remove some stuff
	// oneService is a modification of services[0] with the first port removed.
	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
	})

	fp.OnServiceUpdate(services[0], oneService)
	fp.OnServiceDelete(services[1])
	fp.OnServiceDelete(services[2])
	fp.OnServiceDelete(services[3])

	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", fp.serviceMap)
	}

	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
	}

	// All services but one were deleted. While you'd expect only the ClusterIPs
	// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of its ServicePorts was deleted.
	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
	if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
	}
	for _, ip := range expectedStaleUDPServices {
		if !result.UDPStaleClusterIP.Has(ip) {
			t.Errorf("expected stale UDP service %s", ip)
		}
	}
}

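// TestBuildServiceMapServiceHeadless verifies that headless services
// (ClusterIP: None) are skipped entirely by the service map.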
func TestBuildServiceMapServiceHeadless(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = v1.ClusterIPNone
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
		}),
		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = v1.ClusterIPNone
		}),
	)

	// Headless service should be ignored
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %d", len(fp.serviceMap))
	}

	// No proxied services, so no healthchecks
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts))
	}

	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}

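// TestBuildServiceMapServiceTypeExternalName verifies that ExternalName
// services produce no service map entries, even when a ClusterIP is
// (incorrectly) set on them.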
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeExternalName
			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
			svc.Spec.ExternalName = "foo2.bar.com"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
		}),
	)

	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %v", fp.serviceMap)
	}
	// No proxied services, so no healthchecks
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP)
	}
}

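// TestBuildServiceMapServiceUpdate walks a service from ClusterIP to
// LoadBalancer with ExternalTrafficPolicy: Local, through a no-op update,
// and back to ClusterIP, checking the service map size, health check node
// ports, and stale UDP cluster IPs at each step.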
func TestBuildServiceMapServiceUpdate(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
	})
	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeLoadBalancer
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.LoadBalancerIP = "1.2.3.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{
				{IP: "1.2.3.4"},
			},
		}
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		svc.Spec.HealthCheckNodePort = 345
	})

	fp.OnServiceAdd(servicev1)

	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Change service to load-balancer
	fp.OnServiceUpdate(servicev1, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}

	// No change; make sure the service map stays the same and there are
	// no health-check changes
	fp.OnServiceUpdate(servicev2, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}

	// And back to ClusterIP
	fp.OnServiceUpdate(servicev2, servicev1)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// The service was updated, not removed, and its ClusterIP is
		// unchanged, so nothing should be stale
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}

func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
	for i := range allEndpointSlices {
		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
	}
}

func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
	eps := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
			Namespace: namespace,
			Labels:    map[string]string{discovery.LabelServiceName: name},
		},
	}
	epsFunc(eps)
	return eps
}

func makeNSN(namespace, name string) types.NamespacedName {
	return types.NamespacedName{Namespace: namespace, Name: name}
}

func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
	return proxy.ServicePortName{
		NamespacedName: makeNSN(ns, name),
		Port:           port,
		Protocol:       protocol,
	}
}

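// makeServiceMap adds the given services to the proxier and marks it as
// having received the initial full list of services.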
func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
	for i := range allServices {
		proxier.OnServiceAdd(allServices[i])
	}

	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.servicesSynced = true
}

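// compareEndpointsMaps fails the test if newMap does not match expected,
// comparing the endpoint string, locality, protocol, and chain name of each
// endpointsInfo entry.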
func compareEndpointsMaps(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) {
	if len(newMap) != len(expected) {
		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
	}
	for x := range expected {
		if len(newMap[x]) != len(expected[x]) {
			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
		} else {
			for i := range expected[x] {
				newEp, ok := newMap[x][i].(*endpointsInfo)
				if !ok {
					t.Errorf("Failed to cast endpointsInfo")
					continue
				}
				if newEp.Endpoint != expected[x][i].Endpoint ||
					newEp.IsLocal != expected[x][i].IsLocal ||
					newEp.protocol != expected[x][i].protocol ||
					newEp.chainName != expected[x][i].chainName {
					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
				}
			}
		}
	}
}

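// Test_updateEndpointsMap runs EndpointsMap.Update across a matrix of
// EndpointSlice transitions (add, remove, rename, renumber, and complex
// churn) and checks the resulting map, the stale endpoints and service
// names, and the per-service count of local endpoints for health checks.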
func Test_updateEndpointsMap(t *testing.T) {
	var nodeName = testHostname
	udpProtocol := v1.ProtocolUDP

	emptyEndpointSlices := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
	}
	subset1 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}}
	}
	subset2 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.2"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	namedPortLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
					NodeName:  &nodeName,
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11"),
					Port:     utilpointer.Int32(11),
					Protocol: &udpProtocol,
				}}
			}),
	}
	namedPort := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
	}
	namedPortRenamed := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11-2"),
					Port:     utilpointer.Int32(11),
					Protocol: &udpProtocol,
				}}
			}),
	}
	namedPortRenumbered := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11"),
					Port:     utilpointer.Int32(22),
					Protocol: &udpProtocol,
				}}
			}),
	}
	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
				}, {
					Addresses: []string{"10.1.1.2"},
					NodeName:  &nodeName,
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11"),
					Port:     utilpointer.Int32(11),
					Protocol: &udpProtocol,
				}, {
					Name:     utilpointer.String("p12"),
					Port:     utilpointer.Int32(12),
					Protocol: &udpProtocol,
				}}
			}),
	}
	multipleSubsets := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
	}
	subsetLocal := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.2"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
	}
	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	subset3 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.3"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p13"),
			Port:     utilpointer.Int32(13),
			Protocol: &udpProtocol,
		}}
	}
	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
	}
	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
		}, {
			Addresses: []string{"10.1.1.2"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.3"},
		}, {
			Addresses: []string{"10.1.1.4"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p13"),
			Port:     utilpointer.Int32(13),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p14"),
			Port:     utilpointer.Int32(14),
			Protocol: &udpProtocol,
		}}
	}
	subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.2.2.1"},
		}, {
			Addresses: []string{"10.2.2.2"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p21"),
			Port:     utilpointer.Int32(21),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p22"),
			Port:     utilpointer.Int32(22),
			Protocol: &udpProtocol,
		}}
	}
	multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
		makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
		makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
	}
	complexSubset1 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.2.2.2"},
			NodeName:  &nodeName,
		}, {
			Addresses: []string{"10.2.2.22"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p22"),
			Port:     utilpointer.Int32(22),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset2 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.2.2.3"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p23"),
			Port:     utilpointer.Int32(23),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset3 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.4.4.4"},
			NodeName:  &nodeName,
		}, {
			Addresses: []string{"10.4.4.5"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p44"),
			Port:     utilpointer.Int32(44),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset4 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.4.4.6"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p45"),
			Port:     utilpointer.Int32(45),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset5 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
		}, {
			Addresses: []string{"10.1.1.11"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset6 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.2"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p122"),
			Port:     utilpointer.Int32(122),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset7 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.3.3.3"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p33"),
			Port:     utilpointer.Int32(33),
			Protocol: &udpProtocol,
		}}
	}
	complexSubset8 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.4.4.4"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p44"),
			Port:     utilpointer.Int32(44),
			Protocol: &udpProtocol,
		}}
	}
	complexBefore := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
		nil,
		makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
		makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
		nil,
		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
		makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
	}
	complexAfter := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
		makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
		nil,
		nil,
		makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
		makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
		nil,
	}

	testCases := []struct {
		// previousEndpoints and currentEndpoints are used to call the
		// appropriate OnEndpointSlice* handlers (based on whether the
		// corresponding values are nil or non-nil) and must be of equal length.
		previousEndpoints         []*discovery.EndpointSlice
		currentEndpoints          []*discovery.EndpointSlice
		oldEndpoints              map[proxy.ServicePortName][]*endpointsInfo
		expectedResult            map[proxy.ServicePortName][]*endpointsInfo
		expectedStaleEndpoints    []proxy.ServiceEndpoint
		expectedStaleServiceNames map[proxy.ServicePortName]bool
		expectedHealthchecks      map[types.NamespacedName]int
	}{{
		// Case[0]: nothing
		oldEndpoints:              map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult:            map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[1]: no change, named port, local
		previousEndpoints: namedPortLocal,
		currentEndpoints:  namedPortLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[2]: no change, multiple subsets
		previousEndpoints: multipleSubsets,
		currentEndpoints:  multipleSubsets,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[3]: no change, multiple subsets, multiple ports, local
		previousEndpoints: multipleSubsetsMultiplePortsLocal,
		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
		previousEndpoints: multipleSubsetsIPsPorts,
		currentEndpoints:  multipleSubsetsIPsPorts,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 2,
			makeNSN("ns2", "ep2"): 1,
		},
	}, {
		// Case[5]: add an Endpoints
		previousEndpoints: []*discovery.EndpointSlice{nil},
		currentEndpoints:  namedPortLocal,
		oldEndpoints:      map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[6]: remove an Endpoints
		previousEndpoints: namedPortLocal,
		currentEndpoints:  []*discovery.EndpointSlice{nil},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[7]: add an IP and port
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortsLocalNoLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[8]: remove an IP and port
		previousEndpoints: namedPortsLocalNoLocal,
		currentEndpoints:  namedPort,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.2:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.1.1.1:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.1.1.2:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[9]: add a subset
		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
		currentEndpoints:  multipleSubsetsWithLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[10]: remove a subset
		previousEndpoints: multipleSubsets,
		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.2:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[11]: rename a port
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortRenamed,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[12]: renumber a port
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortRenumbered,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[13]: complex add and remove
		previousEndpoints: complexBefore,
		currentEndpoints:  complexAfter,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.2.2.2:22",
			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.2.2.22:22",
			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.2.2.3:23",
			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.4.4.5:44",
			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.4.4.6:45",
			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns4", "ep4"): 1,
		},
	}, {
		// Case[14]: change from 0 endpoint addresses to 1 named port
		previousEndpoints: emptyEndpointSlices,
		currentEndpoints:  namedPort,
		oldEndpoints:      map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{},
	},
	}

	for tci, tc := range testCases {
		ipt := iptablestest.NewFake()
		fp := NewFakeProxier(ipt)
		fp.hostname = nodeName

		// First check that after adding all previous versions of endpoints,
		// the endpoints map is as we expect.
		for i := range tc.previousEndpoints {
			if tc.previousEndpoints[i] != nil {
				fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
			}
		}
		fp.endpointsMap.Update(fp.endpointsChanges)
		compareEndpointsMaps(t, tci, fp.endpointsMap, tc.oldEndpoints)

		// Now call the appropriate handlers to get to the state we want to be in.
		if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
			t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
		}

		for i := range tc.previousEndpoints {
			prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
			switch {
			case prev == nil:
				fp.OnEndpointSliceAdd(curr)
			case curr == nil:
				fp.OnEndpointSliceDelete(prev)
			default:
				fp.OnEndpointSliceUpdate(prev, curr)
			}
		}
		result := fp.endpointsMap.Update(fp.endpointsChanges)
		newMap := fp.endpointsMap
		compareEndpointsMaps(t, tci, newMap, tc.expectedResult)
		if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) {
			t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints)
		}
		for _, x := range tc.expectedStaleEndpoints {
			found := false
			for _, stale := range result.StaleEndpoints {
				if stale == x {
					found = true
					break
				}
			}
			if !found {
				t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints)
			}
		}
		if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) {
			t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames)
		}
		for svcName := range tc.expectedStaleServiceNames {
			found := false
			for _, stale := range result.StaleServiceNames {
				if stale == svcName {
					found = true
				}
			}
			if !found {
				t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames)
			}
		}
		if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) {
			t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize)
		}
	}
}

// The majority of EndpointSlice specific tests are not iptables specific and focus on
// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the
// iptables proxier supports translating EndpointSlices to iptables output.
func TestEndpointSliceE2E(t *testing.T) {
	expectedIPTablesWithSlice := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
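	// Two notes on the expected output above, since it is matched verbatim.
	// First, the KUBE-SVC-* and KUBE-SEP-* names are content-addressed: the
	// proxier derives them by hashing the service port name (plus, for SEP
	// chains, the endpoint) and base32-encoding the digest, which is why
	// names like KUBE-SVC-AQI2S6QIMU7PVVRP are stable across runs. A minimal
	// sketch of the scheme (the real helpers live in proxier.go; the exact
	// hash inputs shown here are illustrative):
	//
	//	hash := sha256.Sum256([]byte(servicePortName + protocol))
	//	name := "KUBE-SVC-" + base32.StdEncoding.EncodeToString(hash[:])[:16]
	//
	// Second, the --probability values implement an even split over n
	// endpoints by chaining conditional probabilities: rule i (0-indexed)
	// matches with probability 1/(n-i), so for three endpoints the sequence
	// is 0.3333333333, 0.5000000000, and an unconditional final jump, giving
	// each KUBE-SEP chain 1/3 of the traffic overall.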

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.30.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node2"),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node3"),
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr("node4"),
		}},
	}

	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())

	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesNotEqual(t, expectedIPTablesWithSlice, fp.iptablesData.String())
}

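// TestHealthCheckNodePortE2E ensures that a LoadBalancer Service with
// externalTrafficPolicy=Local gets a filter-table ACCEPT rule for its
// healthCheckNodePort, plus KUBE-XLB rules for its NodePort, and that
// deleting the Service removes all of those rules again.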
func TestHealthCheckNodePortE2E(t *testing.T) {
	expectedIPTables := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -s 127.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1 -m tcp -p tcp --dport 30010 -j KUBE-XLB-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
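	// With externalTrafficPolicy=Local, node port traffic lands in
	// KUBE-XLB-AQI2S6QIMU7PVVRP above: pod-originated and node-local traffic
	// is redirected to the regular KUBE-SVC chain, while external clients are
	// only ever sent to the endpoint on this node (KUBE-SEP-3JOIVZTXZZRGORX4,
	// i.e. 10.0.1.1), which is what preserves the client source IP.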

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.30.1.1",
			Selector:              map[string]string{"foo": "bar"},
			Ports:                 []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), NodePort: 30010, Protocol: v1.ProtocolTCP}},
			Type:                  "LoadBalancer",
			HealthCheckNodePort:   30000,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
		},
	}
	fp.OnServiceAdd(svc)

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node2"),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node3"),
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr("node4"),
		}},
	}
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesEqual(t, expectedIPTables, fp.iptablesData.String())

	fp.OnServiceDelete(svc)
	fp.syncProxyRules()
	assertIPTablesRulesNotEqual(t, expectedIPTables, fp.iptablesData.String())
}

// Test_HealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
func Test_HealthCheckNodePortWhenTerminating(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.30.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, { // not ready endpoints should be ignored
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}},
	}

	fp.OnEndpointSliceAdd(endpointSlice)
	result := fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 1 {
		t.Errorf("unexpected number of health check node ports, expected 1 but got: %d", len(result.HCEndpointsLocalIPSize))
	}

	// set all endpoints to terminating
	endpointSliceTerminating := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses: []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, {
			Addresses: []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, {
			Addresses: []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, { // not ready endpoints should be ignored
			Addresses: []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(false),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}},
	}

	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
	result = fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 0 {
		t.Errorf("unexpected number of health check node ports, expected 0 but got: %d", len(result.HCEndpointsLocalIPSize))
	}
}

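// TestProxierDeleteNodePortStaleUDP verifies that when a UDP service gains
// its first ready endpoint, the proxier deletes stale conntrack entries for
// the ClusterIP, ExternalIP, LoadBalancer IP, and NodePort. UDP has no
// connection teardown, so entries created while the service had no endpoints
// would otherwise keep blackholing traffic.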
func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	cmdOutput := "1 flow entries have been deleted"
	cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, nil }

	// Delete ClusterIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete ExternalIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete LoadBalancerIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete NodePort entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	svcIP := "172.30.0.41"
	extIP := "192.168.99.11"
	lbIngressIP := "1.2.3.4"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolUDP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{extIP}
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolUDP,
				NodePort: int32(nodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: lbIngressIP,
			}}
		}),
	)

	fp.syncProxyRules()
	if fexec.CommandCalls != 0 {
		t.Fatalf("Created service without endpoints must not clear conntrack entries")
	}

	epIP := "10.180.0.1"
	udpProtocol := v1.ProtocolUDP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
				Conditions: discovery.EndpointConditions{
					Ready: utilpointer.Bool(false),
				},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &udpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	if fexec.CommandCalls != 0 {
		t.Fatalf("Updated UDP service with not ready endpoints must not clear UDP entries")
	}

	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
				Conditions: discovery.EndpointConditions{
					Ready: utilpointer.Bool(true),
				},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &udpProtocol,
			}}
		}),
	)

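	// The not-ready -> ready transition above is what makes this sync treat
	// the endpoints as newly active and run the conntrack cleanup commands.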
	fp.syncProxyRules()

	if fexec.CommandCalls != 4 {
		t.Fatalf("Updated UDP service with new endpoints must clear UDP entries 4 times: ClusterIP, NodePort, ExternalIP and LB")
	}

	// the order is not guaranteed so we have to compare the strings in any order
	expectedCommands := []string{
		// Delete ClusterIP Conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", svcIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete ExternalIP Conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", extIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete LoadBalancerIP Conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", lbIngressIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete NodePort Conntrack entries
		fmt.Sprintf("conntrack -D -p %s --dport %d", strings.ToLower(string(v1.ProtocolUDP)), nodePort),
	}
	actualCommands := []string{
		strings.Join(fcmd.CombinedOutputLog[0], " "),
		strings.Join(fcmd.CombinedOutputLog[1], " "),
		strings.Join(fcmd.CombinedOutputLog[2], " "),
		strings.Join(fcmd.CombinedOutputLog[3], " "),
	}
	sort.Strings(expectedCommands)
	sort.Strings(actualCommands)

	if !reflect.DeepEqual(expectedCommands, actualCommands) {
		t.Errorf("Expected commands: %v, but executed %v", expectedCommands, actualCommands)
	}
}

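// TestProxierMetricsIptablesTotalRules checks that the IptablesRulesTotal
// gauge matches the number of rules the proxier actually wrote, per table,
// both before and after endpoints are added.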
func TestProxierMetricsIptablesTotalRules(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	tcpProtocol := v1.ProtocolTCP

	metrics.RegisterMetrics()

	svcIP := "172.30.0.41"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	fp.syncProxyRules()
	iptablesData := fp.iptablesData.String()

	nFilterRulesMetric, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nFilterRules := int(nFilterRulesMetric)
	expectedFilterRules := countRules("filter", iptablesData)

	if nFilterRules != expectedFilterRules {
		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
	}

	nNatRulesMetric, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nNatRules := int(nNatRulesMetric)
	expectedNatRules := countRules("nat", iptablesData)

	if nNatRules != expectedNatRules {
		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
	}

	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.0.0.2"},
			}, {
				Addresses: []string{"10.0.0.5"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()
	iptablesData = fp.iptablesData.String()

	nFilterRulesMetric, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nFilterRules = int(nFilterRulesMetric)
	expectedFilterRules = countRules("filter", iptablesData)

	if nFilterRules != expectedFilterRules {
		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
	}

	nNatRulesMetric, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nNatRules = int(nNatRulesMetric)
	expectedNatRules = countRules("nat", iptablesData)

	if nNatRules != expectedNatRules {
		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
	}
}

// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.

// This test ensures that the iptables proxier supports translating Endpoints to
// iptables output when internalTrafficPolicy is specified
func TestInternalTrafficPolicyE2E(t *testing.T) {
	type endpoint struct {
		ip       string
		hostname string
	}

	cluster := v1.ServiceInternalTrafficPolicyCluster
	local := v1.ServiceInternalTrafficPolicyLocal

	clusterExpectedIPTables := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`
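	// The per-case expectations below differ from clusterExpectedIPTables in
	// two ways: with internalTrafficPolicy=Local and a local endpoint, only
	// the KUBE-SEP chain for the endpoint on testHostname is programmed; with
	// no local endpoints at all, the filter table gets a REJECT rule for the
	// cluster IP instead.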

	testCases := []struct {
		name                      string
		internalTrafficPolicy     *v1.ServiceInternalTrafficPolicyType
		featureGateOn             bool
		endpoints                 []endpoint
		expectEndpointRule        bool
		expectedIPTablesWithSlice string
	}{
		{
			name:                  "internalTrafficPolicy is cluster",
			internalTrafficPolicy: &cluster,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        true,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
		{
			name:                  "internalTrafficPolicy is local and there is non-zero local endpoints",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: true,
			expectedIPTablesWithSlice: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-3JOIVZTXZZRGORX4
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                  "internalTrafficPolicy is local and there is zero local endpoint",
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: false,
			expectedIPTablesWithSlice: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                  "internalTrafficPolicy is local and there is non-zero local endpoint with feature gate off",
			internalTrafficPolicy: &local,
			featureGateOn:         false,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        false,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
		},
	}

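	// Note that when the ServiceInternalTrafficPolicy feature gate is off,
	// the field is ignored entirely and the "cluster" rules are expected even
	// for a Local policy, which the last case above exercises.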
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, tc.featureGateOn)()
			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			serviceName := "svc1"
			namespaceName := "ns1"

			svc := &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
				Spec: v1.ServiceSpec{
					ClusterIP: "172.30.1.1",
					Selector:  map[string]string{"foo": "bar"},
					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
				},
			}
			if tc.internalTrafficPolicy != nil {
				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
			}

			fp.OnServiceAdd(svc)

			tcpProtocol := v1.ProtocolTCP
			endpointSlice := &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", serviceName),
					Namespace: namespaceName,
					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
			}
			for _, ep := range tc.endpoints {
				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
					Addresses:  []string{ep.ip},
					Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
					NodeName:   utilpointer.StringPtr(ep.hostname),
				})
			}

			fp.OnEndpointSliceAdd(endpointSlice)
			fp.syncProxyRules()
			assertIPTablesRulesEqual(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())

			if tc.expectEndpointRule {
				fp.OnEndpointSliceDelete(endpointSlice)
				fp.syncProxyRules()
				assertIPTablesRulesNotEqual(t, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
			}
		})
	}
}

// Test_EndpointSliceWithTerminatingEndpoints tests that when there are local ready and ready + terminating
// endpoints, only the ready endpoints are used.
func Test_EndpointSliceWithTerminatingEndpoints(t *testing.T) {
	tcpProtocol := v1.ProtocolTCP
	timeout := v1.DefaultClientIPServiceAffinitySeconds
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.30.1.1",
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"foo": "bar"},
			Ports: []v1.ServicePort{
				{
					Name:       "",
					TargetPort: intstr.FromInt(80),
					Port:       80,
					Protocol:   v1.ProtocolTCP,
				},
			},
			SessionAffinity: v1.ServiceAffinityClientIP,
			SessionAffinityConfig: &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{
					TimeoutSeconds: &timeout,
				},
			},
		},
		Status: v1.ServiceStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "1.2.3.4"},
				},
			},
		},
	}

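	// Because the service uses ClientIP session affinity, the expected rule
	// sets below contain "-m recent" bookkeeping: each KUBE-SEP chain does a
	// --set on DNAT, and the KUBE-SVC/KUBE-XLB chains --rcheck those lists
	// (with the 10800s timeout from DefaultClientIPServiceAffinitySeconds)
	// before falling back to the random statistic rules.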
testcases := []struct {
|
|
name string
|
|
terminatingFeatureGate bool
|
|
endpointslice *discovery.EndpointSlice
|
|
expectedIPTables string
|
|
noUsableEndpoints bool
|
|
}{
|
|
{
|
|
name: "feature gate ProxyTerminatingEndpoints enabled, ready endpoints exist",
|
|
terminatingFeatureGate: true,
|
|
endpointslice: &discovery.EndpointSlice{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-1", "svc1"),
|
|
Namespace: "ns1",
|
|
Labels: map[string]string{discovery.LabelServiceName: "svc1"},
|
|
},
|
|
Ports: []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr(""),
|
|
Port: utilpointer.Int32Ptr(80),
|
|
Protocol: &tcpProtocol,
|
|
}},
|
|
AddressType: discovery.AddressTypeIPv4,
|
|
Endpoints: []discovery.Endpoint{
|
|
{
|
|
Addresses: []string{"10.0.1.1"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
Addresses: []string{"10.0.1.2"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
|
|
Addresses: []string{"10.0.1.3"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
|
|
Addresses: []string{"10.0.1.4"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(false),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since it's not local
|
|
Addresses: []string{"10.0.1.5"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr("host-1"),
|
|
},
|
|
},
|
|
},
|
|
expectedIPTables: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-FW-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
|
|
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
|
|
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-XLB-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`,
|
|
},
|
|
{
|
|
name: "feature gate ProxyTerminatingEndpoints disabled, ready endpoints exist",
|
|
terminatingFeatureGate: false,
|
|
endpointslice: &discovery.EndpointSlice{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-1", "svc1"),
|
|
Namespace: "ns1",
|
|
Labels: map[string]string{discovery.LabelServiceName: "svc1"},
|
|
},
|
|
Ports: []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr(""),
|
|
Port: utilpointer.Int32Ptr(80),
|
|
Protocol: &tcpProtocol,
|
|
}},
|
|
AddressType: discovery.AddressTypeIPv4,
|
|
Endpoints: []discovery.Endpoint{
|
|
{
|
|
Addresses: []string{"10.0.1.1"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
Addresses: []string{"10.0.1.2"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
|
|
Addresses: []string{"10.0.1.3"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since there are ready non-terminating endpoints
|
|
Addresses: []string{"10.0.1.4"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(false),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since it's not local
|
|
Addresses: []string{"10.0.1.5"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr("host-1"),
|
|
},
|
|
},
|
|
},
|
|
expectedIPTables: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-FW-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
|
|
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
|
|
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-XLB-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`,
|
|
},
|
|
{
|
|
name: "feature gate ProxyTerminatingEndpoints enabled, only terminating endpoints exist",
|
|
terminatingFeatureGate: true,
|
|
endpointslice: &discovery.EndpointSlice{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-1", "svc1"),
|
|
Namespace: "ns1",
|
|
Labels: map[string]string{discovery.LabelServiceName: "svc1"},
|
|
},
|
|
Ports: []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr(""),
|
|
Port: utilpointer.Int32Ptr(80),
|
|
Protocol: &tcpProtocol,
|
|
}},
|
|
AddressType: discovery.AddressTypeIPv4,
|
|
Endpoints: []discovery.Endpoint{
|
|
{
|
|
// this endpoint should be used since there are only ready terminating endpoints
|
|
Addresses: []string{"10.0.1.2"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be used since there are only ready terminating endpoints
|
|
Addresses: []string{"10.0.1.3"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should not be used since it is both terminating and not ready.
|
|
Addresses: []string{"10.0.1.4"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(false),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be ignored for node ports since it's not local
|
|
Addresses: []string{"10.0.1.5"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr("host-1"),
|
|
},
|
|
},
|
|
},
|
|
expectedIPTables: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-FW-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
|
|
:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
|
|
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-XLB-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
|
|
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --rcheck --seconds 10800 --reap -j KUBE-SEP-XGJFVO3L2O5SRFNT
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-XGJFVO3L2O5SRFNT
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`,
|
|
},
|
|
{
|
|
name: "with ProxyTerminatingEndpoints disabled, only terminating endpoints exist",
|
|
terminatingFeatureGate: false,
|
|
endpointslice: &discovery.EndpointSlice{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-1", "svc1"),
|
|
Namespace: "ns1",
|
|
Labels: map[string]string{discovery.LabelServiceName: "svc1"},
|
|
},
|
|
Ports: []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr(""),
|
|
Port: utilpointer.Int32Ptr(80),
|
|
Protocol: &tcpProtocol,
|
|
}},
|
|
AddressType: discovery.AddressTypeIPv4,
|
|
Endpoints: []discovery.Endpoint{
|
|
{
|
|
Addresses: []string{"10.0.1.1"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
Addresses: []string{"10.0.1.2"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
Addresses: []string{"10.0.1.3"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(false),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
Addresses: []string{"10.0.1.4"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(false),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
Addresses: []string{"10.0.1.5"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(true),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(false),
|
|
},
|
|
NodeName: utilpointer.StringPtr("host-1"),
|
|
},
|
|
},
|
|
},
|
|
expectedIPTables: `
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-FW-AQI2S6QIMU7PVVRP - [0:0]
|
|
:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-XLB-AQI2S6QIMU7PVVRP
|
|
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment ns1/svc1 -j KUBE-SEP-EQCHZ7S2PJ72OHAY
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
|
|
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 has no local endpoints" -j KUBE-MARK-DROP
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`,
|
|
},
|
|
		{
			name:                   "ProxyTerminatingEndpoints enabled, terminating endpoints on remote node",
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// this endpoint won't be used because it's not local,
						// but it will prevent a REJECT rule from being created
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-XLB-AQI2S6QIMU7PVVRP - [0:0]
:KUBE-FW-AQI2S6QIMU7PVVRP - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-AQI2S6QIMU7PVVRP
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-XLB-AQI2S6QIMU7PVVRP
-A KUBE-FW-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
-A KUBE-XLB-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 has no local endpoints" -j KUBE-MARK-DROP
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
		{
			name:                   "no usable endpoints on any node",
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// Local but not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// Remote and not ready or serving
						Addresses: []string{"10.0.1.6"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			noUsableEndpoints: true,
			expectedIPTables: `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT
-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`,
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, testcase.terminatingFeatureGate)()

			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			fp.OnServiceAdd(service)

			fp.OnEndpointSliceAdd(testcase.endpointslice)
			fp.syncProxyRules()
			t.Log(fp.iptablesData.String())
			assertIPTablesRulesEqual(t, testcase.expectedIPTables, fp.iptablesData.String())

			fp.OnEndpointSliceDelete(testcase.endpointslice)
			fp.syncProxyRules()
			if testcase.noUsableEndpoints {
				// Deleting the EndpointSlice should have had no effect
				assertIPTablesRulesEqual(t, testcase.expectedIPTables, fp.iptablesData.String())
			} else {
				assertIPTablesRulesNotEqual(t, testcase.expectedIPTables, fp.iptablesData.String())
			}
		})
	}
}

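// TestMasqueradeAll verifies that when masqueradeAll is set, the cluster IP
// rule jumps straight to KUBE-MARK-MASQ with no source-address exception
// (i.e. without the usual "! -s 10.0.0.0/8" match), so that all service
// traffic is masqueraded regardless of where it comes from.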
func TestMasqueradeAll(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.masqueradeAll = true

	makeServiceMap(fp,
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: int32(3001),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "1.2.3.4",
			}}
		}),
	)

	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.1"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := `
*filter
:KUBE-SERVICES - [0:0]
:KUBE-EXTERNAL-SERVICES - [0:0]
:KUBE-FORWARD - [0:0]
:KUBE-NODEPORTS - [0:0]
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
COMMIT
*nat
:KUBE-SERVICES - [0:0]
:KUBE-NODEPORTS - [0:0]
:KUBE-POSTROUTING - [0:0]
:KUBE-MARK-MASQ - [0:0]
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-MARK-MASQ
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-MASQ
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-DROP
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-MARK-MASQ
-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-SVC-XPGD46QRK7WJZT7O
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
COMMIT
`

	assertIPTablesRulesEqual(t, expected, fp.iptablesData.String())
}

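// countEndpointsAndComments scans iptables-save output, counting the
// "-A KUBE-SEP-... -j DNAT" rules (one per endpoint) and how many of them
// carry a "--comment" match. It also returns the rule containing
// matchEndpoint, if any, so callers can inspect its exact form.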
func countEndpointsAndComments(iptablesData string, matchEndpoint string) (string, int, int) {
	var numEndpoints, numComments int
	var matched string
	for _, line := range strings.Split(iptablesData, "\n") {
		if strings.HasPrefix(line, "-A KUBE-SEP-") && strings.Contains(line, "-j DNAT") {
			numEndpoints++
			if strings.Contains(line, "--comment") {
				numComments++
			}
			if strings.Contains(line, matchEndpoint) {
				matched = line
			}
		}
	}
	return matched, numEndpoints, numComments
}

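// TestEndpointCommentElision verifies that per-endpoint rule comments are
// emitted while the total number of endpoints stays below
// endpointChainsNumberThreshold, and are elided once it exceeds it.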
func TestEndpointCommentElision(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.masqueradeAll = true

	makeServiceMap(fp,
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		makeTestService("ns2", "svc2", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.0.42"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p8080",
				Port:     8080,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		makeTestService("ns3", "svc3", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p8081",
				Port:     8081,
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)

	tcpProtocol := v1.ProtocolTCP
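	// Two slices with endpointChainsNumberThreshold/2-1 endpoints each keep
	// the total just below the threshold, so every rule should get a comment.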
	populateEndpointSlices(fp,
		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = make([]discovery.Endpoint, endpointChainsNumberThreshold/2-1)
			for i := range eps.Endpoints {
				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.0.%d.%d", i%256, i/256)}
			}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = make([]discovery.Endpoint, endpointChainsNumberThreshold/2-1)
			for i := range eps.Endpoints {
				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.1.%d.%d", i%256, i/256)}
			}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p8080"),
				Port:     utilpointer.Int32(8080),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expectedEndpoints := 2 * (endpointChainsNumberThreshold/2 - 1)
	firstEndpoint, numEndpoints, numComments := countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
	if numEndpoints != expectedEndpoints {
		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
	}
	if numComments != numEndpoints {
		t.Errorf("numComments (%d) != numEndpoints (%d) when numEndpoints < threshold (%d)", numComments, numEndpoints, endpointChainsNumberThreshold)
	}

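	// Adding three more endpoints pushes the total over
	// endpointChainsNumberThreshold, so all comments should now be elided.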
	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"203.0.113.4"},
		}, {
			Addresses: []string{"203.0.113.8"},
		}, {
			Addresses: []string{"203.0.113.12"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr("p8081"),
			Port:     utilpointer.Int32(8081),
			Protocol: &tcpProtocol,
		}}
	}))
	fp.syncProxyRules()

	expectedEndpoints += 3
	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
	if numEndpoints != expectedEndpoints {
		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
	}
	if numComments != 0 {
		t.Errorf("numComments (%d) != 0 when numEndpoints (%d) > threshold (%d)", numComments, numEndpoints, endpointChainsNumberThreshold)
	}
}