
Sort the ":CHAINNAME" lines in the same order as the "-A CHAINNAME" lines (meaning, KUBE-NODEPORTS and KUBE-SERVICES come first). (This will simplify IPTablesDump because it won't need to keep track of the declaration order and the rule order separately.)
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package iptables

import (
	"bytes"
	"fmt"
	"net"
	"reflect"
	"regexp"
	stdruntime "runtime"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/lithammer/dedent"
	"github.com/stretchr/testify/assert"
	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/proxy"
	"k8s.io/kubernetes/pkg/proxy/metrics"

	"k8s.io/kubernetes/pkg/proxy/healthcheck"
	utilproxy "k8s.io/kubernetes/pkg/proxy/util"
	proxyutiliptables "k8s.io/kubernetes/pkg/proxy/util/iptables"
	utilproxytest "k8s.io/kubernetes/pkg/proxy/util/testing"
	"k8s.io/kubernetes/pkg/util/async"
	"k8s.io/kubernetes/pkg/util/conntrack"
	utiliptables "k8s.io/kubernetes/pkg/util/iptables"
	iptablestest "k8s.io/kubernetes/pkg/util/iptables/testing"
	"k8s.io/utils/exec"
	fakeexec "k8s.io/utils/exec/testing"
	netutils "k8s.io/utils/net"
	utilpointer "k8s.io/utils/pointer"
)

func TestDeleteEndpointConnectionsIPv4(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V4 UDP",
			svcName:     "v4-udp",
			svcIP:       "172.30.1.1",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "10.240.0.3:80",
		},
		{
			description: "V4 TCP",
			svcName:     "v4-tcp",
			svcIP:       "172.30.2.2",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "10.240.0.4:80",
		},
		{
			description: "V4 SCTP",
			svcName:     "v4-sctp",
			svcIP:       "172.30.3.3",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "10.240.0.5:80",
		},
		{
			description:  "V4 UDP, nothing to delete, benign error",
			svcName:      "v4-udp-nothing-to-delete",
			svcIP:        "172.30.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: conntrack.NoConnectionToDelete,
		},
		{
			description:  "V4 UDP, unexpected error, should be glogged",
			svcName:      "v4-udp-simulated-error",
			svcIP:        "172.30.1.1",
			svcPort:      80,
			protocol:     UDP,
			endpoint:     "10.240.0.3:80",
			simulatedErr: "simulated error",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP.
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				simErr = fmt.Errorf(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)

		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := netutils.ParseIPSloppy(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}

func TestDeleteEndpointConnectionsIPv6(t *testing.T) {
	const (
		UDP  = v1.ProtocolUDP
		TCP  = v1.ProtocolTCP
		SCTP = v1.ProtocolSCTP
	)

	testCases := []struct {
		description  string
		svcName      string
		svcIP        string
		svcPort      int32
		protocol     v1.Protocol
		endpoint     string                // IP:port endpoint
		epSvcPair    proxy.ServiceEndpoint // Will be generated by test
		simulatedErr string
	}{
		{
			description: "V6 UDP",
			svcName:     "v6-udp",
			svcIP:       "fd00:1234::20",
			svcPort:     80,
			protocol:    UDP,
			endpoint:    "[2001:db8::2]:80",
		},
		{
			description: "V6 TCP",
			svcName:     "v6-tcp",
			svcIP:       "fd00:1234::30",
			svcPort:     80,
			protocol:    TCP,
			endpoint:    "[2001:db8::3]:80",
		},
		{
			description: "V6 SCTP",
			svcName:     "v6-sctp",
			svcIP:       "fd00:1234::40",
			svcPort:     80,
			protocol:    SCTP,
			endpoint:    "[2001:db8::4]:80",
		},
	}

	// Create a fake executor for the conntrack utility. This should only be
	// invoked for UDP and SCTP connections, since no conntrack cleanup is needed for TCP.
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	for _, tc := range testCases {
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			var cmdOutput string
			var simErr error
			if tc.simulatedErr == "" {
				cmdOutput = "1 flow entries have been deleted"
			} else {
				simErr = fmt.Errorf(tc.simulatedErr)
			}
			cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, simErr }
			fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
			fexec.CommandScript = append(fexec.CommandScript, execFunc)
		}
	}

	ipt := iptablestest.NewIPv6Fake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	for _, tc := range testCases {
		makeServiceMap(fp,
			makeTestService("ns1", tc.svcName, func(svc *v1.Service) {
				svc.Spec.ClusterIP = tc.svcIP
				svc.Spec.Ports = []v1.ServicePort{{
					Name:     "p80",
					Port:     tc.svcPort,
					Protocol: tc.protocol,
				}}
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			}),
		)

		fp.serviceMap.Update(fp.serviceChanges)
	}

	// Run the test cases
	for _, tc := range testCases {
		priorExecs := fexec.CommandCalls
		priorGlogErrs := klog.Stats.Error.Lines()

		svc := proxy.ServicePortName{
			NamespacedName: types.NamespacedName{Namespace: "ns1", Name: tc.svcName},
			Port:           "p80",
			Protocol:       tc.protocol,
		}
		input := []proxy.ServiceEndpoint{
			{
				Endpoint:        tc.endpoint,
				ServicePortName: svc,
			},
		}

		fp.deleteEndpointConnections(input)

		// For UDP and SCTP connections, check the executed conntrack command
		var expExecs int
		if conntrack.IsClearConntrackNeeded(tc.protocol) {
			isIPv6 := func(ip string) bool {
				netIP := netutils.ParseIPSloppy(ip)
				return netIP.To4() == nil
			}
			endpointIP := utilproxy.IPPart(tc.endpoint)
			expectCommand := fmt.Sprintf("conntrack -D --orig-dst %s --dst-nat %s -p %s", tc.svcIP, endpointIP, strings.ToLower(string(tc.protocol)))
			if isIPv6(endpointIP) {
				expectCommand += " -f ipv6"
			}
			actualCommand := strings.Join(fcmd.CombinedOutputLog[fexec.CommandCalls-1], " ")
			if actualCommand != expectCommand {
				t.Errorf("%s: Expected command: %s, but executed %s", tc.description, expectCommand, actualCommand)
			}
			expExecs = 1
		}

		// Check the number of times conntrack was executed
		execs := fexec.CommandCalls - priorExecs
		if execs != expExecs {
			t.Errorf("%s: Expected conntrack to be executed %d times, but got %d", tc.description, expExecs, execs)
		}

		// Check the number of new glog errors
		var expGlogErrs int64
		if tc.simulatedErr != "" && tc.simulatedErr != conntrack.NoConnectionToDelete {
			expGlogErrs = 1
		}
		glogErrs := klog.Stats.Error.Lines() - priorGlogErrs
		if glogErrs != expGlogErrs {
			t.Errorf("%s: Expected %d glogged errors, but got %d", tc.description, expGlogErrs, glogErrs)
		}
	}
}

// Conventions for tests using NewFakeProxier:
//
// Pod IPs:             10.0.0.0/8
// Service ClusterIPs:  172.30.0.0/16
// Node IPs:            192.168.0.0/24
// Local Node IP:       192.168.0.2
// Service ExternalIPs: 192.168.99.0/24
// LoadBalancer IPs:    1.2.3.4, 5.6.7.8, 9.10.11.12
// Non-cluster IPs:     203.0.113.0/24
// LB Source Range:     203.0.113.0/25

const testHostname = "test-hostname"
const testNodeIP = "192.168.0.2"
const testExternalClient = "203.0.113.2"
const testExternalClientBlocked = "203.0.113.130"

func NewFakeProxier(ipt utiliptables.Interface) *Proxier {
	// TODO: Call NewProxier after refactoring out the goroutine
	// invocation into a Run() method.
	ipfamily := v1.IPv4Protocol
	if ipt.IsIPv6() {
		ipfamily = v1.IPv6Protocol
	}
	detectLocal, _ := proxyutiliptables.NewDetectLocalByCIDR("10.0.0.0/8", ipt)

	networkInterfacer := utilproxytest.NewFakeNetwork()
	itf := net.Interface{Index: 0, MTU: 0, Name: "lo", HardwareAddr: nil, Flags: 0}
	addrs := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy("127.0.0.1"), Mask: net.CIDRMask(8, 32)},
		&net.IPNet{IP: netutils.ParseIPSloppy("::1"), Mask: net.CIDRMask(128, 128)},
	}
	networkInterfacer.AddInterfaceAddr(&itf, addrs)
	itf1 := net.Interface{Index: 1, MTU: 0, Name: "eth0", HardwareAddr: nil, Flags: 0}
	addrs1 := []net.Addr{
		&net.IPNet{IP: netutils.ParseIPSloppy(testNodeIP), Mask: net.CIDRMask(24, 32)},
	}
	networkInterfacer.AddInterfaceAddr(&itf1, addrs1)

	p := &Proxier{
		exec:                     &fakeexec.FakeExec{},
		serviceMap:               make(proxy.ServiceMap),
		serviceChanges:           proxy.NewServiceChangeTracker(newServiceInfo, ipfamily, nil, nil),
		endpointsMap:             make(proxy.EndpointsMap),
		endpointsChanges:         proxy.NewEndpointChangeTracker(testHostname, newEndpointInfo, ipfamily, nil, nil),
		iptables:                 ipt,
		masqueradeMark:           "0x4000",
		localDetector:            detectLocal,
		hostname:                 testHostname,
		serviceHealthServer:      healthcheck.NewFakeServiceHealthServer(),
		precomputedProbabilities: make([]string, 0, 1001),
		iptablesData:             bytes.NewBuffer(nil),
		existingFilterChainsData: bytes.NewBuffer(nil),
		filterChains:             utilproxy.LineBuffer{},
		filterRules:              utilproxy.LineBuffer{},
		natChains:                utilproxy.LineBuffer{},
		natRules:                 utilproxy.LineBuffer{},
		nodeIP:                   netutils.ParseIPSloppy(testNodeIP),
		nodePortAddresses:        make([]string, 0),
		networkInterfacer:        networkInterfacer,
	}
	p.setInitialized(true)
	p.syncRunner = async.NewBoundedFrequencyRunner("test-sync-runner", p.syncProxyRules, 0, time.Minute, 1)
	return p
}
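
// sketchFakeProxierUsage is a minimal usage sketch (illustrative only; it is
// not called by any test): install one Service into the fake proxier, sync,
// and read back the iptables-restore input that the sync generated. It assumes
// makeServiceMap/makeTestService (defined elsewhere in this file) and that
// fp.iptablesData still holds the most recent restore payload after
// syncProxyRules, which is what the tests below rely on.
func sketchFakeProxierUsage() string {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	makeServiceMap(fp,
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	fp.syncProxyRules()
	return fp.iptablesData.String()
}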

// parseIPTablesData takes iptables-save output and returns a map of table name to array of lines.
func parseIPTablesData(ruleData string) (map[string][]string, error) {
	// Split ruleData at the "COMMIT" lines; given valid input, this will result in
	// one element for each table plus an extra empty element (since the ruleData
	// should end with a "COMMIT" line).
	rawTables := strings.Split(strings.TrimPrefix(ruleData, "\n"), "COMMIT\n")
	nTables := len(rawTables) - 1
	if nTables < 2 || rawTables[nTables] != "" {
		return nil, fmt.Errorf("bad ruleData (%d tables)\n%s", nTables, ruleData)
	}

	tables := make(map[string][]string, nTables)
	for i, table := range rawTables[:nTables] {
		lines := strings.Split(strings.Trim(table, "\n"), "\n")
		// The first line should be, eg, "*nat" or "*filter"
		if lines[0][0] != '*' {
			return nil, fmt.Errorf("bad ruleData (table %d starts with %q)", i+1, lines[0])
		}
		// add back the "COMMIT" line that got eaten by the strings.Split above
		lines = append(lines, "COMMIT")
		tables[lines[0][1:]] = lines
	}

	if tables["nat"] == nil {
		return nil, fmt.Errorf("bad ruleData (no %q table)", "nat")
	}
	if tables["filter"] == nil {
		return nil, fmt.Errorf("bad ruleData (no %q table)", "filter")
	}
	return tables, nil
}
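
// sketchParseIPTablesData illustrates (as a hedged sketch, not exercised by
// any test) what parseIPTablesData returns for a minimal two-table dump: one
// map entry per table, keyed by table name, with the trailing "COMMIT" line
// restored at the end of each entry.
func sketchParseIPTablesData() {
	tables, err := parseIPTablesData(dedent.Dedent(`
		*filter
		:KUBE-FORWARD - [0:0]
		COMMIT
		*nat
		:KUBE-SERVICES - [0:0]
		COMMIT
		`))
	if err != nil {
		panic(err)
	}
	// tables["filter"] == []string{"*filter", ":KUBE-FORWARD - [0:0]", "COMMIT"}
	// tables["nat"]    == []string{"*nat", ":KUBE-SERVICES - [0:0]", "COMMIT"}
	_ = tables
}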

func TestParseIPTablesData(t *testing.T) {
	for _, tc := range []struct {
		name   string
		input  string
		output map[string][]string
		error  string
	}{
		{
			name: "basic test",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-NODEPORTS - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				COMMIT
				`),
			output: map[string][]string{
				"filter": {
					`*filter`,
					`:KUBE-SERVICES - [0:0]`,
					`:KUBE-EXTERNAL-SERVICES - [0:0]`,
					`:KUBE-FORWARD - [0:0]`,
					`:KUBE-NODEPORTS - [0:0]`,
					`-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
					`-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP`,
					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT`,
					`-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT`,
					`COMMIT`,
				},
				"nat": {
					`*nat`,
					`:KUBE-SERVICES - [0:0]`,
					`:KUBE-NODEPORTS - [0:0]`,
					`:KUBE-POSTROUTING - [0:0]`,
					`:KUBE-MARK-MASQ - [0:0]`,
					`:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]`,
					`:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]`,
					`-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN`,
					`-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000`,
					`-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE`,
					`-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000`,
					`-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ`,
					`-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ`,
					`-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80`,
					`-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS`,
					`COMMIT`,
				},
			},
		},
		{
			name: "not enough tables",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: "bad ruleData (1 tables)",
		},
		{
			name: "trailing junk",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				junk
				`),
			error: "bad ruleData (2 tables)",
		},
		{
			name: "bad start line",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: `bad ruleData (table 2 starts with ":KUBE-SERVICES - [0:0]")`,
		},
		{
			name: "no nat",
			input: dedent.Dedent(`
				*filter
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*mangle
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: `bad ruleData (no "nat" table)`,
		},
		{
			name: "no filter",
			input: dedent.Dedent(`
				*mangle
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				:KUBE-NODEPORTS - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				`),
			error: `bad ruleData (no "filter" table)`,
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			out, err := parseIPTablesData(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				} else {
					assert.Equal(t, tc.output, out)
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

func countRules(tableName string, ruleData string) int {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		klog.ErrorS(err, "error parsing iptables rules")
		return -1
	}

	rules := 0
	for _, line := range tables[tableName] {
		if line[0] == '-' {
			rules++
		}
	}
	return rules
}
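
// For instance (an illustrative sketch, using the "basic test" data from
// TestParseIPTablesData above): countRules("filter", ruleData) yields 4 and
// countRules("nat", ruleData) yields 10, because every line starting with "-"
// (whether "-A" or "-X") is counted as a rule.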

// findAllMatches takes an array of lines and a pattern with one parenthesized group, and
// returns a sorted array of all of the unique matches of the parenthesized group.
func findAllMatches(lines []string, pattern string) []string {
	regex := regexp.MustCompile(pattern)
	allMatches := sets.NewString()
	for _, line := range lines {
		match := regex.FindStringSubmatch(line)
		if len(match) == 2 {
			allMatches.Insert(match[1])
		}
	}
	return allMatches.List()
}

// moveMatchingLines moves lines that match pattern from input to output
func moveMatchingLines(pattern string, input, output []string) ([]string, []string) {
	var newIn []string
	regex := regexp.MustCompile(pattern)
	for _, line := range input {
		if regex.FindString(line) != "" {
			output = append(output, line)
		} else {
			newIn = append(newIn, line)
		}
	}
	return newIn, output
}
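
// sketchSortHelpers is a small sketch (not called by the tests) of how
// findAllMatches and moveMatchingLines cooperate in sortIPTablesRules below:
// first collect the sorted, unique chain prefixes, then peel the lines for
// each chain out of the input in that order.
func sketchSortHelpers() {
	lines := []string{
		`-A KUBE-SVC-BBB -j ACCEPT`,
		`-A KUBE-SVC-AAA -j ACCEPT`,
	}
	// Sorted unique submatches: ["-A KUBE-SVC-AAA ", "-A KUBE-SVC-BBB "]
	chains := findAllMatches(lines, `(-A KUBE-[^ ]* )`)
	var output []string
	for _, chain := range chains {
		lines, output = moveMatchingLines(chain, lines, output)
	}
	// output now holds the rules sorted by chain name; lines is empty.
	_ = output
}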

// checkIPTablesRuleJumps checks that every `-j` in the given rules jumps to a chain
// that we created and added rules to
func checkIPTablesRuleJumps(ruleData string) error {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		return err
	}

	for tableName, lines := range tables {
		// Find all of the lines like ":KUBE-SERVICES", indicating chains that
		// iptables-restore would create when loading the data.
		createdChains := sets.NewString(findAllMatches(lines, `^:([^ ]*)`)...)
		// Find all of the lines like "-X KUBE-SERVICES ..." indicating chains
		// that we are deleting because they are no longer used, and remove
		// those chains from createdChains.
		createdChains = createdChains.Delete(findAllMatches(lines, `-X ([^ ]*)`)...)

		// Find all of the lines like "-A KUBE-SERVICES ..." indicating chains
		// that we are adding at least one rule to.
		filledChains := sets.NewString(findAllMatches(lines, `-A ([^ ]*)`)...)

		// Find all of the chains that are jumped to by some rule so we can make
		// sure we only jump to valid chains.
		jumpedChains := sets.NewString(findAllMatches(lines, `-j ([^ ]*)`)...)
		// Ignore jumps to chains that we expect to exist even if kube-proxy
		// didn't create them itself.
		jumpedChains.Delete("ACCEPT", "REJECT", "DROP", "MARK", "RETURN", "DNAT", "SNAT", "MASQUERADE")
		jumpedChains.Delete(string(KubeMarkDropChain))

		// Find cases where we have "-A FOO ... -j BAR" but no ":BAR", meaning
		// that we are jumping to a chain that was not created.
		missingChains := jumpedChains.Difference(createdChains)
		missingChains = missingChains.Union(filledChains.Difference(createdChains))
		if len(missingChains) > 0 {
			return fmt.Errorf("some chains in %s are used but were not created: %v", tableName, missingChains.List())
		}

		// Find cases where we have "-A FOO ... -j BAR", but no "-A BAR ...",
		// meaning that we are jumping to a chain that we didn't write out any
		// rules for, which is normally a bug. (Except that KUBE-SERVICES always
		// jumps to KUBE-NODEPORTS, even when there are no NodePort rules.)
		emptyChains := jumpedChains.Difference(filledChains)
		emptyChains.Delete(string(kubeNodePortsChain))
		if len(emptyChains) > 0 {
			return fmt.Errorf("some chains in %s are jumped to but have no rules: %v", tableName, emptyChains.List())
		}

		// Find cases where we have ":BAR" but no "-A FOO ... -j BAR", meaning
		// that we are creating an empty chain but not using it for anything.
		extraChains := createdChains.Difference(jumpedChains)
		extraChains.Delete(string(kubeServicesChain), string(kubeExternalServicesChain), string(kubeNodePortsChain), string(kubePostroutingChain), string(kubeForwardChain), string(KubeMarkMasqChain))
		if len(extraChains) > 0 {
			return fmt.Errorf("some chains in %s are created but not used: %v", tableName, extraChains.List())
		}
	}

	return nil
}

func TestCheckIPTablesRuleJumps(t *testing.T) {
	for _, tc := range []struct {
		name  string
		input string
		error string
	}{
		{
			name: "valid",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 10.20.30.41 --dport 80 ! -s 10.0.0.0/24 -j KUBE-MARK-MASQ
				COMMIT
				`),
			error: "",
		},
		{
			name: "can't jump to chain that wasn't created",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				COMMIT
				`),
			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't jump to chain that has no rules",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
				COMMIT
				`),
			error: "some chains in nat are jumped to but have no rules: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't add rules to a chain that wasn't created",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SERVICES - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
				-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				COMMIT
				`),
			error: "some chains in nat are used but were not created: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
		{
			name: "can't create chain and then not use it",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" ...
				COMMIT
				`),
			error: "some chains in nat are created but not used: [KUBE-SVC-XPGD46QRK7WJZT7O]",
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			err := checkIPTablesRuleJumps(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// sortIPTablesRules sorts `iptables-restore` output so as to not depend on the order that
// Services get processed in, while preserving the relative ordering of related rules.
func sortIPTablesRules(ruleData string) (string, error) {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		return "", err
	}

	tableNames := make([]string, 0, len(tables))
	for tableName := range tables {
		tableNames = append(tableNames, tableName)
	}
	sort.Strings(tableNames)

	var output []string
	for _, name := range tableNames {
		lines := tables[name]

		// Move "*TABLENAME" line
		lines, output = moveMatchingLines(`^\*`, lines, output)

		// findAllMatches() returns a sorted list of unique matches. So for
		// each of the following, we find all the matches for the regex, then
		// for each unique match (in sorted order), move all of the lines that
		// contain that match.

		// Move and sort ":CHAINNAME" lines (in the same order we will sort
		// the chains themselves below).
		lines, output = moveMatchingLines(`:KUBE-NODEPORTS`, lines, output)
		lines, output = moveMatchingLines(`:KUBE-SERVICES`, lines, output)
		for _, chainName := range findAllMatches(lines, `^(:KUBE-[^ ]*) `) {
			lines, output = moveMatchingLines(chainName, lines, output)
		}
		for _, chainName := range findAllMatches(lines, `^(:[^ ]*) `) {
			lines, output = moveMatchingLines(chainName, lines, output)
		}

		// Move KUBE-NODEPORTS rules for each service, sorted by service name
		for _, nextNodePortService := range findAllMatches(lines, `-A KUBE-NODEPORTS.*--comment "?([^ ]*)`) {
			lines, output = moveMatchingLines(fmt.Sprintf(`^-A KUBE-NODEPORTS.*%s`, nextNodePortService), lines, output)
		}

		// Move KUBE-SERVICES rules for each service, sorted by service name. The
		// relative ordering of actual per-service lines doesn't matter, but keep
		// the "must be the last rule" rule last because it's confusing otherwise...
		lines, tmp := moveMatchingLines(`KUBE-SERVICES.*must be the last rule`, lines, nil)
		for _, nextService := range findAllMatches(lines, `-A KUBE-SERVICES.*--comment "?([^ ]*)`) {
			lines, output = moveMatchingLines(fmt.Sprintf(`^-A KUBE-SERVICES.*%s`, nextService), lines, output)
		}
		_, output = moveMatchingLines(`.`, tmp, output)

		// Move remaining chains, sorted by chain name
		for _, nextChain := range findAllMatches(lines, `(-A KUBE-[^ ]* )`) {
			lines, output = moveMatchingLines(nextChain, lines, output)
		}

		// Some tests have deletions...
		for _, nextChain := range findAllMatches(lines, `(-X KUBE-.*)`) {
			lines, output = moveMatchingLines(nextChain, lines, output)
		}

		// Move the "COMMIT" line and anything else left. (There shouldn't be anything
		// else, but if there is, it will show up in the diff later.)
		_, output = moveMatchingLines(`.`, lines, output)
	}

	// Input ended with a "\n", so make sure the output does too
	output = append(output, "")

	return strings.Join(output, "\n"), nil
}
|
|
|
|
func TestSortIPTablesRules(t *testing.T) {
|
|
for _, tc := range []struct {
|
|
name string
|
|
input string
|
|
output string
|
|
error string
|
|
}{
|
|
{
|
|
name: "basic test using each match type",
|
|
input: dedent.Dedent(`
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
|
|
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
|
|
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
|
|
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
|
|
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
|
|
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
|
|
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
|
|
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
COMMIT
|
|
`),
|
|
output: dedent.Dedent(`
|
|
*filter
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-MARK-MASQ - [0:0]
|
|
:KUBE-POSTROUTING - [0:0]
|
|
:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
|
|
:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
|
|
:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
|
|
:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
|
|
:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
|
|
:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
|
|
:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
|
|
:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
|
|
:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
|
|
-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "Redirect pods trying to reach external loadbalancer VIP to clusterIP" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 LB IP to service chain" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
|
|
-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
|
|
-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
|
|
-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
|
|
-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
|
|
-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
|
|
-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
|
|
-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -j KUBE-SEP-C6EBXVWJJZMIWKLZ
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment ns2/svc2:p80 -j KUBE-SEP-RS4RBKLTHTF2IUXJ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3002 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment ns3/svc3:p80 -j KUBE-SEP-OYPFS5VJICHGATKP
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
|
|
-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ
|
|
-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
|
|
COMMIT
|
|
`),
|
|
},
|
|
{
|
|
name: "not enough tables",
|
|
input: dedent.Dedent(`
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
`),
|
|
error: "bad ruleData (1 tables)",
|
|
},
|
|
{
|
|
name: "extra tables",
|
|
input: dedent.Dedent(`
|
|
*filter
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*mangle
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
:KUBE-NODEPORTS - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
`),
|
|
output: dedent.Dedent(`
|
|
*filter
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*mangle
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
*nat
|
|
:KUBE-NODEPORTS - [0:0]
|
|
:KUBE-SERVICES - [0:0]
|
|
:KUBE-EXTERNAL-SERVICES - [0:0]
|
|
:KUBE-FORWARD - [0:0]
|
|
-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
|
|
-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
|
|
-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
|
COMMIT
|
|
`),
|
|
},
|
|
{
|
|
name: "correctly match same service name in different styles of comments",
|
|
input: dedent.Dedent(`
|
|
*filter
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
|
|
-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
|
|
-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
|
|
COMMIT
|
|
`),
|
|
output: dedent.Dedent(`
|
|
*filter
|
|
COMMIT
|
|
*nat
|
|
:KUBE-SERVICES - [0:0]
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" svc1 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns1/svc1 svc1 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns1/svc1 blah" svc1 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
|
|
-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" svc3 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns3/svc3 blah" svc3 line 2
|
|
-A KUBE-SERVICES -m comment --comment ns3/svc3 svc3 line 3
|
|
-A KUBE-SERVICES -m comment --comment ns4/svc4 svc4 line 1
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" svc4 line 2
|
|
-A KUBE-SERVICES -m comment --comment "ns4/svc4 blah" svc4 line 3
|
|
COMMIT
|
|
`),
|
|
},
|
|
		{
			name: "unexpected junk lines are preserved",
			input: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
				:KUBE-AAAAA - [0:0]
				:KUBE-ZZZZZ - [0:0]
				:WHY-IS-THIS-CHAIN-HERE - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
				COMMIT
				`),
			output: dedent.Dedent(`
				*filter
				COMMIT
				*nat
				:KUBE-SERVICES - [0:0]
				:KUBE-AAAAA - [0:0]
				:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
				:KUBE-ZZZZZ - [0:0]
				:WHY-IS-THIS-CHAIN-HERE - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" svc2 line 1
				-A KUBE-SERVICES -m comment --comment ns2/svc2 svc2 line 2
				-A KUBE-SERVICES -m comment --comment "ns2/svc2 blah" svc2 line 3
				-A KUBE-AAAAA -m comment --comment "mystery chain number 2"
				-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
				-A KUBE-ZZZZZ -m comment --comment "mystery chain number 1"
				-A WHY-IS-THIS-CHAIN-HERE -j ACCEPT
				COMMIT
				`),
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			out, err := sortIPTablesRules(tc.input)
			if err == nil {
				if tc.error != "" {
					t.Errorf("unexpectedly did not get error")
				} else {
					assert.Equal(t, strings.TrimPrefix(tc.output, "\n"), out)
				}
			} else {
				if tc.error == "" {
					t.Errorf("got unexpected error: %v", err)
				} else if !strings.HasPrefix(err.Error(), tc.error) {
					t.Errorf("got wrong error: %v (expected %q)", err, tc.error)
				}
			}
		})
	}
}

// getLine returns the line number of the caller, if possible. This is useful in
// tests with a large number of cases - when something goes wrong you can find
// which case more easily.
func getLine() int {
	_, _, line, ok := stdruntime.Caller(1)
	if ok {
		return line
	}
	return 0
}
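
// For example, the assertion helpers below are invoked as
// assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String()),
// so a failure inside a shared helper can still report which test case called it.
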
// assertIPTablesRulesEqual asserts that the generated rules in result match the rules in
// expected, ignoring irrelevant ordering differences.
func assertIPTablesRulesEqual(t *testing.T, line int, expected, result string) {
	expected = strings.TrimLeft(expected, " \t\n")

	result, err := sortIPTablesRules(strings.TrimLeft(result, " \t\n"))
	if err != nil {
		t.Fatalf("%s", err)
	}

	lineStr := ""
	if line != 0 {
		lineStr = fmt.Sprintf(" (from line %d)", line)
	}
	if diff := cmp.Diff(expected, result); diff != "" {
		t.Errorf("rules do not match%s:\ndiff:\n%s\nfull result:\n```\n%s```", lineStr, diff, result)
	}

	err = checkIPTablesRuleJumps(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
}

// assertIPTablesRulesNotEqual asserts that the generated rules in result DON'T match the
// rules in expected, ignoring irrelevant ordering differences.
func assertIPTablesRulesNotEqual(t *testing.T, line int, expected, result string) {
	expected = strings.TrimLeft(expected, " \t\n")

	result, err := sortIPTablesRules(strings.TrimLeft(result, " \t\n"))
	if err != nil {
		t.Fatalf("%s", err)
	}

	lineStr := ""
	if line != 0 {
		lineStr = fmt.Sprintf(" (from line %d)", line)
	}
	if cmp.Equal(expected, result) {
		t.Errorf("rules do not differ%s:\nfull result:\n```\n%s```", lineStr, result)
	}

	err = checkIPTablesRuleJumps(expected)
	if err != nil {
		t.Fatalf("%s", err)
	}
	err = checkIPTablesRuleJumps(result)
	if err != nil {
		t.Fatalf("%s", err)
	}
}

// ruleMatchesIP helps test whether an iptables rule such as "! -s 192.168.0.0/16" matches
// ipStr. ruleAddress is either an IP address ("1.2.3.4") or a CIDR string
// ("1.2.3.0/24"). negated is whether the iptables rule negates the match.
func ruleMatchesIP(t *testing.T, negated bool, ruleAddress, ipStr string) bool {
	ip := netutils.ParseIPSloppy(ipStr)
	if ip == nil {
		t.Fatalf("Bad IP in test case: %s", ipStr)
	}

	var matches bool
	if strings.Contains(ruleAddress, "/") {
		_, cidr, err := netutils.ParseCIDRSloppy(ruleAddress)
		if err != nil {
			t.Errorf("Bad CIDR in kube-proxy output: %v", err)
		}
		matches = cidr.Contains(ip)
	} else {
		ip2 := netutils.ParseIPSloppy(ruleAddress)
		if ip2 == nil {
			t.Errorf("Bad IP/CIDR in kube-proxy output: %s", ruleAddress)
		}
		matches = ip.Equal(ip2)
	}
	return (!negated && matches) || (negated && !matches)
}
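
// For example, ruleMatchesIP(t, false, "10.0.0.0/8", "10.180.0.1") returns true because
// the CIDR contains the IP, while the negated form (as in "! -s 10.0.0.0/8") returns
// false for the same pair.
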
// Regular expressions used by iptablesTracer. Note that these are not fully general-purpose
// and may need to be updated if we make large changes to our iptable rules.
var addRuleToChainRegex = regexp.MustCompile(`-A ([^ ]*) `)
var moduleRegex = regexp.MustCompile("-m ([^ ]*)")
var commentRegex = regexp.MustCompile(`-m comment --comment ("[^"]*"|[^" ]*) `)
var srcLocalRegex = regexp.MustCompile("(!)? --src-type LOCAL")
var destLocalRegex = regexp.MustCompile("(!)? --dst-type LOCAL")
var destIPRegex = regexp.MustCompile("(!)? -d ([^ ]*) ")
var destPortRegex = regexp.MustCompile(" --dport ([^ ]*) ")
var sourceIPRegex = regexp.MustCompile("(!)? -s ([^ ]*) ")
var affinityRegex = regexp.MustCompile(" --rcheck ")

// (If `--probability` appears, it can only appear before the `-j`, and if `--to-destination`
// appears, it can only appear after the `-j`, so this is not as fragile as it looks.)
var jumpRegex = regexp.MustCompile("(--probability.*)? -j ([^ ]*)( --to-destination (.*))?$")
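
// For example, against `-j DNAT --to-destination 10.180.0.1:80`, jumpRegex captures the
// (empty) probability prefix, the target "DNAT", and the destination "10.180.0.1:80";
// Test_iptablesTracerRegexps below pins down these captures case by case.
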
func Test_iptablesTracerRegexps(t *testing.T) {
	testCases := []struct {
		name    string
		regex   *regexp.Regexp
		rule    string
		matches []string
	}{
		{
			name:    "addRuleToChainRegex",
			regex:   addRuleToChainRegex,
			rule:    `-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
			matches: []string{`-A KUBE-NODEPORTS `, "KUBE-NODEPORTS"},
		},
		{
			name:    "addRuleToChainRegex requires an actual rule, not just a chain name",
			regex:   addRuleToChainRegex,
			rule:    `-A KUBE-NODEPORTS`,
			matches: nil,
		},
		{
			name:    "addRuleToChainRegex only matches adds",
			regex:   addRuleToChainRegex,
			rule:    `-D KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
			matches: nil,
		},
		{
			name:    "commentRegex with quoted comment",
			regex:   commentRegex,
			rule:    `-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
			matches: []string{`-m comment --comment "ns2/svc2:p80 health check node port" `, `"ns2/svc2:p80 health check node port"`},
		},
		{
			name:    "commentRegex with unquoted comment",
			regex:   commentRegex,
			rule:    `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
			matches: []string{`-m comment --comment ns1/svc1:p80 `, "ns1/svc1:p80"},
		},
		{
			name:    "no comment",
			regex:   commentRegex,
			rule:    `-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000`,
			matches: nil,
		},
		{
			name:    "moduleRegex",
			regex:   moduleRegex,
			rule:    `-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
			matches: []string{"-m comment", "comment"},
		},
		{
			name:    "local source",
			regex:   srcLocalRegex,
			rule:    `-A KUBE-XLB-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 LB IP" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ`,
			matches: []string{" --src-type LOCAL", ""},
		},
		{
			name:    "not local destination",
			regex:   destLocalRegex,
			rule:    `-A RULE-TYPE-NOT-CURRENTLY-USED-BY-KUBE-PROXY -m addrtype ! --dst-type LOCAL -j KUBE-MARK-MASQ`,
			matches: []string{"! --dst-type LOCAL", "!"},
		},
		{
			name:    "destination IP",
			regex:   destIPRegex,
			rule:    `-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
			matches: []string{" -d 172.30.0.41 ", "", "172.30.0.41"},
		},
		{
			name:    "destination port",
			regex:   destPortRegex,
			rule:    `-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O`,
			matches: []string{" --dport 80 ", "80"},
		},
		{
			name:    "destination IP but no port",
			regex:   destPortRegex,
			rule:    `-A KUBE-SVC-XPGD46QRK7WJZT7O -d 172.30.0.41 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ`,
			matches: nil,
		},
		{
			name:    "source IP",
			regex:   sourceIPRegex,
			rule:    `-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ`,
			matches: []string{" -s 10.180.0.1 ", "", "10.180.0.1"},
		},
		{
			name:    "not source IP",
			regex:   sourceIPRegex,
			rule:    `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ`,
			matches: []string{"! -s 10.0.0.0/8 ", "!", "10.0.0.0/8"},
		},
		{
			name:    "affinityRegex",
			regex:   affinityRegex,
			rule:    `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --rcheck --seconds 10800 --reap -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
			matches: []string{" --rcheck "},
		},
		{
			name:    "jump to internal target",
			regex:   jumpRegex,
			rule:    `-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT`,
			matches: []string{" -j ACCEPT", "", "ACCEPT", "", ""},
		},
		{
			name:    "jump to KUBE chain",
			regex:   jumpRegex,
			rule:    `-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment ns1/svc1:p80 -j KUBE-SEP-SXIVWICOYRO3J4NJ`,
			matches: []string{" -j KUBE-SEP-SXIVWICOYRO3J4NJ", "", "KUBE-SEP-SXIVWICOYRO3J4NJ", "", ""},
		},
		{
			name:    "jump to DNAT",
			regex:   jumpRegex,
			rule:    `-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80`,
			matches: []string{" -j DNAT --to-destination 10.180.0.1:80", "", "DNAT", " --to-destination 10.180.0.1:80", "10.180.0.1:80"},
		},
		{
			name:    "jump to endpoint",
			regex:   jumpRegex,
			rule:    `-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment ns4/svc4:p80 -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC`,
			matches: []string{"--probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC", "--probability 0.5000000000", "KUBE-SEP-UKSFD7AGPMPPLUHC", "", ""},
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			matches := testCase.regex.FindStringSubmatch(testCase.rule)
			if !reflect.DeepEqual(matches, testCase.matches) {
				t.Errorf("bad match: expected %#v, got %#v", testCase.matches, matches)
			}
		})
	}
}

// knownModules is the set of modules (i.e. "-m foo") that we allow to be present in rules
// passed to an iptablesTracer. If any other module is found in a rule, the test will
// fail.
//
// If a module is in knownModules but is not in noMatchModules and is not handled by
// ruleMatches, then the result is that match rules using that module will have no effect
// for tracing purposes.
var knownModules = sets.NewString("addrtype", "comment", "conntrack", "mark", "recent", "statistic", "tcp", "udp")

// noMatchModules is the set of modules that cause a rule to be treated as non-matching
// and ignored (because rules with these modules exist in the data we are testing
// against, but aren't relevant to what we're testing).
var noMatchModules = sets.NewString("conntrack", "mark")
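
// For example, `-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP` uses the
// "conntrack" module, which is in noMatchModules, so the tracer simply skips that rule
// rather than trying to model conntrack state.
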
type iptablesChain []string
type iptablesTable map[string]iptablesChain

// iptablesTracer holds data used while virtually tracing a packet through a set of
// iptables rules
type iptablesTracer struct {
	tables map[string]iptablesTable
	nodeIP string
	t      *testing.T

	// matches accumulates the list of rules that were matched, for debugging purposes.
	matches []string

	// outputs accumulates the list of matched terminal rule targets (endpoint
	// IP:ports, or a special target like "REJECT") and is eventually used to generate
	// the return value of tracePacket.
	outputs []string

	// markMasq and markDrop track whether the packet has been marked for masquerading
	// or dropping.
	markMasq bool
	markDrop bool
}

// newIPTablesTracer creates an iptablesTracer. ruleData is an iptables rule dump (as with
// "iptables-save"). nodeIP is the IP to treat as the local node IP (for determining
// whether rules with "--src-type LOCAL" or "--dst-type LOCAL" match).
func newIPTablesTracer(t *testing.T, ruleData, nodeIP string) (*iptablesTracer, error) {
	tables, err := parseIPTablesData(ruleData)
	if err != nil {
		return nil, err
	}

	tracer := &iptablesTracer{
		tables: make(map[string]iptablesTable),
		nodeIP: nodeIP,
		t:      t,
	}

	for name, rules := range tables {
		table := make(iptablesTable)
		for _, rule := range rules {
			match := addRuleToChainRegex.FindStringSubmatch(rule)
			if match != nil {
				chainName := match[1]
				table[chainName] = append(table[chainName], rule)
			}
		}
		tracer.tables[name] = table
	}

	return tracer, nil
}

// ruleMatches checks if the given iptables rule matches (at least probabilistically) a
// packet with the given sourceIP, destIP, and destPort. (Note that protocol is currently
// ignored.)
func (tracer *iptablesTracer) ruleMatches(rule, sourceIP, destIP, destPort string) bool {
	var match []string

	// Delete comments so we don't mistakenly match something in a comment string later
	rule = commentRegex.ReplaceAllString(rule, "")

	// Make sure the rule only uses modules ("-m foo") that we are aware of
	for _, matches := range moduleRegex.FindAllStringSubmatch(rule, -1) {
		moduleName := matches[1]
		if !knownModules.Has(moduleName) {
			tracer.t.Errorf("Rule %q uses unknown iptables module %q", rule, moduleName)
		}
		if noMatchModules.Has(moduleName) {
			// This rule is doing something irrelevant to iptablesTracer
			return false
		}
	}

	// The sub-rules within an iptables rule are ANDed together, so the rule only
	// matches if all of them match. So go through the subrules, and if any of them
	// DON'T match, then fail.

	// Match local/non-local.
	match = srcLocalRegex.FindStringSubmatch(rule)
	if match != nil {
		wantLocal := (match[1] != "!")
		sourceIsLocal := (sourceIP == tracer.nodeIP || sourceIP == "127.0.0.1")
		if wantLocal != sourceIsLocal {
			return false
		}
	}
	match = destLocalRegex.FindStringSubmatch(rule)
	if match != nil {
		wantLocal := (match[1] != "!")
		destIsLocal := (destIP == tracer.nodeIP || destIP == "127.0.0.1")
		if wantLocal != destIsLocal {
			return false
		}
	}

	// Match destination IP/port.
	match = destIPRegex.FindStringSubmatch(rule)
	if match != nil {
		negated := match[1] == "!"
		ruleAddress := match[2]
		if !ruleMatchesIP(tracer.t, negated, ruleAddress, destIP) {
			return false
		}
	}
	match = destPortRegex.FindStringSubmatch(rule)
	if match != nil {
		rulePort := match[1]
		if rulePort != destPort {
			return false
		}
	}

	// Match source IP (but not currently port)
	match = sourceIPRegex.FindStringSubmatch(rule)
	if match != nil {
		negated := match[1] == "!"
		ruleAddress := match[2]
		if !ruleMatchesIP(tracer.t, negated, ruleAddress, sourceIP) {
			return false
		}
	}

	// The iptablesTracer has no state/history, so any rule that checks whether affinity
	// has been established for a particular endpoint must not match.
	if affinityRegex.MatchString(rule) {
		return false
	}

	// Anything else is assumed to match
	return true
}
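
// For example, for a packet to 172.30.0.41:80, a rule containing
// `-m tcp -p tcp -d 172.30.0.41 --dport 80` passes both the destination-IP and
// destination-port checks above, while the same rule with `-d 172.30.0.43` would fail
// the destination-IP check and be skipped.
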
// runChain runs the given packet through the rules in the given table and chain, updating
// tracer's internal state accordingly. It returns true if it hits a terminal action.
func (tracer *iptablesTracer) runChain(table, chain, sourceIP, destIP, destPort string) bool {
	for _, rule := range tracer.tables[table][chain] {
		match := jumpRegex.FindStringSubmatch(rule)
		if match == nil {
			// You _can_ have rules that don't end in `-j`, but we don't currently
			// do that.
			tracer.t.Fatalf("Could not find jump target in rule %q", rule)
		}
		isProbabilisticMatch := (match[1] != "")
		target := match[2]
		natDestination := match[4]

		if !tracer.ruleMatches(rule, sourceIP, destIP, destPort) {
			continue
		}
		// record the matched rule for debugging purposes
		tracer.matches = append(tracer.matches, rule)

		switch target {
		case "KUBE-MARK-MASQ":
			tracer.markMasq = true
			continue

		case "KUBE-MARK-DROP":
			tracer.markDrop = true
			continue

		case "ACCEPT", "REJECT":
			// (only valid in filter)
			tracer.outputs = append(tracer.outputs, target)
			return true

		case "DNAT":
			// (only valid in nat)
			tracer.outputs = append(tracer.outputs, natDestination)
			return true

		default:
			// We got a "-j KUBE-SOMETHING", so process that chain
			terminated := tracer.runChain(table, target, sourceIP, destIP, destPort)

			// If the subchain hit a terminal rule AND the rule that sent us
			// to that chain was non-probabilistic, then this chain terminates
			// as well. But if we went there because of a --probability rule,
			// then we want to keep accumulating further matches against this
			// chain.
			if terminated && !isProbabilisticMatch {
				return true
			}
		}
	}

	return false
}
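
// Note that because of the --probability handling above, a service chain like
// KUBE-SVC-4SW47YFZTEDKD3PK can produce multiple outputs for a single trace: the
// probabilistic jump to its first endpoint terminates only that subchain, so the trace
// goes on to record the second endpoint as well (see "multiple endpoints" in
// TestTracePackets below).
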
// tracePacket determines what would happen to a packet with the given sourceIP, destIP,
// and destPort, given the indicated iptables ruleData. nodeIP is the local node IP (for
// rules matching "LOCAL").
//
// The return values are: an array of matched rules (for debugging), the final packet
// destinations (a comma-separated list of IPs, or one of the special targets "ACCEPT",
// "DROP", or "REJECT"), and whether the packet would be masqueraded.
func tracePacket(t *testing.T, ruleData, sourceIP, destIP, destPort, nodeIP string) ([]string, string, bool) {
	tracer, err := newIPTablesTracer(t, ruleData, nodeIP)
	if err != nil {
		t.Errorf("Bad iptables ruleData: %v", err)
	}

	// nat:PREROUTING goes first, then the filter chains, then nat:POSTROUTING. For our
	// purposes that means we run through the "nat" chains first, starting from the top of
	// KUBE-SERVICES, then we do the "filter" chains. The only interesting thing that
	// happens in nat:POSTROUTING is that the masquerade mark gets turned into actual
	// masquerading.

	// FIXME: we ought to be able to say
	//   trace.runChain("nat", "PREROUTING", ...)
	// here instead of
	//   trace.runChain("nat", "KUBE-SERVICES", ...)
	// (and similarly below with the "filter" chains) but this doesn't work because the
	// rules like "-A PREROUTING -j KUBE-SERVICES" are created with iptables.EnsureRule(),
	// which iptablestest.FakeIPTables doesn't implement, so those rules will be missing
	// from the ruleData we have. So we have to explicitly specify each kube-proxy chain
	// we want to run through here.
	tracer.runChain("nat", "KUBE-SERVICES", sourceIP, destIP, destPort)

	// Process pending DNAT (which theoretically might affect REJECT/ACCEPT filter rules)
	if len(tracer.outputs) != 0 {
		destIP = strings.Split(tracer.outputs[0], ":")[0]
	}

	// Now run the filter rules to see if the packet is REJECTed or ACCEPTed. The DROP
	// rule is created by kubelet, not us, so we have to simulate that manually
	if tracer.markDrop {
		return tracer.matches, "DROP", false
	}
	tracer.runChain("filter", "KUBE-SERVICES", sourceIP, destIP, destPort)
	tracer.runChain("filter", "KUBE-EXTERNAL-SERVICES", sourceIP, destIP, destPort)
	tracer.runChain("filter", "KUBE-NODEPORTS", sourceIP, destIP, destPort)

	return tracer.matches, strings.Join(tracer.outputs, ", "), tracer.markMasq
}
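
// As a usage example, with the static ruleData in TestTracePackets below, tracing a
// packet from the node IP to the external IP 192.168.99.22:80 matches the
// "masquerade LOCAL traffic" rule (so masq is true) and ends with the single
// destination "10.180.0.2:80".
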
type packetFlowTest struct {
	name     string
	sourceIP string
	destIP   string
	destPort int
	output   string
	masq     bool
}

func runPacketFlowTests(t *testing.T, line int, ruleData, nodeIP string, testCases []packetFlowTest) {
	lineStr := ""
	if line != 0 {
		lineStr = fmt.Sprintf(" (from line %d)", line)
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			matches, output, masq := tracePacket(t, ruleData, tc.sourceIP, tc.destIP, fmt.Sprintf("%d", tc.destPort), nodeIP)
			var errors []string
			if output != tc.output {
				errors = append(errors, fmt.Sprintf("wrong output: expected %q got %q", tc.output, output))
			}
			if masq != tc.masq {
				errors = append(errors, fmt.Sprintf("wrong masq: expected %v got %v", tc.masq, masq))
			}
			if errors != nil {
				t.Errorf("Test %q of a packet from %s to %s:%d%s got result:\n%s\n\nBy matching:\n%s\n\n",
					tc.name, tc.sourceIP, tc.destIP, tc.destPort, lineStr, strings.Join(errors, "\n"), strings.Join(matches, "\n"))
			}
		})
	}
}

// This tests tracePacket against static data, just to make sure we match things in the
// way we expect to.
func TestTracePackets(t *testing.T) {
	rules := dedent.Dedent(`
		*filter
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j REJECT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
		:KUBE-EXT-PAZTZYUUMV5KCDZL - [0:0]
		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
		:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-NODEPORTS - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
		-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
		-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
		COMMIT
		`)

	runPacketFlowTests(t, getLine(), rules, testNodeIP, []packetFlowTest{
		{
			name:     "no match",
			sourceIP: "10.0.0.2",
			destIP:   "10.0.0.3",
			destPort: 80,
			output:   "",
		},
		{
			name:     "single endpoint",
			sourceIP: "10.0.0.2",
			destIP:   "172.30.0.41",
			destPort: 80,
			output:   "10.180.0.1:80",
		},
		{
			name:     "multiple endpoints",
			sourceIP: "10.0.0.2",
			destIP:   "172.30.0.44",
			destPort: 80,
			output:   "10.180.0.4:80, 10.180.0.5:80",
		},
		{
			name:     "LOCAL, KUBE-MARK-MASQ",
			sourceIP: testNodeIP,
			destIP:   "192.168.99.22",
			destPort: 80,
			output:   "10.180.0.2:80",
			masq:     true,
		},
		{
			name:     "KUBE-MARK-DROP",
			sourceIP: testExternalClient,
			destIP:   "192.168.99.22",
			destPort: 80,
			output:   "DROP",
		},
		{
			name:     "ACCEPT (NodePortHealthCheck)",
			sourceIP: testNodeIP,
			destIP:   testNodeIP,
			destPort: 30000,
			output:   "ACCEPT",
		},
		{
			name:     "REJECT",
			sourceIP: "10.0.0.2",
			destIP:   "172.30.0.43",
			destPort: 80,
			output:   "REJECT",
		},
	})
}

// TestOverallIPTablesRulesWithMultipleServices creates 4 types of services: ClusterIP,
// LoadBalancer, ExternalIP and NodePort and verifies that the NAT table rules created
// are exactly the same as what is expected. This test provides an overall view of what
// the NAT table rules look like with the different jumps.
func TestOverallIPTablesRulesWithMultipleServices(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	metrics.RegisterMetrics()
	tcpProtocol := v1.ProtocolTCP

	makeServiceMap(fp,
		// create ClusterIP service
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		// create LoadBalancer service
		makeTestService("ns2", "svc2", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = "172.30.0.42"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3001,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "1.2.3.4",
			}}
			// Also ensure that invalid LoadBalancerSourceRanges will not result
			// in a crash.
			svc.Spec.ExternalIPs = []string{"192.168.99.22"}
			svc.Spec.LoadBalancerSourceRanges = []string{" 203.0.113.0/25"}
			svc.Spec.HealthCheckNodePort = 30000
		}),
		// create LoadBalancer service with Cluster traffic policy and no source ranges
		makeTestService("ns2b", "svc2b", func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeCluster
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3002,
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: "5.6.7.8",
			}}
			svc.Spec.HealthCheckNodePort = 30000
		}),
		// create NodePort service
		makeTestService("ns3", "svc3", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     "p80",
				Port:     80,
				Protocol: v1.ProtocolTCP,
				NodePort: 3003,
			}}
		}),
		// create ExternalIP service
		makeTestService("ns4", "svc4", func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = "172.30.0.44"
			svc.Spec.ExternalIPs = []string{"192.168.99.33"}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       "p80",
				Port:       80,
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(80),
			}}
		}),
	)
	populateEndpointSlices(fp,
		// create ClusterIP service endpoints
		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.1"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create Local LoadBalancer endpoints. Note that since we aren't setting
		// its NodeName, this endpoint will be considered non-local and ignored.
		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.2"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create Cluster LoadBalancer endpoints
		makeTestEndpointSlice("ns2b", "svc2b", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.3"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create NodePort service endpoints
		makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.3"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		// create ExternalIP service endpoints
		makeTestEndpointSlice("ns4", "svc4", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.180.0.4"},
			}, {
				Addresses: []string{"10.180.0.5"},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr("p80"),
				Port:     utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-NODEPORTS -m comment --comment "ns2/svc2:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-4SW47YFZTEDKD3PK - [0:0]
		:KUBE-EXT-GNZBNJ2PO5MGZ6GT - [0:0]
		:KUBE-EXT-PAZTZYUUMV5KCDZL - [0:0]
		:KUBE-EXT-X27LE4BHSL4DOUIK - [0:0]
		:KUBE-FW-GNZBNJ2PO5MGZ6GT - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-C6EBXVWJJZMIWKLZ - [0:0]
		:KUBE-SEP-OYPFS5VJICHGATKP - [0:0]
		:KUBE-SEP-QDCEFMBQEGWIV4VT - [0:0]
		:KUBE-SEP-RS4RBKLTHTF2IUXJ - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-UKSFD7AGPMPPLUHC - [0:0]
		:KUBE-SVC-4SW47YFZTEDKD3PK - [0:0]
		:KUBE-SVC-GNZBNJ2PO5MGZ6GT - [0:0]
		:KUBE-SVC-PAZTZYUUMV5KCDZL - [0:0]
		:KUBE-SVC-X27LE4BHSL4DOUIK - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-SVL-GNZBNJ2PO5MGZ6GT - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns2/svc2:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
		-A KUBE-NODEPORTS -m comment --comment ns2b/svc2b:p80 -m tcp -p tcp --dport 3002 -j KUBE-EXT-PAZTZYUUMV5KCDZL
		-A KUBE-NODEPORTS -m comment --comment ns3/svc3:p80 -m tcp -p tcp --dport 3003 -j KUBE-EXT-X27LE4BHSL4DOUIK
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 external IP" -m tcp -p tcp -d 192.168.99.22 --dport 80 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns2/svc2:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-GNZBNJ2PO5MGZ6GT
		-A KUBE-SERVICES -m comment --comment "ns2b/svc2b:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-PAZTZYUUMV5KCDZL
		-A KUBE-SERVICES -m comment --comment "ns2b/svc2b:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-EXT-PAZTZYUUMV5KCDZL
		-A KUBE-SERVICES -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 -j KUBE-SVC-X27LE4BHSL4DOUIK
		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 -j KUBE-SVC-4SW47YFZTEDKD3PK
		-A KUBE-SERVICES -m comment --comment "ns4/svc4:p80 external IP" -m tcp -p tcp -d 192.168.99.33 --dport 80 -j KUBE-EXT-4SW47YFZTEDKD3PK
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-4SW47YFZTEDKD3PK -m comment --comment "masquerade traffic for ns4/svc4:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-4SW47YFZTEDKD3PK -j KUBE-SVC-4SW47YFZTEDKD3PK
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "pod traffic for ns2/svc2:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "masquerade LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -m comment --comment "route LOCAL traffic for ns2/svc2:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-GNZBNJ2PO5MGZ6GT
		-A KUBE-EXT-GNZBNJ2PO5MGZ6GT -j KUBE-SVL-GNZBNJ2PO5MGZ6GT
		-A KUBE-EXT-PAZTZYUUMV5KCDZL -m comment --comment "masquerade traffic for ns2b/svc2b:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-PAZTZYUUMV5KCDZL -j KUBE-SVC-PAZTZYUUMV5KCDZL
		-A KUBE-EXT-X27LE4BHSL4DOUIK -m comment --comment "masquerade traffic for ns3/svc3:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-X27LE4BHSL4DOUIK -j KUBE-SVC-X27LE4BHSL4DOUIK
		-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-GNZBNJ2PO5MGZ6GT
		-A KUBE-FW-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 loadbalancer IP" -j KUBE-MARK-DROP
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -s 10.180.0.5 -j KUBE-MARK-MASQ
		-A KUBE-SEP-C6EBXVWJJZMIWKLZ -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.5:80
		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
		-A KUBE-SEP-OYPFS5VJICHGATKP -m comment --comment ns3/svc3:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
		-A KUBE-SEP-QDCEFMBQEGWIV4VT -m comment --comment ns2b/svc2b:p80 -s 10.180.0.3 -j KUBE-MARK-MASQ
		-A KUBE-SEP-QDCEFMBQEGWIV4VT -m comment --comment ns2b/svc2b:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.3:80
		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -s 10.180.0.2 -j KUBE-MARK-MASQ
		-A KUBE-SEP-RS4RBKLTHTF2IUXJ -m comment --comment ns2/svc2:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.2:80
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -s 10.180.0.4 -j KUBE-MARK-MASQ
		-A KUBE-SEP-UKSFD7AGPMPPLUHC -m comment --comment ns4/svc4:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.4:80
		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 cluster IP" -m tcp -p tcp -d 172.30.0.44 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.4:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-UKSFD7AGPMPPLUHC
		-A KUBE-SVC-4SW47YFZTEDKD3PK -m comment --comment "ns4/svc4:p80 -> 10.180.0.5:80" -j KUBE-SEP-C6EBXVWJJZMIWKLZ
		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 cluster IP" -m tcp -p tcp -d 172.30.0.42 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 -> 10.180.0.2:80" -j KUBE-SEP-RS4RBKLTHTF2IUXJ
		-A KUBE-SVC-PAZTZYUUMV5KCDZL -m comment --comment "ns2b/svc2b:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-PAZTZYUUMV5KCDZL -m comment --comment "ns2b/svc2b:p80 -> 10.180.0.3:80" -j KUBE-SEP-QDCEFMBQEGWIV4VT
		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 cluster IP" -m tcp -p tcp -d 172.30.0.43 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-X27LE4BHSL4DOUIK -m comment --comment "ns3/svc3:p80 -> 10.180.0.3:80" -j KUBE-SEP-OYPFS5VJICHGATKP
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVL-GNZBNJ2PO5MGZ6GT -m comment --comment "ns2/svc2:p80 has no local endpoints" -j KUBE-MARK-DROP
		COMMIT
		`)

	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	natRulesMetric, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nNatRules := int(natRulesMetric)

	expectedNatRules := countRules("nat", fp.iptablesData.String())

	if nNatRules != expectedNatRules {
		t.Fatalf("Wrong number of nat rules: expected %d received %d", expectedNatRules, nNatRules)
	}
}

func TestClusterIPReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)
	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		COMMIT
		`)

	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "cluster IP rejected",
			sourceIP: "10.0.0.2",
			destIP:   "172.30.0.41",
			destPort: 80,
			output:   "REJECT",
		},
	})
}

func TestClusterIPEndpointsJump(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "cluster IP accepted",
			sourceIP: "10.180.0.2",
			destIP:   "172.30.0.41",
			destPort: 80,
			output:   "10.180.0.1:80",
			masq:     false,
		},
		{
			name:     "hairpin to cluster IP",
			sourceIP: "10.180.0.1",
			destIP:   "172.30.0.41",
			destPort: 80,
			output:   "10.180.0.1:80",
			masq:     true,
		},
	})
}

func TestLoadBalancer(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcLBIP1 := "1.2.3.4"
	svcLBIP2 := "5.6.7.8"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{
				{IP: svcLBIP1},
				{IP: svcLBIP2},
			}
			svc.Spec.LoadBalancerSourceRanges = []string{
				"192.168.0.0/24",

				// Regression test that excess whitespace gets ignored
				" 203.0.113.0/25",
			}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-FW-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 5.6.7.8 --dport 80 -j KUBE-FW-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 192.168.0.0/24 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 203.0.113.0/25 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 1.2.3.4 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -s 5.6.7.8 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-FW-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 loadbalancer IP" -j KUBE-MARK-DROP
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
		COMMIT
		`)

	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     false,
		},
		{
			name:     "external to nodePort",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "nodePort bypasses LoadBalancerSourceRanges",
			sourceIP: testExternalClientBlocked,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "accepted external to LB1",
			sourceIP: testExternalClient,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "accepted external to LB2",
			sourceIP: testExternalClient,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "blocked external to LB1",
			sourceIP: testExternalClientBlocked,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "blocked external to LB2",
			sourceIP: testExternalClientBlocked,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "pod to LB1 (blocked by LoadBalancerSourceRanges)",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "pod to LB2 (blocked by LoadBalancerSourceRanges)",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   "DROP",
		},
		{
			name:     "node to LB1 (allowed by LoadBalancerSourceRanges)",
			sourceIP: testNodeIP,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "node to LB2 (allowed by LoadBalancerSourceRanges)",
			sourceIP: testNodeIP,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},

		// The LB rules assume that when you connect from a node to a LB IP, that
		// something external to kube-proxy will cause the connection to be
		// SNATted to the LB IP, so if the LoadBalancerSourceRanges include the
		// node IP, then we add a rule allowing traffic from the LB IP as well...
		{
			name:     "same node to LB1, SNATted to LB1 (implicitly allowed)",
			sourceIP: svcLBIP1,
			destIP:   svcLBIP1,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "same node to LB2, SNATted to LB2 (implicitly allowed)",
			sourceIP: svcLBIP2,
			destIP:   svcLBIP2,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
	})
}

func TestNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	epIP := "10.180.0.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -j KUBE-SEP-SXIVWICOYRO3J4NJ
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     false,
		},
		{
			name:     "external to nodePort",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "node to nodePort",
			sourceIP: testNodeIP,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
		{
			name:     "localhost to nodePort gets masqueraded",
			sourceIP: "127.0.0.1",
			destIP:   "127.0.0.1",
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP, svcPort),
			masq:     true,
		},
	})
}

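// TestHealthCheckNodePort verifies that a LoadBalancer service with Local
// traffic policy gets an ACCEPT rule for its health check node port, and that
// the rule disappears when the service is deleted.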
func TestHealthCheckNodePort(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.nodePortAddresses = []string{"127.0.0.0/8"}

	svcIP := "172.30.0.42"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	svc := makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
		svc.Spec.Type = "LoadBalancer"
		svc.Spec.ClusterIP = svcIP
		svc.Spec.Ports = []v1.ServicePort{{
			Name:     svcPortName.Port,
			Port:     int32(svcPort),
			Protocol: v1.ProtocolTCP,
			NodePort: int32(svcNodePort),
		}}
		svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
	})
	makeServiceMap(fp, svc)
	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.42 --dport 80 -j REJECT
		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 127.0.0.1 -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		COMMIT
		`)

	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "firewall accepts HealthCheckNodePort",
			sourceIP: "1.2.3.4",
			destIP:   testNodeIP,
			destPort: svcHealthCheckNodePort,
			output:   "ACCEPT",
			masq:     false,
		},
	})

	fp.OnServiceDelete(svc)
	fp.syncProxyRules()

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "HealthCheckNodePort no longer has any rule",
			sourceIP: "1.2.3.4",
			destIP:   testNodeIP,
			destPort: svcHealthCheckNodePort,
			output:   "",
		},
	})
}

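// TestMasqueradeRule verifies the KUBE-POSTROUTING masquerade rule, both with
// and without --random-fully support in the underlying iptables.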
func TestMasqueradeRule(t *testing.T) {
	for _, testcase := range []bool{false, true} {
		ipt := iptablestest.NewFake().SetHasRandomFully(testcase)
		fp := NewFakeProxier(ipt)
		fp.syncProxyRules()

		expectedFmt := dedent.Dedent(`
			*filter
			:KUBE-NODEPORTS - [0:0]
			:KUBE-SERVICES - [0:0]
			:KUBE-EXTERNAL-SERVICES - [0:0]
			:KUBE-FORWARD - [0:0]
			-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
			-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
			-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
			COMMIT
			*nat
			:KUBE-NODEPORTS - [0:0]
			:KUBE-SERVICES - [0:0]
			:KUBE-MARK-MASQ - [0:0]
			:KUBE-POSTROUTING - [0:0]
			-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
			-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
			-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
			-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
			-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE%s
			COMMIT
			`)
		var expected string
		if testcase {
			expected = fmt.Sprintf(expectedFmt, " --random-fully")
		} else {
			expected = fmt.Sprintf(expectedFmt, "")
		}
		assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())
	}
}

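// TestExternalIPsReject verifies that traffic to the cluster IP and external
// IP of a service with no endpoints is rejected.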
func TestExternalIPsReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcExternalIPs := "192.168.99.11"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "ClusterIP"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j REJECT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "cluster IP with no endpoints",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "external IP with no endpoints",
			sourceIP: testExternalClient,
			destIP:   svcExternalIPs,
			destPort: svcPort,
			output:   "REJECT",
		},
	})
}

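// TestOnlyLocalExternalIPs verifies that external-IP traffic for a service
// with Local external traffic policy is delivered only to the local endpoint,
// without masquerading.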
func TestOnlyLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcExternalIPs := "192.168.99.11"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-SVL-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "pod traffic for ns1/svc1:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		-A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "cluster IP hits both endpoints",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "external IP hits only local endpoint, unmasqueraded",
			sourceIP: testExternalClient,
			destIP:   svcExternalIPs,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
			masq:     false,
		},
	})
}

// TestNonLocalExternalIPs verifies that the masquerade rule is added to svcChain so that
// packets to external IPs are SNATted when externalTrafficPolicy is Cluster and the traffic is not local.
func TestNonLocalExternalIPs(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcExternalIPs := "192.168.99.11"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{svcExternalIPs}
			svc.Spec.Ports = []v1.ServicePort{{
				Name:       svcPortName.Port,
				Port:       int32(svcPort),
				Protocol:   v1.ProtocolTCP,
				TargetPort: intstr.FromInt(svcPort),
			}}
		}),
	)
	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 external IP" -m tcp -p tcp -d 192.168.99.11 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade traffic for ns1/svc1:p80 external destinations" -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "external to external IP",
			sourceIP: testExternalClient,
			destIP:   svcExternalIPs,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     true,
		},
	})
}

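// TestNodePortReject verifies that traffic to a NodePort service with no
// endpoints is rejected.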
func TestNodePortReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "pod to NodePort",
			sourceIP: "10.0.0.2",
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   "REJECT",
		},
		{
			name:     "external to NodePort",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   "REJECT",
		},
	})
}

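// TestLoadBalancerReject verifies that traffic to the cluster IP, load
// balancer IP, and node port of a LoadBalancer service with no endpoints is
// rejected.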
func TestLoadBalancerReject(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)
	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j REJECT
		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT
		-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1:p80 has no endpoints" -m addrtype --dst-type LOCAL -m tcp -p tcp --dport 3001 -j REJECT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "pod to LoadBalancer IP",
			sourceIP: "10.0.0.2",
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   "REJECT",
		},
		{
			name:     "external to LoadBalancer IP",
			sourceIP: testExternalClient,
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   "REJECT",
		},
	})
}

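// TestOnlyLocalLoadBalancing verifies load balancer traffic for a service
// with Local external traffic policy and ClientIP session affinity: external
// traffic should reach only the local endpoint, unmasqueraded.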
func TestOnlyLocalLoadBalancing(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcHealthCheckNodePort := 30000
	svcLBIP := "1.2.3.4"
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}
	svcSessionAffinityTimeout := int32(10800)

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.HealthCheckNodePort = int32(svcHealthCheckNodePort)
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: svcLBIP,
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.SessionAffinity = v1.ServiceAffinityClientIP
			svc.Spec.SessionAffinityConfig = &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{TimeoutSeconds: &svcSessionAffinityTimeout},
			}
		}),
	)

	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-NODEPORTS -m comment --comment "ns1/svc1:p80 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-SVL-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "pod traffic for ns1/svc1:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --set -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --set -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m recent --name KUBE-SEP-SXIVWICOYRO3J4NJ --rcheck --seconds 10800 --reap -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		-A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -m recent --name KUBE-SEP-ZX7GRIZKSNUQ3LAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		-A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		COMMIT
		`)
	assertIPTablesRulesEqual(t, getLine(), expected, fp.iptablesData.String())

	runPacketFlowTests(t, getLine(), fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP hits both endpoints",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "external to LB IP hits only local endpoint, unmasqueraded",
			sourceIP: testExternalClient,
			destIP:   svcLBIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "external to NodePort hits only local endpoint, unmasqueraded",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
			masq:     false,
		},
	})
}

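// TestOnlyLocalNodePortsNoClusterCIDR runs the onlyLocalNodePorts checks with
// a no-op local traffic detector, i.e. as if no cluster CIDR were configured.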
func TestOnlyLocalNodePortsNoClusterCIDR(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
	fp.nodePortAddresses = []string{"192.168.0.0/24"}

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-SVL-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 192.168.0.2 -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		-A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		COMMIT
		`)
	onlyLocalNodePorts(t, fp, ipt, expected, getLine())
}

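// TestOnlyLocalNodePorts runs the onlyLocalNodePorts checks with the default
// local traffic detector and a restricted nodePortAddresses range.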
func TestOnlyLocalNodePorts(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.nodePortAddresses = []string{"192.168.0.0/24"}

	expected := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXT-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-SXIVWICOYRO3J4NJ - [0:0]
		:KUBE-SEP-ZX7GRIZKSNUQ3LAJ - [0:0]
		:KUBE-SVC-XPGD46QRK7WJZT7O - [0:0]
		:KUBE-SVL-XPGD46QRK7WJZT7O - [0:0]
		-A KUBE-NODEPORTS -m comment --comment ns1/svc1:p80 -m tcp -p tcp --dport 3001 -j KUBE-EXT-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -d 192.168.0.2 -j KUBE-NODEPORTS
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "pod traffic for ns1/svc1:p80 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "masquerade LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
		-A KUBE-EXT-XPGD46QRK7WJZT7O -m comment --comment "route LOCAL traffic for ns1/svc1:p80 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-XPGD46QRK7WJZT7O
		-A KUBE-EXT-XPGD46QRK7WJZT7O -j KUBE-SVL-XPGD46QRK7WJZT7O
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -s 10.180.0.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-SXIVWICOYRO3J4NJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.0.1:80
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -s 10.180.2.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-ZX7GRIZKSNUQ3LAJ -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.180.2.1:80
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 cluster IP" -m tcp -p tcp -d 172.30.0.41 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.0.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-SXIVWICOYRO3J4NJ
		-A KUBE-SVC-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		-A KUBE-SVL-XPGD46QRK7WJZT7O -m comment --comment "ns1/svc1:p80 -> 10.180.2.1:80" -j KUBE-SEP-ZX7GRIZKSNUQ3LAJ
		COMMIT
		`)
	onlyLocalNodePorts(t, fp, ipt, expected, getLine())
}

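// onlyLocalNodePorts is the shared body of TestOnlyLocalNodePorts and
// TestOnlyLocalNodePortsNoClusterCIDR: it creates a NodePort service with
// Local external traffic policy, one local and one remote endpoint, and
// checks the generated rules and packet flows against expected.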
func onlyLocalNodePorts(t *testing.T, fp *Proxier, ipt *iptablestest.FakeIPTables, expected string, line int) {
	svcIP := "172.30.0.41"
	svcPort := 80
	svcNodePort := 3001
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.Type = "NodePort"
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(svcNodePort),
			}}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		}),
	)

	epIP1 := "10.180.0.1"
	epIP2 := "10.180.2.1"
	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP1},
				NodeName:  nil,
			}, {
				Addresses: []string{epIP2},
				NodeName:  utilpointer.StringPtr(testHostname),
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	assertIPTablesRulesEqual(t, line, expected, fp.iptablesData.String())

	runPacketFlowTests(t, line, fp.iptablesData.String(), testNodeIP, []packetFlowTest{
		{
			name:     "pod to cluster IP hits both endpoints",
			sourceIP: "10.0.0.2",
			destIP:   svcIP,
			destPort: svcPort,
			output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "external to NodePort hits only local endpoint",
			sourceIP: testExternalClient,
			destIP:   testNodeIP,
			destPort: svcNodePort,
			output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
			masq:     false,
		},
		{
			name:     "pod to localhost doesn't work because localhost is not in nodePortAddresses",
			sourceIP: "10.0.0.2",
			destIP:   "127.0.0.1",
			destPort: svcNodePort,
			output:   "",
		},
	})

	if fp.localDetector.IsImplemented() {
		// pod-to-NodePort is treated as internal traffic, so we see both endpoints
		runPacketFlowTests(t, line, fp.iptablesData.String(), testNodeIP, []packetFlowTest{
			{
				name:     "pod to NodePort hits both endpoints",
				sourceIP: "10.0.0.2",
				destIP:   testNodeIP,
				destPort: svcNodePort,
				output:   fmt.Sprintf("%s:%d, %s:%d", epIP1, svcPort, epIP2, svcPort),
				masq:     false,
			},
		})
	} else {
		// pod-to-NodePort is (incorrectly) treated as external traffic
		// when there is no LocalTrafficDetector.
		runPacketFlowTests(t, line, fp.iptablesData.String(), testNodeIP, []packetFlowTest{
			{
				name:     "pod to NodePort hits only local endpoint",
				sourceIP: "10.0.0.2",
				destIP:   testNodeIP,
				destPort: svcNodePort,
				output:   fmt.Sprintf("%s:%d", epIP2, svcPort),
				masq:     false,
			},
		})
	}
}

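// TestComputeProbability checks computeProbability against known values and
// verifies that the result is strictly monotonic in the number of endpoints.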
func TestComputeProbability(t *testing.T) {
	expectedProbabilities := map[int]string{
		1:      "1.0000000000",
		2:      "0.5000000000",
		10:     "0.1000000000",
		100:    "0.0100000000",
		1000:   "0.0010000000",
		10000:  "0.0001000000",
		100000: "0.0000100000",
		100001: "0.0000099999",
	}

	for num, expected := range expectedProbabilities {
		actual := computeProbability(num)
		if actual != expected {
			t.Errorf("Expected computeProbability(%d) to be %s, got: %s", num, expected, actual)
		}
	}

	prevProbability := float64(0)
	for i := 100000; i > 1; i-- {
		currProbability, err := strconv.ParseFloat(computeProbability(i), 64)
		if err != nil {
			t.Fatalf("Error parsing float probability for %d: %v", i, err)
		}
		if currProbability <= prevProbability {
			t.Fatalf("Probability unexpectedly <= previous probability for %d: (%0.10f <= %0.10f)", i, currProbability, prevProbability)
		}
		prevProbability = currProbability
	}
}

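// makeTestService returns a Service with the given namespace and name,
// further customized by svcFunc.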
func makeTestService(namespace, name string, svcFunc func(*v1.Service)) *v1.Service {
	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        name,
			Namespace:   namespace,
			Annotations: map[string]string{},
		},
		Spec:   v1.ServiceSpec{},
		Status: v1.ServiceStatus{},
	}
	svcFunc(svc)
	return svc
}

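// addTestPort appends a ServicePort with the given attributes to array and
// returns the extended slice.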
func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port, nodeport int32, targetPort int) []v1.ServicePort {
	svcPort := v1.ServicePort{
		Name:       name,
		Protocol:   protocol,
		Port:       port,
		NodePort:   nodeport,
		TargetPort: intstr.FromInt(targetPort),
	}
	return append(array, svcPort)
}

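// TestBuildServiceMapAddRemove verifies serviceMap updates as services are
// added, modified, and deleted, including the reported health check node
// ports and stale UDP cluster IPs.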
func TestBuildServiceMapAddRemove(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	services := []*v1.Service{
		makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.55.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "sctpport", "SCTP", 1236, 6321, 0)
		}),
		makeTestService("somewhere-else", "node-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeNodePort
			svc.Spec.ClusterIP = "172.30.55.10"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blahblah", "UDP", 345, 678, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "moreblahblah", "TCP", 344, 677, 0)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "muchmoreblah", "SCTP", 343, 676, 0)
		}),
		makeTestService("somewhere", "load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.30.55.11"
			svc.Spec.LoadBalancerIP = "1.2.3.4"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar", "UDP", 8675, 30061, 7000)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8676, 30062, 7001)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "1.2.3.4"},
				},
			}
		}),
		makeTestService("somewhere", "only-local-load-balancer", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeLoadBalancer
			svc.Spec.ClusterIP = "172.30.55.12"
			svc.Spec.LoadBalancerIP = "5.6.7.8"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "foobar2", "UDP", 8677, 30063, 7002)
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "baz", "UDP", 8678, 30064, 7003)
			svc.Status.LoadBalancer = v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "5.6.7.8"},
				},
			}
			svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
			svc.Spec.HealthCheckNodePort = 345
		}),
	}

	for i := range services {
		fp.OnServiceAdd(services[i])
	}
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 10 {
		t.Errorf("expected service map length 10, got %v", fp.serviceMap)
	}

	// The only-local-loadbalancer ones get added
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected 1 healthcheck port, got %v", result.HCServiceNodePorts)
	} else {
		nsn := makeNSN("somewhere", "only-local-load-balancer")
		if port, found := result.HCServiceNodePorts[nsn]; !found || port != 345 {
			t.Errorf("expected healthcheck port [%q]=345: got %v", nsn, result.HCServiceNodePorts)
		}
	}

	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Remove some stuff
	// oneService is a modification of services[0] with its first port removed.
	oneService := makeTestService("somewhere-else", "cluster-ip", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "UDP", 1235, 5321, 0)
	})

	fp.OnServiceUpdate(services[0], oneService)
	fp.OnServiceDelete(services[1])
	fp.OnServiceDelete(services[2])
	fp.OnServiceDelete(services[3])

	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 1 {
		t.Errorf("expected service map length 1, got %v", fp.serviceMap)
	}

	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected 0 healthcheck ports, got %v", result.HCServiceNodePorts)
	}

	// All services but one were deleted. While you'd expect only the ClusterIPs
	// from the three deleted services here, we still have the ClusterIP for
	// the not-deleted service, because one of its ServicePorts was deleted.
	expectedStaleUDPServices := []string{"172.30.55.10", "172.30.55.4", "172.30.55.11", "172.30.55.12"}
	if len(result.UDPStaleClusterIP) != len(expectedStaleUDPServices) {
		t.Errorf("expected stale UDP services length %d, got %v", len(expectedStaleUDPServices), result.UDPStaleClusterIP.UnsortedList())
	}
	for _, ip := range expectedStaleUDPServices {
		if !result.UDPStaleClusterIP.Has(ip) {
			t.Errorf("expected stale UDP service %s", ip)
		}
	}
}

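// TestBuildServiceMapServiceHeadless verifies that headless services are
// ignored by the service map.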
func TestBuildServiceMapServiceHeadless(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "headless", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = v1.ClusterIPNone
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "rpc", "UDP", 1234, 0, 0)
		}),
		makeTestService("somewhere-else", "headless-without-port", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = v1.ClusterIPNone
		}),
	)

	// Headless service should be ignored
	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %d", len(fp.serviceMap))
	}

	// No proxied services, so no healthchecks
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %d", len(result.HCServiceNodePorts))
	}

	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}

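// TestBuildServiceMapServiceTypeExternalName verifies that ExternalName
// services are ignored by the service map.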
func TestBuildServiceMapServiceTypeExternalName(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	makeServiceMap(fp,
		makeTestService("somewhere-else", "external-name", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeExternalName
			svc.Spec.ClusterIP = "172.30.55.4" // Should be ignored
			svc.Spec.ExternalName = "foo2.bar.com"
			svc.Spec.Ports = addTestPort(svc.Spec.Ports, "blah", "UDP", 1235, 5321, 0)
		}),
	)

	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 0 {
		t.Errorf("expected service map length 0, got %v", fp.serviceMap)
	}
	// No proxied services, so no healthchecks
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP)
	}
}

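// TestBuildServiceMapServiceUpdate verifies serviceMap updates as a service
// is switched between ClusterIP and LoadBalancer types.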
func TestBuildServiceMapServiceUpdate(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)

	servicev1 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeClusterIP
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 0)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 0)
	})
	servicev2 := makeTestService("somewhere", "some-service", func(svc *v1.Service) {
		svc.Spec.Type = v1.ServiceTypeLoadBalancer
		svc.Spec.ClusterIP = "172.30.55.4"
		svc.Spec.LoadBalancerIP = "1.2.3.4"
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "something", "UDP", 1234, 4321, 7002)
		svc.Spec.Ports = addTestPort(svc.Spec.Ports, "somethingelse", "TCP", 1235, 5321, 7003)
		svc.Status.LoadBalancer = v1.LoadBalancerStatus{
			Ingress: []v1.LoadBalancerIngress{
				{IP: "1.2.3.4"},
			},
		}
		svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
		svc.Spec.HealthCheckNodePort = 345
	})

	fp.OnServiceAdd(servicev1)

	result := fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services only added, so nothing stale yet
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}

	// Change service to load-balancer
	fp.OnServiceUpdate(servicev1, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}

	// No change; make sure the service map stays the same and there are
	// no health-check changes
	fp.OnServiceUpdate(servicev2, servicev2)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 1 {
		t.Errorf("expected healthcheck ports length 1, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		t.Errorf("expected stale UDP services length 0, got %v", result.UDPStaleClusterIP.UnsortedList())
	}

	// And back to ClusterIP
	fp.OnServiceUpdate(servicev2, servicev1)
	result = fp.serviceMap.Update(fp.serviceChanges)
	if len(fp.serviceMap) != 2 {
		t.Errorf("expected service map length 2, got %v", fp.serviceMap)
	}
	if len(result.HCServiceNodePorts) != 0 {
		t.Errorf("expected healthcheck ports length 0, got %v", result.HCServiceNodePorts)
	}
	if len(result.UDPStaleClusterIP) != 0 {
		// Services were only updated, not removed, so nothing should be stale
		t.Errorf("expected stale UDP services length 0, got %d", len(result.UDPStaleClusterIP))
	}
}

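// populateEndpointSlices feeds the given EndpointSlices to the proxier.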
func populateEndpointSlices(proxier *Proxier, allEndpointSlices ...*discovery.EndpointSlice) {
	for i := range allEndpointSlices {
		proxier.OnEndpointSliceAdd(allEndpointSlices[i])
	}
}

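// makeTestEndpointSlice returns an EndpointSlice for the named service,
// further customized by epsFunc.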
func makeTestEndpointSlice(namespace, name string, sliceNum int, epsFunc func(*discovery.EndpointSlice)) *discovery.EndpointSlice {
	eps := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-%d", name, sliceNum),
			Namespace: namespace,
			Labels:    map[string]string{discovery.LabelServiceName: name},
		},
	}
	epsFunc(eps)
	return eps
}

func makeNSN(namespace, name string) types.NamespacedName {
	return types.NamespacedName{Namespace: namespace, Name: name}
}

func makeServicePortName(ns, name, port string, protocol v1.Protocol) proxy.ServicePortName {
	return proxy.ServicePortName{
		NamespacedName: makeNSN(ns, name),
		Port:           port,
		Protocol:       protocol,
	}
}

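// makeServiceMap adds the given services to the proxier and marks its
// service list as synced.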
func makeServiceMap(proxier *Proxier, allServices ...*v1.Service) {
	for i := range allServices {
		proxier.OnServiceAdd(allServices[i])
	}

	proxier.mu.Lock()
	defer proxier.mu.Unlock()
	proxier.servicesSynced = true
}

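// compareEndpointsMapsExceptChainName checks that newMap contains the
// expected endpoints, comparing only the Endpoint and IsLocal fields.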
func compareEndpointsMapsExceptChainName(t *testing.T, tci int, newMap proxy.EndpointsMap, expected map[proxy.ServicePortName][]*endpointsInfo) {
	if len(newMap) != len(expected) {
		t.Errorf("[%d] expected %d results, got %d: %v", tci, len(expected), len(newMap), newMap)
	}
	for x := range expected {
		if len(newMap[x]) != len(expected[x]) {
			t.Errorf("[%d] expected %d endpoints for %v, got %d", tci, len(expected[x]), x, len(newMap[x]))
		} else {
			for i := range expected[x] {
				newEp, ok := newMap[x][i].(*endpointsInfo)
				if !ok {
					t.Errorf("Failed to cast endpointsInfo")
					continue
				}
				if newEp.Endpoint != expected[x][i].Endpoint ||
					newEp.IsLocal != expected[x][i].IsLocal {
					t.Errorf("[%d] expected new[%v][%d] to be %v, got %v", tci, x, i, expected[x][i], newEp)
				}
			}
		}
	}
}

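// TestUpdateEndpointsMap verifies endpointsMap updates across a variety of
// EndpointSlice configurations.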
func TestUpdateEndpointsMap(t *testing.T) {
	var nodeName = testHostname
	udpProtocol := v1.ProtocolUDP

	emptyEndpointSlices := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, func(*discovery.EndpointSlice) {}),
	}
	subset1 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}}
	}
	subset2 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.2"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	namedPortLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
					NodeName:  &nodeName,
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11"),
					Port:     utilpointer.Int32(11),
					Protocol: &udpProtocol,
				}}
			}),
	}
	namedPort := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
	}
	namedPortRenamed := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11-2"),
					Port:     utilpointer.Int32(11),
					Protocol: &udpProtocol,
				}}
			}),
	}
	namedPortRenumbered := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11"),
					Port:     utilpointer.Int32(22),
					Protocol: &udpProtocol,
				}}
			}),
	}
	namedPortsLocalNoLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1,
			func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{{
					Addresses: []string{"10.1.1.1"},
				}, {
					Addresses: []string{"10.1.1.2"},
					NodeName:  &nodeName,
				}}
				eps.Ports = []discovery.EndpointPort{{
					Name:     utilpointer.String("p11"),
					Port:     utilpointer.Int32(11),
					Protocol: &udpProtocol,
				}, {
					Name:     utilpointer.String("p12"),
					Port:     utilpointer.Int32(12),
					Protocol: &udpProtocol,
				}}
			}),
	}
	multipleSubsets := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
		makeTestEndpointSlice("ns1", "ep1", 2, subset2),
	}
	subsetLocal := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.2"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	multipleSubsetsWithLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subset1),
		makeTestEndpointSlice("ns1", "ep1", 2, subsetLocal),
	}
	subsetMultiplePortsLocal := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	subset3 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.3"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p13"),
			Port:     utilpointer.Int32(13),
			Protocol: &udpProtocol,
		}}
	}
	multipleSubsetsMultiplePortsLocal := []*discovery.EndpointSlice{
		makeTestEndpointSlice("ns1", "ep1", 1, subsetMultiplePortsLocal),
		makeTestEndpointSlice("ns1", "ep1", 2, subset3),
	}
	subsetMultipleIPsPorts1 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"10.1.1.1"},
		}, {
			Addresses: []string{"10.1.1.2"},
			NodeName:  &nodeName,
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name:     utilpointer.String("p11"),
			Port:     utilpointer.Int32(11),
			Protocol: &udpProtocol,
		}, {
			Name:     utilpointer.String("p12"),
			Port:     utilpointer.Int32(12),
			Protocol: &udpProtocol,
		}}
	}
	subsetMultipleIPsPorts2 := func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.1.1.3"},
|
|
}, {
|
|
Addresses: []string{"10.1.1.4"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p13"),
|
|
Port: utilpointer.Int32(13),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p14"),
|
|
Port: utilpointer.Int32(14),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
subsetMultipleIPsPorts3 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.2.2.1"},
|
|
}, {
|
|
Addresses: []string{"10.2.2.2"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p21"),
|
|
Port: utilpointer.Int32(21),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p22"),
|
|
Port: utilpointer.Int32(22),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
multipleSubsetsIPsPorts := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subsetMultipleIPsPorts1),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, subsetMultipleIPsPorts2),
|
|
makeTestEndpointSlice("ns2", "ep2", 1, subsetMultipleIPsPorts3),
|
|
}
|
|
complexSubset1 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.2.2.2"},
|
|
NodeName: &nodeName,
|
|
}, {
|
|
Addresses: []string{"10.2.2.22"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p22"),
|
|
Port: utilpointer.Int32(22),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset2 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.2.2.3"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p23"),
|
|
Port: utilpointer.Int32(23),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset3 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.4.4.4"},
|
|
NodeName: &nodeName,
|
|
}, {
|
|
Addresses: []string{"10.4.4.5"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p44"),
|
|
Port: utilpointer.Int32(44),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset4 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.4.4.6"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p45"),
|
|
Port: utilpointer.Int32(45),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset5 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.1.1.1"},
|
|
}, {
|
|
Addresses: []string{"10.1.1.11"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p11"),
|
|
Port: utilpointer.Int32(11),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset6 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.1.1.2"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p12"),
|
|
Port: utilpointer.Int32(12),
|
|
Protocol: &udpProtocol,
|
|
}, {
|
|
Name: utilpointer.String("p122"),
|
|
Port: utilpointer.Int32(122),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset7 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.3.3.3"},
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p33"),
|
|
Port: utilpointer.Int32(33),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexSubset8 := func(eps *discovery.EndpointSlice) {
|
|
eps.AddressType = discovery.AddressTypeIPv4
|
|
eps.Endpoints = []discovery.Endpoint{{
|
|
Addresses: []string{"10.4.4.4"},
|
|
NodeName: &nodeName,
|
|
}}
|
|
eps.Ports = []discovery.EndpointPort{{
|
|
Name: utilpointer.String("p44"),
|
|
Port: utilpointer.Int32(44),
|
|
Protocol: &udpProtocol,
|
|
}}
|
|
}
|
|
complexBefore := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, subset1),
|
|
nil,
|
|
makeTestEndpointSlice("ns2", "ep2", 1, complexSubset1),
|
|
makeTestEndpointSlice("ns2", "ep2", 2, complexSubset2),
|
|
nil,
|
|
makeTestEndpointSlice("ns4", "ep4", 1, complexSubset3),
|
|
makeTestEndpointSlice("ns4", "ep4", 2, complexSubset4),
|
|
}
|
|
complexAfter := []*discovery.EndpointSlice{
|
|
makeTestEndpointSlice("ns1", "ep1", 1, complexSubset5),
|
|
makeTestEndpointSlice("ns1", "ep1", 2, complexSubset6),
|
|
nil,
|
|
nil,
|
|
makeTestEndpointSlice("ns3", "ep3", 1, complexSubset7),
|
|
makeTestEndpointSlice("ns4", "ep4", 1, complexSubset8),
|
|
nil,
|
|
}

	testCases := []struct {
		// previousEndpoints and currentEndpoints are used to call the
		// appropriate OnEndpointSlice* handlers (based on whether the
		// corresponding values are nil or non-nil) and must be of equal length.
		name                      string
		previousEndpoints         []*discovery.EndpointSlice
		currentEndpoints          []*discovery.EndpointSlice
		oldEndpoints              map[proxy.ServicePortName][]*endpointsInfo
		expectedResult            map[proxy.ServicePortName][]*endpointsInfo
		expectedStaleEndpoints    []proxy.ServiceEndpoint
		expectedStaleServiceNames map[proxy.ServicePortName]bool
		expectedHealthchecks      map[types.NamespacedName]int
	}{{
		// Case[0]: nothing
		name:                      "nothing",
		oldEndpoints:              map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult:            map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[1]: no change, named port, local
		name:              "no change, named port, local",
		previousEndpoints: namedPortLocal,
		currentEndpoints:  namedPortLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[2]: no change, multiple subsets
		name:              "no change, multiple subsets",
		previousEndpoints: multipleSubsets,
		currentEndpoints:  multipleSubsets,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[3]: no change, multiple subsets, multiple ports, local
		name:              "no change, multiple subsets, multiple ports, local",
		previousEndpoints: multipleSubsetsMultiplePortsLocal,
		currentEndpoints:  multipleSubsetsMultiplePortsLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[4]: no change, multiple endpoints, subsets, IPs, and ports
		name:              "no change, multiple endpoints, subsets, IPs, and ports",
		previousEndpoints: multipleSubsetsIPsPorts,
		currentEndpoints:  multipleSubsetsIPsPorts,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p13", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:13", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:13", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p14", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.3:14", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.4:14", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p21", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:21", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:21", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints:    []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 2,
			makeNSN("ns2", "ep2"): 1,
		},
	}, {
		// Case[5]: add an Endpoints
		name:              "add an Endpoints",
		previousEndpoints: []*discovery.EndpointSlice{nil},
		currentEndpoints:  namedPortLocal,
		oldEndpoints:      map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[6]: remove an Endpoints
		name:              "remove an Endpoints",
		previousEndpoints: namedPortLocal,
		currentEndpoints:  []*discovery.EndpointSlice{nil},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[7]: add an IP and port
		name:              "add an IP and port",
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortsLocalNoLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[8]: remove an IP and port
		name:              "remove an IP and port",
		previousEndpoints: namedPortsLocalNoLocal,
		currentEndpoints:  namedPort,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:11", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.2:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.1.1.1:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.1.1.2:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[9]: add a subset
		name:              "add a subset",
		previousEndpoints: []*discovery.EndpointSlice{namedPort[0], nil},
		currentEndpoints:  multipleSubsetsWithLocal,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns1", "ep1"): 1,
		},
	}, {
		// Case[10]: remove a subset
		name:              "remove a subset",
		previousEndpoints: multipleSubsets,
		currentEndpoints:  []*discovery.EndpointSlice{namedPort[0], nil},
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.2:12",
			ServicePortName: makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[11]: rename a port
		name:              "rename a port",
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortRenamed,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11-2", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{},
	}, {
		// Case[12]: renumber a port
		name:              "renumber a port",
		previousEndpoints: namedPort,
		currentEndpoints:  namedPortRenumbered,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:22", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.1.1.1:11",
			ServicePortName: makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{},
		expectedHealthchecks:      map[types.NamespacedName]int{},
	}, {
		// Case[13]: complex add and remove
		name:              "complex add and remove",
		previousEndpoints: complexBefore,
		currentEndpoints:  complexAfter,
		oldEndpoints: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.22:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.2:22", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.2.2.3:23", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.5:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.6:45", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.11:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:12", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.2:122", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.3.3.3:33", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
			makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.4.4.4:44", IsLocal: true, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{{
			Endpoint:        "10.2.2.2:22",
			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.2.2.22:22",
			ServicePortName: makeServicePortName("ns2", "ep2", "p22", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.2.2.3:23",
			ServicePortName: makeServicePortName("ns2", "ep2", "p23", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.4.4.5:44",
			ServicePortName: makeServicePortName("ns4", "ep4", "p44", v1.ProtocolUDP),
		}, {
			Endpoint:        "10.4.4.6:45",
			ServicePortName: makeServicePortName("ns4", "ep4", "p45", v1.ProtocolUDP),
		}},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p12", v1.ProtocolUDP):  true,
			makeServicePortName("ns1", "ep1", "p122", v1.ProtocolUDP): true,
			makeServicePortName("ns3", "ep3", "p33", v1.ProtocolUDP):  true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{
			makeNSN("ns4", "ep4"): 1,
		},
	}, {
		// Case[14]: change from 0 endpoint address to 1 unnamed port
		name:              "change from 0 endpoint address to 1 unnamed port",
		previousEndpoints: emptyEndpointSlices,
		currentEndpoints:  namedPort,
		oldEndpoints:      map[proxy.ServicePortName][]*endpointsInfo{},
		expectedResult: map[proxy.ServicePortName][]*endpointsInfo{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): {
				{BaseEndpointInfo: &proxy.BaseEndpointInfo{Endpoint: "10.1.1.1:11", IsLocal: false, Ready: true, Serving: true, Terminating: false}},
			},
		},
		expectedStaleEndpoints: []proxy.ServiceEndpoint{},
		expectedStaleServiceNames: map[proxy.ServicePortName]bool{
			makeServicePortName("ns1", "ep1", "p11", v1.ProtocolUDP): true,
		},
		expectedHealthchecks: map[types.NamespacedName]int{},
	},
	}
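
	// Each case runs in two phases: first the previous endpoints are fed to
	// the proxier and the tracked map is checked against oldEndpoints, then
	// the previous->current transition is replayed through the
	// OnEndpointSlice* handlers and the final map, stale entries, and
	// healthcheck counts are verified.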
	for tci, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.hostname = nodeName

			// First check that after adding all previous versions of endpoints,
			// the fp.endpointsMap is as we expect.
			for i := range tc.previousEndpoints {
				if tc.previousEndpoints[i] != nil {
					fp.OnEndpointSliceAdd(tc.previousEndpoints[i])
				}
			}
			fp.endpointsMap.Update(fp.endpointsChanges)
			compareEndpointsMapsExceptChainName(t, tci, fp.endpointsMap, tc.oldEndpoints)

			// Now let's call appropriate handlers to get to the state we want to be in.
			if len(tc.previousEndpoints) != len(tc.currentEndpoints) {
				t.Fatalf("[%d] different lengths of previous and current endpoints", tci)
			}

			for i := range tc.previousEndpoints {
				prev, curr := tc.previousEndpoints[i], tc.currentEndpoints[i]
				switch {
				case prev == nil:
					fp.OnEndpointSliceAdd(curr)
				case curr == nil:
					fp.OnEndpointSliceDelete(prev)
				default:
					fp.OnEndpointSliceUpdate(prev, curr)
				}
			}
			result := fp.endpointsMap.Update(fp.endpointsChanges)
			newMap := fp.endpointsMap
			compareEndpointsMapsExceptChainName(t, tci, newMap, tc.expectedResult)
			if len(result.StaleEndpoints) != len(tc.expectedStaleEndpoints) {
				t.Errorf("[%d] expected %d staleEndpoints, got %d: %v", tci, len(tc.expectedStaleEndpoints), len(result.StaleEndpoints), result.StaleEndpoints)
			}
			for _, x := range tc.expectedStaleEndpoints {
				found := false
				for _, stale := range result.StaleEndpoints {
					if stale == x {
						found = true
						break
					}
				}
				if !found {
					t.Errorf("[%d] expected staleEndpoints[%v], but didn't find it: %v", tci, x, result.StaleEndpoints)
				}
			}
			if len(result.StaleServiceNames) != len(tc.expectedStaleServiceNames) {
				t.Errorf("[%d] expected %d staleServiceNames, got %d: %v", tci, len(tc.expectedStaleServiceNames), len(result.StaleServiceNames), result.StaleServiceNames)
			}
			for svcName := range tc.expectedStaleServiceNames {
				found := false
				for _, stale := range result.StaleServiceNames {
					if stale == svcName {
						found = true
						break
					}
				}
				if !found {
					t.Errorf("[%d] expected staleServiceNames[%v], but didn't find it: %v", tci, svcName, result.StaleServiceNames)
				}
			}
			if !reflect.DeepEqual(result.HCEndpointsLocalIPSize, tc.expectedHealthchecks) {
				t.Errorf("[%d] expected healthchecks %v, got %v", tci, tc.expectedHealthchecks, result.HCEndpointsLocalIPSize)
			}
		})
	}
}

// The majority of EndpointSlice specific tests are not iptables specific and focus on
// the shared EndpointChangeTracker and EndpointSliceCache. This test ensures that the
// iptables proxier supports translating EndpointSlices to iptables output.
func TestEndpointSliceE2E(t *testing.T) {
	expectedIPTablesWithSlice := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
		:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
		:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
		:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 -j KUBE-SVC-AQI2S6QIMU7PVVRP
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
		-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
		-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
		-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
		-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 0 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -j KUBE-SEP-XGJFVO3L2O5SRFNT
		COMMIT
		`)

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.30.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node2"),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr("node3"),
		}, {
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr("node4"),
		}},
	}
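
	// Only the three ready endpoints should be rendered; 10.0.1.4 is not
	// ready and must not appear in the KUBE-SVC-AQI2S6QIMU7PVVRP chain.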
	fp.OnEndpointSliceAdd(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesEqual(t, getLine(), expectedIPTablesWithSlice, fp.iptablesData.String())

	fp.OnEndpointSliceDelete(endpointSlice)
	fp.syncProxyRules()
	assertIPTablesRulesNotEqual(t, getLine(), expectedIPTablesWithSlice, fp.iptablesData.String())
}

// TestHealthCheckNodePortWhenTerminating tests that health check node ports are not enabled when all local endpoints are terminating
func TestHealthCheckNodePortWhenTerminating(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.OnServiceSynced()
	fp.OnEndpointSlicesSynced()

	serviceName := "svc1"
	namespaceName := "ns1"

	fp.OnServiceAdd(&v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
		Spec: v1.ServiceSpec{
			ClusterIP: "172.30.1.1",
			Selector:  map[string]string{"foo": "bar"},
			Ports:     []v1.ServicePort{{Name: "", TargetPort: intstr.FromInt(80), Protocol: v1.ProtocolTCP}},
		},
	})

	tcpProtocol := v1.ProtocolTCP
	endpointSlice := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses:  []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, {
			Addresses:  []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}, { // not ready endpoints should be ignored
			Addresses:  []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(false)},
			NodeName:   utilpointer.StringPtr(testHostname),
		}},
	}

	fp.OnEndpointSliceAdd(endpointSlice)
	result := fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 1 {
		t.Errorf("unexpected number of health check node ports, expected 1 but got: %d", len(result.HCEndpointsLocalIPSize))
	}

	// set all endpoints to terminating
	endpointSliceTerminating := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      fmt.Sprintf("%s-1", serviceName),
			Namespace: namespaceName,
			Labels:    map[string]string{discovery.LabelServiceName: serviceName},
		},
		Ports: []discovery.EndpointPort{{
			Name:     utilpointer.StringPtr(""),
			Port:     utilpointer.Int32Ptr(80),
			Protocol: &tcpProtocol,
		}},
		AddressType: discovery.AddressTypeIPv4,
		Endpoints: []discovery.Endpoint{{
			Addresses: []string{"10.0.1.1"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(false),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, {
			Addresses: []string{"10.0.1.2"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, {
			Addresses: []string{"10.0.1.3"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(true),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}, { // not ready endpoints should be ignored
			Addresses: []string{"10.0.1.4"},
			Conditions: discovery.EndpointConditions{
				Ready:       utilpointer.BoolPtr(false),
				Serving:     utilpointer.BoolPtr(false),
				Terminating: utilpointer.BoolPtr(true),
			},
			NodeName: utilpointer.StringPtr(testHostname),
		}},
	}
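
	// With no remaining ready local endpoints, the healthcheck count should
	// drop to zero even though some endpoints are still serving while they
	// terminate.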
	fp.OnEndpointSliceUpdate(endpointSlice, endpointSliceTerminating)
	result = fp.endpointsMap.Update(fp.endpointsChanges)
	if len(result.HCEndpointsLocalIPSize) != 0 {
		t.Errorf("unexpected number of health check node ports, expected 0 but got: %d", len(result.HCEndpointsLocalIPSize))
	}
}
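
// TestProxierDeleteNodePortStaleUDP verifies that stale conntrack entries for
// a UDP service are flushed for its ClusterIP, ExternalIP, LoadBalancer
// ingress IP, and NodePort, and only once a ready endpoint appears.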
func TestProxierDeleteNodePortStaleUDP(t *testing.T) {
	fcmd := fakeexec.FakeCmd{}
	fexec := fakeexec.FakeExec{
		LookPathFunc: func(cmd string) (string, error) { return cmd, nil },
	}
	execFunc := func(cmd string, args ...string) exec.Cmd {
		return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
	}
	cmdOutput := "1 flow entries have been deleted"
	cmdFunc := func() ([]byte, []byte, error) { return []byte(cmdOutput), nil, nil }

	// Delete ClusterIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete ExternalIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete LoadBalancerIP entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)
	// Delete NodePort entries
	fcmd.CombinedOutputScript = append(fcmd.CombinedOutputScript, cmdFunc)
	fexec.CommandScript = append(fexec.CommandScript, execFunc)

	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.exec = &fexec

	svcIP := "172.30.0.41"
	extIP := "192.168.99.11"
	lbIngressIP := "1.2.3.4"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolUDP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.ExternalIPs = []string{extIP}
			svc.Spec.Type = "LoadBalancer"
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolUDP,
				NodePort: int32(nodePort),
			}}
			svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
				IP: lbIngressIP,
			}}
		}),
	)

	fp.syncProxyRules()
	if fexec.CommandCalls != 0 {
		t.Fatalf("Created service without endpoints must not clear conntrack entries")
	}

	epIP := "10.180.0.1"
	udpProtocol := v1.ProtocolUDP
	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
				Conditions: discovery.EndpointConditions{
					Ready: utilpointer.Bool(false),
				},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &udpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	if fexec.CommandCalls != 0 {
		t.Fatalf("Updated UDP service with not-ready endpoints must not clear UDP entries")
	}

	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{epIP},
				Conditions: discovery.EndpointConditions{
					Ready: utilpointer.Bool(true),
				},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &udpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	if fexec.CommandCalls != 4 {
		t.Fatalf("Updated UDP service with new endpoints must clear UDP entries 4 times: ClusterIP, NodePort, ExternalIP and LB")
	}

	// The order is not guaranteed, so compare the command strings after sorting.
	expectedCommands := []string{
		// Delete ClusterIP conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", svcIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete ExternalIP conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", extIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete LoadBalancerIP conntrack entries
		fmt.Sprintf("conntrack -D --orig-dst %s -p %s", lbIngressIP, strings.ToLower(string(v1.ProtocolUDP))),
		// Delete NodePort conntrack entries
		fmt.Sprintf("conntrack -D -p %s --dport %d", strings.ToLower(string(v1.ProtocolUDP)), nodePort),
	}
	actualCommands := []string{
		strings.Join(fcmd.CombinedOutputLog[0], " "),
		strings.Join(fcmd.CombinedOutputLog[1], " "),
		strings.Join(fcmd.CombinedOutputLog[2], " "),
		strings.Join(fcmd.CombinedOutputLog[3], " "),
	}
	sort.Strings(expectedCommands)
	sort.Strings(actualCommands)

	if !reflect.DeepEqual(expectedCommands, actualCommands) {
		t.Errorf("Expected commands: %v, but executed %v", expectedCommands, actualCommands)
	}
}
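
// TestProxierMetricsIptablesTotalRules verifies that the IptablesRulesTotal
// gauge matches the number of rules actually written for the filter and nat
// tables, both before and after endpoints are added.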
func TestProxierMetricsIptablesTotalRules(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	tcpProtocol := v1.ProtocolTCP

	metrics.RegisterMetrics()

	svcIP := "172.30.0.41"
	svcPort := 80
	nodePort := 31201
	svcPortName := proxy.ServicePortName{
		NamespacedName: makeNSN("ns1", "svc1"),
		Port:           "p80",
		Protocol:       v1.ProtocolTCP,
	}

	makeServiceMap(fp,
		makeTestService(svcPortName.Namespace, svcPortName.Name, func(svc *v1.Service) {
			svc.Spec.ClusterIP = svcIP
			svc.Spec.Ports = []v1.ServicePort{{
				Name:     svcPortName.Port,
				Port:     int32(svcPort),
				Protocol: v1.ProtocolTCP,
				NodePort: int32(nodePort),
			}}
		}),
	)
	fp.syncProxyRules()
	iptablesData := fp.iptablesData.String()

	nFilterRulesMetric, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nFilterRules := int(nFilterRulesMetric)
	expectedFilterRules := countRules("filter", iptablesData)

	if nFilterRules != expectedFilterRules {
		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
	}

	nNatRulesMetric, err := testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nNatRules := int(nNatRulesMetric)
	expectedNatRules := countRules("nat", iptablesData)

	if nNatRules != expectedNatRules {
		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
	}

	populateEndpointSlices(fp,
		makeTestEndpointSlice(svcPortName.Namespace, svcPortName.Name, 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = []discovery.Endpoint{{
				Addresses: []string{"10.0.0.2"},
			}, {
				Addresses: []string{"10.0.0.5"},
			}}
			eps.Ports = []discovery.EndpointPort{{
				Name:     utilpointer.StringPtr(svcPortName.Port),
				Port:     utilpointer.Int32(int32(svcPort)),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()
	iptablesData = fp.iptablesData.String()

	nFilterRulesMetric, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableFilter)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nFilterRules = int(nFilterRulesMetric)
	expectedFilterRules = countRules("filter", iptablesData)

	if nFilterRules != expectedFilterRules {
		t.Fatalf("Wrong number of filter rules: expected %d got %d\n%s", expectedFilterRules, nFilterRules, iptablesData)
	}

	nNatRulesMetric, err = testutil.GetGaugeMetricValue(metrics.IptablesRulesTotal.WithLabelValues(string(utiliptables.TableNAT)))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", metrics.IptablesRulesTotal.Name, err)
	}
	nNatRules = int(nNatRulesMetric)
	expectedNatRules = countRules("nat", iptablesData)

	if nNatRules != expectedNatRules {
		t.Fatalf("Wrong number of nat rules: expected %d got %d\n%s", expectedNatRules, nNatRules, iptablesData)
	}
}

// TODO(thockin): add *more* tests for syncProxyRules() or break it down further and test the pieces.

// This test ensures that the iptables proxier supports translating Endpoints to
// iptables output when internalTrafficPolicy is specified
func TestInternalTrafficPolicyE2E(t *testing.T) {
	type endpoint struct {
		ip       string
		hostname string
	}

	cluster := v1.ServiceInternalTrafficPolicyCluster
	local := v1.ServiceInternalTrafficPolicyLocal

	clusterExpectedIPTables := dedent.Dedent(`
		*filter
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-EXTERNAL-SERVICES - [0:0]
		:KUBE-FORWARD - [0:0]
		-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
		-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
		COMMIT
		*nat
		:KUBE-NODEPORTS - [0:0]
		:KUBE-SERVICES - [0:0]
		:KUBE-MARK-MASQ - [0:0]
		:KUBE-POSTROUTING - [0:0]
		:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
		:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
		:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
		:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
		-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
		-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
		-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
		-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
		-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
		-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
		-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
		-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
		-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
		-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
		-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
		-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
		-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -j KUBE-SEP-XGJFVO3L2O5SRFNT
		COMMIT
		`)
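
	// The Cluster case programs all three endpoints regardless of node; the
	// Local cases below expect either only the endpoint on testHostname or,
	// with no local endpoints, a KUBE-MARK-DROP fallthrough.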
|
|
|
|
	testCases := []struct {
		name                      string
		line                      int
		internalTrafficPolicy     *v1.ServiceInternalTrafficPolicyType
		featureGateOn             bool
		endpoints                 []endpoint
		expectEndpointRule        bool
		expectedIPTablesWithSlice string
		flowTests                 []packetFlowTest
	}{
		{
			name:                  "internalTrafficPolicy is cluster",
			line:                  getLine(),
			internalTrafficPolicy: &cluster,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        true,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
			flowTests: []packetFlowTest{
				{
					name:     "pod to ClusterIP hits all endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
					masq:     false,
				},
			},
		},
		{
			name:                  "internalTrafficPolicy is local and there are local endpoints",
			line:                  getLine(),
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: true,
			expectedIPTablesWithSlice: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -j KUBE-SEP-3JOIVZTXZZRGORX4
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to ClusterIP hits only local endpoint",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80",
					masq:     false,
				},
			},
		},
		{
			name:                  "internalTrafficPolicy is local and there are no local endpoints",
			line:                  getLine(),
			internalTrafficPolicy: &local,
			featureGateOn:         true,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule: false,
			expectedIPTablesWithSlice: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 has no local endpoints" -j KUBE-MARK-DROP
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "no endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "DROP",
				},
			},
		},
		{
			name:                  "Local internalTrafficPolicy is ignored when feature gate is off",
			line:                  getLine(),
			internalTrafficPolicy: &local,
			featureGateOn:         false,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectEndpointRule:        false,
			expectedIPTablesWithSlice: clusterExpectedIPTables,
			flowTests: []packetFlowTest{
				{
					name:     "pod to ClusterIP hits all endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.3:80",
					masq:     false,
				},
			},
		},
	}

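	// Each case runs the same flow: add the slice, sync, assert the full
	// iptables dump, simulate packet flows, then delete the slice and verify
	// that traffic to the cluster IP is rejected.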
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, tc.featureGateOn)()
			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			serviceName := "svc1"
			namespaceName := "ns1"

			svc := &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
				Spec: v1.ServiceSpec{
					ClusterIP: "172.30.1.1",
					Selector:  map[string]string{"foo": "bar"},
					Ports:     []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP}},
				},
			}
			if tc.internalTrafficPolicy != nil {
				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
			}

			fp.OnServiceAdd(svc)

			tcpProtocol := v1.ProtocolTCP
			endpointSlice := &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", serviceName),
					Namespace: namespaceName,
					Labels:    map[string]string{discovery.LabelServiceName: serviceName},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
			}
			for _, ep := range tc.endpoints {
				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
					Addresses:  []string{ep.ip},
					Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
					NodeName:   utilpointer.StringPtr(ep.hostname),
				})
			}

			fp.OnEndpointSliceAdd(endpointSlice)
			fp.syncProxyRules()
			assertIPTablesRulesEqual(t, tc.line, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
			runPacketFlowTests(t, tc.line, fp.iptablesData.String(), testNodeIP, tc.flowTests)

			fp.OnEndpointSliceDelete(endpointSlice)
			fp.syncProxyRules()
			if tc.expectEndpointRule {
				assertIPTablesRulesNotEqual(t, tc.line, tc.expectedIPTablesWithSlice, fp.iptablesData.String())
			}
			runPacketFlowTests(t, tc.line, fp.iptablesData.String(), testNodeIP, []packetFlowTest{
				{
					name:     "endpoints deleted",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "REJECT",
				},
			})
		})
	}
}

// TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal tests externalTrafficPolicy: Local
// behavior with terminating endpoints: when ready local endpoints exist, only they are used;
// serving-but-terminating local endpoints are used only as a fallback, and only when the
// ProxyTerminatingEndpoints feature gate is enabled.
func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyLocal(t *testing.T) {
	tcpProtocol := v1.ProtocolTCP
	timeout := v1.DefaultClientIPServiceAffinitySeconds
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.30.1.1",
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeLocal,
			Selector:              map[string]string{"foo": "bar"},
			Ports: []v1.ServicePort{
				{
					Name:       "",
					TargetPort: intstr.FromInt(80),
					Port:       80,
					Protocol:   v1.ProtocolTCP,
				},
			},
			HealthCheckNodePort: 30000,
			SessionAffinity:     v1.ServiceAffinityClientIP,
			SessionAffinityConfig: &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{
					TimeoutSeconds: &timeout,
				},
			},
		},
		Status: v1.ServiceStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "1.2.3.4"},
				},
			},
		},
	}

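	// The cases below vary the ProxyTerminatingEndpoints feature gate and the
	// ready/serving/terminating conditions of each endpoint. Because the
	// Service uses ClientIP session affinity, the expected dumps also contain
	// the "-m recent" rules that implement affinity.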
	testcases := []struct {
		name                   string
		line                   int
		terminatingFeatureGate bool
		endpointslice          *discovery.EndpointSlice
		expectedIPTables       string
		noUsableEndpoints      bool
		flowTests              []packetFlowTest
	}{
		{
			name:                   "feature gate ProxyTerminatingEndpoints enabled, ready endpoints exist",
			line:                   getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for external since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored everywhere since it is neither ready nor serving
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for external since it's not local
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80",
					masq:     false,
				},
			},
		},
		{
			name:                   "feature gate ProxyTerminatingEndpoints disabled, ready endpoints exist",
			line:                   getLine(),
			terminatingFeatureGate: false,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored since it is not ready and the feature gate is off
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored since it is not ready and the feature gate is off
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for external since it's not local
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80",
					masq:     false,
				},
			},
		},
		{
			name:                   "feature gate ProxyTerminatingEndpoints enabled, only terminating endpoints exist",
			line:                   getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// this endpoint should be used since only serving-but-terminating endpoints exist
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be used since only serving-but-terminating endpoints exist
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should not be used since it is terminating and not serving
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored for external since it's not local
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
				:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
				-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
				-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --rcheck --seconds 10800 --reap -j KUBE-SEP-XGJFVO3L2O5SRFNT
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -j KUBE-SEP-XGJFVO3L2O5SRFNT
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.2:80, 10.0.1.3:80",
					masq:     false,
				},
			},
		},
		{
			name:                   "with ProxyTerminatingEndpoints disabled, only non-local and terminating endpoints exist",
			line:                   getLine(),
			terminatingFeatureGate: false,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 has no local endpoints" -j KUBE-MARK-DROP
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "DROP",
				},
			},
		},
		{
			name:                   "ProxyTerminatingEndpoints enabled, terminating endpoints on remote node",
			line:                   getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// this endpoint won't be used because it's not local,
						// but it will prevent a REJECT rule from being created
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-SVL-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "pod traffic for ns1/svc1 external destinations" -s 10.0.0.0/8 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "route LOCAL traffic for ns1/svc1 external destinations" -m addrtype --src-type LOCAL -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVL-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVL-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 has no local endpoints" -j KUBE-MARK-DROP
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.5:80",
				},
				{
					name:     "external to LB, no locally-usable endpoints",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "DROP",
				},
			},
		},
		{
			name:                   "no usable endpoints on any node",
			line:                   getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// Local but not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// Remote and not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			noUsableEndpoints: true,
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-NODEPORTS -m comment --comment "ns1/svc1 health check node port" -m tcp -p tcp --dport 30000 -j ACCEPT
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP, no usable endpoints",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "REJECT",
				},
				{
					name:     "external to LB, no usable endpoints",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "REJECT",
				},
			},
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, testcase.terminatingFeatureGate)()

			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			fp.OnServiceAdd(service)

			fp.OnEndpointSliceAdd(testcase.endpointslice)
			fp.syncProxyRules()
			assertIPTablesRulesEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String())
			runPacketFlowTests(t, testcase.line, fp.iptablesData.String(), testNodeIP, testcase.flowTests)

			fp.OnEndpointSliceDelete(testcase.endpointslice)
			fp.syncProxyRules()
			if testcase.noUsableEndpoints {
				// Deleting the EndpointSlice should have had no effect
				assertIPTablesRulesEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String())
			} else {
				assertIPTablesRulesNotEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String())
			}
			runPacketFlowTests(t, testcase.line, fp.iptablesData.String(), testNodeIP, []packetFlowTest{
				{
					name:     "pod to clusterIP after endpoints deleted",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "REJECT",
				},
				{
					name:     "external to LB after endpoints deleted",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "REJECT",
				},
			})
		})
	}
}
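
// filterServableEndpoints is a minimal illustrative sketch (it is not called by
// the tests above) of the selection rule those cases exercise: ready endpoints
// are always preferred, and serving-but-terminating endpoints are considered
// only as a fallback when the ProxyTerminatingEndpoints feature gate is
// enabled. The real logic lives in the proxy's endpoint topology code; this
// helper's name and signature are this file's own.
func filterServableEndpoints(eps []discovery.Endpoint, terminatingGateEnabled bool) []discovery.Endpoint {
	var ready, servingTerminating []discovery.Endpoint
	for _, ep := range eps {
		switch {
		case ep.Conditions.Ready != nil && *ep.Conditions.Ready:
			// Ready endpoints always win.
			ready = append(ready, ep)
		case ep.Conditions.Serving != nil && *ep.Conditions.Serving &&
			ep.Conditions.Terminating != nil && *ep.Conditions.Terminating:
			// Serving-but-terminating endpoints are only a fallback.
			servingTerminating = append(servingTerminating, ep)
		}
	}
	if len(ready) > 0 {
		return ready
	}
	if terminatingGateEnabled {
		return servingTerminating
	}
	return nil
}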

// TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster tests externalTrafficPolicy: Cluster
// behavior with terminating endpoints: when ready endpoints exist anywhere in the cluster, only
// they are used; serving-but-terminating endpoints are used only as a fallback, and only when the
// ProxyTerminatingEndpoints feature gate is enabled.
func TestEndpointSliceWithTerminatingEndpointsTrafficPolicyCluster(t *testing.T) {
	tcpProtocol := v1.ProtocolTCP
	timeout := v1.DefaultClientIPServiceAffinitySeconds
	service := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"},
		Spec: v1.ServiceSpec{
			ClusterIP:             "172.30.1.1",
			Type:                  v1.ServiceTypeLoadBalancer,
			ExternalTrafficPolicy: v1.ServiceExternalTrafficPolicyTypeCluster,
			Selector:              map[string]string{"foo": "bar"},
			Ports: []v1.ServicePort{
				{
					Name:       "",
					TargetPort: intstr.FromInt(80),
					Port:       80,
					Protocol:   v1.ProtocolTCP,
				},
			},
			HealthCheckNodePort: 30000,
			SessionAffinity:     v1.ServiceAffinityClientIP,
			SessionAffinityConfig: &v1.SessionAffinityConfig{
				ClientIP: &v1.ClientIPConfig{
					TimeoutSeconds: &timeout,
				},
			},
		},
		Status: v1.ServiceStatus{
			LoadBalancer: v1.LoadBalancerStatus{
				Ingress: []v1.LoadBalancerIngress{
					{IP: "1.2.3.4"},
				},
			},
		},
	}

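	// With externalTrafficPolicy: Cluster, no healthcheck nodeport rule is
	// rendered in the filter table, and external traffic is unconditionally
	// masqueraded via KUBE-MARK-MASQ before being load-balanced across the
	// cluster-wide endpoint set, which is what the expected dumps below assert.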
	testcases := []struct {
		name                   string
		line                   int
		terminatingFeatureGate bool
		endpointslice          *discovery.EndpointSlice
		expectedIPTables       string
		noUsableEndpoints      bool
		flowTests              []packetFlowTest
	}{
		{
			name:                   "feature gate ProxyTerminatingEndpoints enabled, ready endpoints exist",
			line:                   getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// this endpoint should be ignored since there are ready non-terminating endpoints
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
					{
						// this endpoint should be ignored since it is not "serving"
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     true,
				},
			},
		},
		{
			name:                   "feature gate ProxyTerminatingEndpoints disabled, ready endpoints exist",
			line:                   getLine(),
			terminatingFeatureGate: false,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name:      fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels:    map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name:     utilpointer.StringPtr(""),
					Port:     utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// always ignored since feature gate is disabled
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
					{
						// always ignored since serving=false
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(false),
							Serving:     utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready:       utilpointer.BoolPtr(true),
							Serving:     utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(false),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-3JOIVZTXZZRGORX4 - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -s 10.0.1.1 -j KUBE-MARK-MASQ
				-A KUBE-SEP-3JOIVZTXZZRGORX4 -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.1:80
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m recent --name KUBE-SEP-3JOIVZTXZZRGORX4 --rcheck --seconds 10800 --reap -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.1:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-3JOIVZTXZZRGORX4
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name:     "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP:   "172.30.1.1",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     false,
				},
				{
					name:     "external to LB",
					sourceIP: testExternalClient,
					destIP:   "1.2.3.4",
					destPort: 80,
					output:   "10.0.1.1:80, 10.0.1.2:80, 10.0.1.5:80",
					masq:     true,
				},
			},
		},
{
|
|
name: "feature gate ProxyTerminatingEndpoints enabled, only terminating endpoints exist",
|
|
line: getLine(),
|
|
terminatingFeatureGate: true,
|
|
endpointslice: &discovery.EndpointSlice{
|
|
ObjectMeta: metav1.ObjectMeta{
|
|
Name: fmt.Sprintf("%s-1", "svc1"),
|
|
Namespace: "ns1",
|
|
Labels: map[string]string{discovery.LabelServiceName: "svc1"},
|
|
},
|
|
Ports: []discovery.EndpointPort{{
|
|
Name: utilpointer.StringPtr(""),
|
|
Port: utilpointer.Int32Ptr(80),
|
|
Protocol: &tcpProtocol,
|
|
}},
|
|
AddressType: discovery.AddressTypeIPv4,
|
|
Endpoints: []discovery.Endpoint{
|
|
{
|
|
// this endpoint should be used since there are only ready terminating endpoints
|
|
Addresses: []string{"10.0.1.2"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
// this endpoint should be used since there are only ready terminating endpoints
|
|
Addresses: []string{"10.0.1.3"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr(testHostname),
|
|
},
|
|
{
|
|
						// this endpoint should not be used since it is terminating and not serving
Addresses: []string{"10.0.1.4"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(false),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr("another-host"),
|
|
},
|
|
{
|
|
// this endpoint should be used since there are only ready terminating endpoints
|
|
Addresses: []string{"10.0.1.5"},
|
|
Conditions: discovery.EndpointConditions{
|
|
Ready: utilpointer.BoolPtr(false),
|
|
Serving: utilpointer.BoolPtr(true),
|
|
Terminating: utilpointer.BoolPtr(true),
|
|
},
|
|
NodeName: utilpointer.StringPtr("another-host"),
|
|
},
|
|
},
|
|
},
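			// With three usable endpoints, the KUBE-SVC chain below balances
			// across them with "-m statistic" probabilities 1/3 and 1/2,
			// followed by an unconditional jump to the last endpoint.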
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SEP-IO5XOSKPAXIFQXAJ - [0:0]
				:KUBE-SEP-XGJFVO3L2O5SRFNT - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -s 10.0.1.2 -j KUBE-MARK-MASQ
				-A KUBE-SEP-IO5XOSKPAXIFQXAJ -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.2:80
				-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -s 10.0.1.3 -j KUBE-MARK-MASQ
				-A KUBE-SEP-XGJFVO3L2O5SRFNT -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.3:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m recent --name KUBE-SEP-IO5XOSKPAXIFQXAJ --rcheck --seconds 10800 --reap -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -m recent --name KUBE-SEP-XGJFVO3L2O5SRFNT --rcheck --seconds 10800 --reap -j KUBE-SEP-XGJFVO3L2O5SRFNT
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.2:80" -m statistic --mode random --probability 0.3333333333 -j KUBE-SEP-IO5XOSKPAXIFQXAJ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.3:80" -m statistic --mode random --probability 0.5000000000 -j KUBE-SEP-XGJFVO3L2O5SRFNT
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name: "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP: "172.30.1.1",
					destPort: 80,
					output: "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
					masq: false,
				},
				{
					name: "external to LB",
					sourceIP: testExternalClient,
					destIP: "1.2.3.4",
					destPort: 80,
					output: "10.0.1.2:80, 10.0.1.3:80, 10.0.1.5:80",
					masq: true,
				},
			},
		},
		{
			name: "with ProxyTerminatingEndpoints disabled, only terminating endpoints exist",
			line: getLine(),
			terminatingFeatureGate: false,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels: map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name: utilpointer.StringPtr(""),
					Port: utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.1"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.2"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.0.1.3"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
					{
						Addresses: []string{"10.0.1.4"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("another-host"),
					},
				},
			},
			noUsableEndpoints: true,
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name: "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP: "172.30.1.1",
					destPort: 80,
					output: "REJECT",
				},
				{
					name: "external to LB",
					sourceIP: testExternalClient,
					destIP: "1.2.3.4",
					destPort: 80,
					output: "REJECT",
				},
			},
		},
		{
			name: "ProxyTerminatingEndpoints enabled, terminating endpoints on remote node",
			line: getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels: map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name: utilpointer.StringPtr(""),
					Port: utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(true),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXT-AQI2S6QIMU7PVVRP - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				:KUBE-SEP-EQCHZ7S2PJ72OHAY - [0:0]
				:KUBE-SVC-AQI2S6QIMU7PVVRP - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 loadbalancer IP" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j KUBE-EXT-AQI2S6QIMU7PVVRP
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -m comment --comment "masquerade traffic for ns1/svc1 external destinations" -j KUBE-MARK-MASQ
				-A KUBE-EXT-AQI2S6QIMU7PVVRP -j KUBE-SVC-AQI2S6QIMU7PVVRP
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -s 10.0.1.5 -j KUBE-MARK-MASQ
				-A KUBE-SEP-EQCHZ7S2PJ72OHAY -m comment --comment ns1/svc1 -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --set -m tcp -p tcp -j DNAT --to-destination 10.0.1.5:80
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 cluster IP" -m tcp -p tcp -d 172.30.1.1 --dport 80 ! -s 10.0.0.0/8 -j KUBE-MARK-MASQ
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -m recent --name KUBE-SEP-EQCHZ7S2PJ72OHAY --rcheck --seconds 10800 --reap -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				-A KUBE-SVC-AQI2S6QIMU7PVVRP -m comment --comment "ns1/svc1 -> 10.0.1.5:80" -j KUBE-SEP-EQCHZ7S2PJ72OHAY
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name: "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP: "172.30.1.1",
					destPort: 80,
					output: "10.0.1.5:80",
					masq: false,
				},
				{
					name: "external to LB",
					sourceIP: testExternalClient,
					destIP: "1.2.3.4",
					destPort: 80,
					output: "10.0.1.5:80",
					masq: true,
				},
			},
		},
		{
			name: "no usable endpoints on any node",
			line: getLine(),
			terminatingFeatureGate: true,
			endpointslice: &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("%s-1", "svc1"),
					Namespace: "ns1",
					Labels: map[string]string{discovery.LabelServiceName: "svc1"},
				},
				Ports: []discovery.EndpointPort{{
					Name: utilpointer.StringPtr(""),
					Port: utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
				Endpoints: []discovery.Endpoint{
					{
						// Local, not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						// Remote, not ready or serving
						Addresses: []string{"10.0.1.5"},
						Conditions: discovery.EndpointConditions{
							Ready: utilpointer.BoolPtr(false),
							Serving: utilpointer.BoolPtr(false),
							Terminating: utilpointer.BoolPtr(true),
						},
						NodeName: utilpointer.StringPtr("host-1"),
					},
				},
			},
			noUsableEndpoints: true,
			expectedIPTables: dedent.Dedent(`
				*filter
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-EXTERNAL-SERVICES - [0:0]
				:KUBE-FORWARD - [0:0]
				-A KUBE-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 172.30.1.1 --dport 80 -j REJECT
				-A KUBE-EXTERNAL-SERVICES -m comment --comment "ns1/svc1 has no endpoints" -m tcp -p tcp -d 1.2.3.4 --dport 80 -j REJECT
				-A KUBE-FORWARD -m conntrack --ctstate INVALID -j DROP
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding rules" -m mark --mark 0x4000/0x4000 -j ACCEPT
				-A KUBE-FORWARD -m comment --comment "kubernetes forwarding conntrack rule" -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
				COMMIT
				*nat
				:KUBE-NODEPORTS - [0:0]
				:KUBE-SERVICES - [0:0]
				:KUBE-MARK-MASQ - [0:0]
				:KUBE-POSTROUTING - [0:0]
				-A KUBE-SERVICES -m comment --comment "kubernetes service nodeports; NOTE: this must be the last rule in this chain" -m addrtype --dst-type LOCAL -j KUBE-NODEPORTS
				-A KUBE-MARK-MASQ -j MARK --or-mark 0x4000
				-A KUBE-POSTROUTING -m mark ! --mark 0x4000/0x4000 -j RETURN
				-A KUBE-POSTROUTING -j MARK --xor-mark 0x4000
				-A KUBE-POSTROUTING -m comment --comment "kubernetes service traffic requiring SNAT" -j MASQUERADE
				COMMIT
				`),
			flowTests: []packetFlowTest{
				{
					name: "pod to clusterIP",
					sourceIP: "10.0.0.2",
					destIP: "172.30.1.1",
					destPort: 80,
					output: "REJECT",
				},
				{
					name: "external to LB",
					sourceIP: testExternalClient,
					destIP: "1.2.3.4",
					destPort: 80,
					output: "REJECT",
				},
			},
		},
	}

	for _, testcase := range testcases {
		t.Run(testcase.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ProxyTerminatingEndpoints, testcase.terminatingFeatureGate)()

			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			fp.OnServiceAdd(service)

			fp.OnEndpointSliceAdd(testcase.endpointslice)
			fp.syncProxyRules()
			assertIPTablesRulesEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String())
			runPacketFlowTests(t, testcase.line, fp.iptablesData.String(), testNodeIP, testcase.flowTests)
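
			// Delete the EndpointSlice and re-sync; afterward, all flows
			// should be REJECTed.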
			fp.OnEndpointSliceDelete(testcase.endpointslice)
			fp.syncProxyRules()
			if testcase.noUsableEndpoints {
				// Deleting the EndpointSlice should have had no effect
				assertIPTablesRulesEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String())
			} else {
				assertIPTablesRulesNotEqual(t, testcase.line, testcase.expectedIPTables, fp.iptablesData.String())
			}
			runPacketFlowTests(t, testcase.line, fp.iptablesData.String(), testNodeIP, []packetFlowTest{
				{
					name: "pod to clusterIP after endpoints deleted",
					sourceIP: "10.0.0.2",
					destIP: "172.30.1.1",
					destPort: 80,
					output: "REJECT",
				},
				{
					name: "external to LB after endpoints deleted",
					sourceIP: testExternalClient,
					destIP: "1.2.3.4",
					destPort: 80,
					output: "REJECT",
				},
			})
		})
	}
}
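
// TestInternalExternalMasquerade checks one shared set of packet flows against
// three LoadBalancer services (default traffic policies, eTP:Local, and
// iTP:Local) under every combination of masqueradeAll and localDetector,
// applying per-case overrides for the flows whose output or masquerading
// differs from the base configuration.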
func TestInternalExternalMasquerade(t *testing.T) {
	// (Put the test setup code in an internal function so we can have it here at the
	// top, before the test cases that will be run against it.)
	setupTest := func(fp *Proxier) {
		local := v1.ServiceInternalTrafficPolicyLocal
		tcpProtocol := v1.ProtocolTCP

		makeServiceMap(fp,
			makeTestService("ns1", "svc1", func(svc *v1.Service) {
				svc.Spec.Type = "LoadBalancer"
				svc.Spec.ClusterIP = "172.30.0.41"
				svc.Spec.Ports = []v1.ServicePort{{
					Name: "p80",
					Port: 80,
					Protocol: v1.ProtocolTCP,
					NodePort: int32(3001),
				}}
				svc.Spec.HealthCheckNodePort = 30001
				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
					IP: "1.2.3.4",
				}}
			}),
			makeTestService("ns2", "svc2", func(svc *v1.Service) {
				svc.Spec.Type = "LoadBalancer"
				svc.Spec.ClusterIP = "172.30.0.42"
				svc.Spec.Ports = []v1.ServicePort{{
					Name: "p80",
					Port: 80,
					Protocol: v1.ProtocolTCP,
					NodePort: int32(3002),
				}}
				svc.Spec.HealthCheckNodePort = 30002
				svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal
				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
					IP: "5.6.7.8",
				}}
			}),
			makeTestService("ns3", "svc3", func(svc *v1.Service) {
				svc.Spec.Type = "LoadBalancer"
				svc.Spec.ClusterIP = "172.30.0.43"
				svc.Spec.Ports = []v1.ServicePort{{
					Name: "p80",
					Port: 80,
					Protocol: v1.ProtocolTCP,
					NodePort: int32(3003),
				}}
				svc.Spec.HealthCheckNodePort = 30003
				svc.Spec.InternalTrafficPolicy = &local
				svc.Status.LoadBalancer.Ingress = []v1.LoadBalancerIngress{{
					IP: "9.10.11.12",
				}}
			}),
		)

		populateEndpointSlices(fp,
			makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{
					{
						Addresses: []string{"10.180.0.1"},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.180.1.1"},
						NodeName: utilpointer.StringPtr("remote"),
					},
				}
				eps.Ports = []discovery.EndpointPort{{
					Name: utilpointer.StringPtr("p80"),
					Port: utilpointer.Int32(80),
					Protocol: &tcpProtocol,
				}}
			}),
			makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{
					{
						Addresses: []string{"10.180.0.2"},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.180.1.2"},
						NodeName: utilpointer.StringPtr("remote"),
					},
				}
				eps.Ports = []discovery.EndpointPort{{
					Name: utilpointer.StringPtr("p80"),
					Port: utilpointer.Int32(80),
					Protocol: &tcpProtocol,
				}}
			}),
			makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
				eps.AddressType = discovery.AddressTypeIPv4
				eps.Endpoints = []discovery.Endpoint{
					{
						Addresses: []string{"10.180.0.3"},
						NodeName: utilpointer.StringPtr(testHostname),
					},
					{
						Addresses: []string{"10.180.1.3"},
						NodeName: utilpointer.StringPtr("remote"),
					},
				}
				eps.Ports = []discovery.EndpointPort{{
					Name: utilpointer.StringPtr("p80"),
					Port: utilpointer.Int32(80),
					Protocol: &tcpProtocol,
				}}
			}),
		)

		fp.syncProxyRules()
	}

	// We use the same flowTests for all of the testCases. The "output" and "masq"
	// values here represent the normal case (working localDetector, no masqueradeAll)
	flowTests := []packetFlowTest{
		{
			name: "pod to ClusterIP",
			sourceIP: "10.0.0.2",
			destIP: "172.30.0.41",
			destPort: 80,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: false,
		},
		{
			name: "pod to NodePort",
			sourceIP: "10.0.0.2",
			destIP: testNodeIP,
			destPort: 3001,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "pod to LB",
			sourceIP: "10.0.0.2",
			destIP: "1.2.3.4",
			destPort: 80,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "node to ClusterIP",
			sourceIP: testNodeIP,
			destIP: "172.30.0.41",
			destPort: 80,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "node to NodePort",
			sourceIP: testNodeIP,
			destIP: testNodeIP,
			destPort: 3001,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "localhost to NodePort",
			sourceIP: "127.0.0.1",
			destIP: "127.0.0.1",
			destPort: 3001,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "node to LB",
			sourceIP: testNodeIP,
			destIP: "1.2.3.4",
			destPort: 80,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "external to ClusterIP",
			sourceIP: testExternalClient,
			destIP: "172.30.0.41",
			destPort: 80,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "external to NodePort",
			sourceIP: testExternalClient,
			destIP: testNodeIP,
			destPort: 3001,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "external to LB",
			sourceIP: testExternalClient,
			destIP: "1.2.3.4",
			destPort: 80,
			output: "10.180.0.1:80, 10.180.1.1:80",
			masq: true,
		},
		{
			name: "pod to ClusterIP with eTP:Local",
			sourceIP: "10.0.0.2",
			destIP: "172.30.0.42",
			destPort: 80,

			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
			// as "Pod to ClusterIP"
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: false,
		},
		{
			name: "pod to NodePort with eTP:Local",
			sourceIP: "10.0.0.2",
			destIP: testNodeIP,
			destPort: 3002,

			// FIXME: The short-circuit rule means we potentially send to a remote
			// endpoint without masquerading, which is inconsistent with the
			// eTP:Cluster case. We should either be masquerading here, or NOT
			// masquerading in the "pod to NodePort" case above.
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: false,
		},
		{
			name: "pod to LB with eTP:Local",
			sourceIP: "10.0.0.2",
			destIP: "5.6.7.8",
			destPort: 80,

			// FIXME: The short-circuit rule means we potentially send to a remote
			// endpoint without masquerading, which is inconsistent with the
			// eTP:Cluster case. We should either be masquerading here, or NOT
			// masquerading in the "pod to LB" case above.
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: false,
		},
		{
			name: "node to ClusterIP with eTP:Local",
			sourceIP: testNodeIP,
			destIP: "172.30.0.42",
			destPort: 80,

			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
			// as "node to ClusterIP"
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: true,
		},
		{
			name: "node to NodePort with eTP:Local",
			sourceIP: testNodeIP,
			destIP: testNodeIP,
			destPort: 3002,

			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
			// same as "node to NodePort" above.
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: true,
		},
		{
			name: "localhost to NodePort with eTP:Local",
			sourceIP: "127.0.0.1",
			destIP: "127.0.0.1",
			destPort: 3002,

			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
			// same as "localhost to NodePort" above.
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: true,
		},
		{
			name: "node to LB with eTP:Local",
			sourceIP: testNodeIP,
			destIP: "5.6.7.8",
			destPort: 80,

			// The traffic gets short-circuited, ignoring externalTrafficPolicy, so
			// same as "node to LB" above.
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: true,
		},
		{
			name: "external to ClusterIP with eTP:Local",
			sourceIP: testExternalClient,
			destIP: "172.30.0.42",
			destPort: 80,

			// externalTrafficPolicy does not apply to ClusterIP traffic, so same
			// as "external to ClusterIP" above.
			output: "10.180.0.2:80, 10.180.1.2:80",
			masq: true,
		},
		{
			name: "external to NodePort with eTP:Local",
			sourceIP: testExternalClient,
			destIP: testNodeIP,
			destPort: 3002,

			// externalTrafficPolicy applies; only the local endpoint is
			// selected, and we don't masquerade.
			output: "10.180.0.2:80",
			masq: false,
		},
		{
			name: "external to LB with eTP:Local",
			sourceIP: testExternalClient,
			destIP: "5.6.7.8",
			destPort: 80,

			// externalTrafficPolicy applies; only the local endpoint is
			// selected, and we don't masquerade.
			output: "10.180.0.2:80",
			masq: false,
		},
		{
			name: "pod to ClusterIP with iTP:Local",
			sourceIP: "10.0.0.2",
			destIP: "172.30.0.43",
			destPort: 80,

			// internalTrafficPolicy applies; only the local endpoint is
			// selected.
			output: "10.180.0.3:80",
			masq: false,
		},
		{
			name: "pod to NodePort with iTP:Local",
			sourceIP: "10.0.0.2",
			destIP: testNodeIP,
			destPort: 3003,

			// internalTrafficPolicy does not apply to NodePort traffic, so same as
			// "pod to NodePort" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
		{
			name: "pod to LB with iTP:Local",
			sourceIP: "10.0.0.2",
			destIP: "9.10.11.12",
			destPort: 80,

			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
			// same as "pod to LB" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
		{
			name: "node to ClusterIP with iTP:Local",
			sourceIP: testNodeIP,
			destIP: "172.30.0.43",
			destPort: 80,

			// internalTrafficPolicy applies; only the local endpoint is selected.
			// Traffic is masqueraded as in the "node to ClusterIP" case because
			// internalTrafficPolicy does not affect masquerading.
			output: "10.180.0.3:80",
			masq: true,
		},
		{
			name: "node to NodePort with iTP:Local",
			sourceIP: testNodeIP,
			destIP: testNodeIP,
			destPort: 3003,

			// internalTrafficPolicy does not apply to NodePort traffic, so same as
			// "node to NodePort" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
		{
			name: "localhost to NodePort with iTP:Local",
			sourceIP: "127.0.0.1",
			destIP: "127.0.0.1",
			destPort: 3003,

			// internalTrafficPolicy does not apply to NodePort traffic, so same as
			// "localhost to NodePort" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
		{
			name: "node to LB with iTP:Local",
			sourceIP: testNodeIP,
			destIP: "9.10.11.12",
			destPort: 80,

			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
			// same as "node to LB" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
		{
			name: "external to ClusterIP with iTP:Local",
			sourceIP: testExternalClient,
			destIP: "172.30.0.43",
			destPort: 80,

			// internalTrafficPolicy applies; only the local endpoint is selected.
			// Traffic is masqueraded as in the "external to ClusterIP" case
			// because internalTrafficPolicy does not affect masquerading.
			output: "10.180.0.3:80",
			masq: true,
		},
		{
			name: "external to NodePort with iTP:Local",
			sourceIP: testExternalClient,
			destIP: testNodeIP,
			destPort: 3003,

			// internalTrafficPolicy does not apply to NodePort traffic, so same as
			// "external to NodePort" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
		{
			name: "external to LB with iTP:Local",
			sourceIP: testExternalClient,
			destIP: "9.10.11.12",
			destPort: 80,

			// internalTrafficPolicy does not apply to LoadBalancer traffic, so
			// same as "external to LB" above.
			output: "10.180.0.3:80, 10.180.1.3:80",
			masq: true,
		},
	}

	type packetFlowTestOverride struct {
		output *string
		masq *bool
	}

	testCases := []struct {
		name string
		line int
		masqueradeAll bool
		localDetector bool
		overrides map[string]packetFlowTestOverride
	}{
		{
			name: "base",
			line: getLine(),
			masqueradeAll: false,
			localDetector: true,
			overrides: nil,
		},
		{
			name: "no LocalTrafficDetector",
			line: getLine(),
			masqueradeAll: false,
			localDetector: false,
			overrides: map[string]packetFlowTestOverride{
				// With no LocalTrafficDetector, all traffic to a
				// ClusterIP is assumed to be from a pod, and thus to not
				// require masquerading.
				"node to ClusterIP": {
					masq: utilpointer.Bool(false),
				},
				"node to ClusterIP with eTP:Local": {
					masq: utilpointer.Bool(false),
				},
				"node to ClusterIP with iTP:Local": {
					masq: utilpointer.Bool(false),
				},
				"external to ClusterIP": {
					masq: utilpointer.Bool(false),
				},
				"external to ClusterIP with eTP:Local": {
					masq: utilpointer.Bool(false),
				},
				"external to ClusterIP with iTP:Local": {
					masq: utilpointer.Bool(false),
				},

				// And there's no eTP:Local short-circuit for pod traffic,
				// so pods get only the local endpoints.
				"pod to NodePort with eTP:Local": {
					output: utilpointer.String("10.180.0.2:80"),
				},
				"pod to LB with eTP:Local": {
					output: utilpointer.String("10.180.0.2:80"),
				},
			},
		},
		{
			name: "masqueradeAll",
			line: getLine(),
			masqueradeAll: true,
			localDetector: true,
			overrides: map[string]packetFlowTestOverride{
				// All "to ClusterIP" traffic gets masqueraded when using
				// --masquerade-all.
				"pod to ClusterIP": {
					masq: utilpointer.Bool(true),
				},
				"pod to ClusterIP with eTP:Local": {
					masq: utilpointer.Bool(true),
				},
				"pod to ClusterIP with iTP:Local": {
					masq: utilpointer.Bool(true),
				},
			},
		},
		{
			name: "masqueradeAll, no LocalTrafficDetector",
			line: getLine(),
			masqueradeAll: true,
			localDetector: false,
			overrides: map[string]packetFlowTestOverride{
				// As in "masqueradeAll"
				"pod to ClusterIP": {
					masq: utilpointer.Bool(true),
				},
				"pod to ClusterIP with eTP:Local": {
					masq: utilpointer.Bool(true),
				},
				"pod to ClusterIP with iTP:Local": {
					masq: utilpointer.Bool(true),
				},

				// As in "no LocalTrafficDetector"
				"pod to NodePort with eTP:Local": {
					output: utilpointer.String("10.180.0.2:80"),
				},
				"pod to LB with eTP:Local": {
					output: utilpointer.String("10.180.0.2:80"),
				},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, true)()

			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.masqueradeAll = tc.masqueradeAll
			if !tc.localDetector {
				fp.localDetector = proxyutiliptables.NewNoOpLocalDetector()
			}
			setupTest(fp)

			// Merge base flowTests with per-test-case overrides
			tcFlowTests := make([]packetFlowTest, len(flowTests))
			overridesApplied := 0
			for i := range flowTests {
				tcFlowTests[i] = flowTests[i]
				if overrides, set := tc.overrides[flowTests[i].name]; set {
					overridesApplied++
					if overrides.masq != nil {
						if tcFlowTests[i].masq == *overrides.masq {
t.Errorf("%q override value for masq is same as base value", flowTests[i].name)
|
|
						}
						tcFlowTests[i].masq = *overrides.masq
					}
					if overrides.output != nil {
						if tcFlowTests[i].output == *overrides.output {
t.Errorf("%q override value for output is same as base value", flowTests[i].name)
|
|
						}
						tcFlowTests[i].output = *overrides.output
					}
				}
			}
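			// Any override whose name doesn't match a base flowTest is a
			// typo; flag it rather than silently ignoring it.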
			if overridesApplied != len(tc.overrides) {
t.Errorf("%d overrides did not match any test case name!", len(tc.overrides)-overridesApplied)
|
|
			}
			runPacketFlowTests(t, tc.line, fp.iptablesData.String(), testNodeIP, tcFlowTests)
		})
	}
}
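
// countEndpointsAndComments scans the given iptables dump, counting KUBE-SEP
// DNAT rules and how many of them carry an iptables "--comment", and returns
// the rule line containing matchEndpoint (if any) along with both counts.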
func countEndpointsAndComments(iptablesData string, matchEndpoint string) (string, int, int) {
	var numEndpoints, numComments int
	var matched string
	for _, line := range strings.Split(iptablesData, "\n") {
		if strings.HasPrefix(line, "-A KUBE-SEP-") && strings.Contains(line, "-j DNAT") {
			numEndpoints++
			if strings.Contains(line, "--comment") {
				numComments++
			}
			if strings.Contains(line, matchEndpoint) {
				matched = line
			}
		}
	}
	return matched, numEndpoints, numComments
}
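
// TestEndpointCommentElision verifies that per-endpoint comments appear in
// the generated rules while the number of endpoint chains stays below
// endpointChainsNumberThreshold, and are elided once the count crosses it.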
func TestEndpointCommentElision(t *testing.T) {
	ipt := iptablestest.NewFake()
	fp := NewFakeProxier(ipt)
	fp.masqueradeAll = true

	makeServiceMap(fp,
		makeTestService("ns1", "svc1", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.0.41"
			svc.Spec.Ports = []v1.ServicePort{{
				Name: "p80",
				Port: 80,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		makeTestService("ns2", "svc2", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.0.42"
			svc.Spec.Ports = []v1.ServicePort{{
				Name: "p8080",
				Port: 8080,
				Protocol: v1.ProtocolTCP,
			}}
		}),
		makeTestService("ns3", "svc3", func(svc *v1.Service) {
			svc.Spec.Type = v1.ServiceTypeClusterIP
			svc.Spec.ClusterIP = "172.30.0.43"
			svc.Spec.Ports = []v1.ServicePort{{
				Name: "p8081",
				Port: 8081,
				Protocol: v1.ProtocolTCP,
			}}
		}),
	)

	tcpProtocol := v1.ProtocolTCP
	populateEndpointSlices(fp,
		makeTestEndpointSlice("ns1", "svc1", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
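			// Each slice gets just under half the elision threshold, so the
			// two slices together stay below endpointChainsNumberThreshold.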
			eps.Endpoints = make([]discovery.Endpoint, endpointChainsNumberThreshold/2-1)
			for i := range eps.Endpoints {
				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.0.%d.%d", i%256, i/256)}
			}
			eps.Ports = []discovery.EndpointPort{{
				Name: utilpointer.StringPtr("p80"),
				Port: utilpointer.Int32(80),
				Protocol: &tcpProtocol,
			}}
		}),
		makeTestEndpointSlice("ns2", "svc2", 1, func(eps *discovery.EndpointSlice) {
			eps.AddressType = discovery.AddressTypeIPv4
			eps.Endpoints = make([]discovery.Endpoint, endpointChainsNumberThreshold/2-1)
			for i := range eps.Endpoints {
				eps.Endpoints[i].Addresses = []string{fmt.Sprintf("10.1.%d.%d", i%256, i/256)}
			}
			eps.Ports = []discovery.EndpointPort{{
				Name: utilpointer.StringPtr("p8080"),
				Port: utilpointer.Int32(8080),
				Protocol: &tcpProtocol,
			}}
		}),
	)

	fp.syncProxyRules()

	expectedEndpoints := 2 * (endpointChainsNumberThreshold/2 - 1)
	firstEndpoint, numEndpoints, numComments := countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m comment --comment ns1/svc1:p80 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
	if numEndpoints != expectedEndpoints {
		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
	}
	if numComments != numEndpoints {
		t.Errorf("numComments (%d) != numEndpoints (%d) when numEndpoints < threshold (%d)", numComments, numEndpoints, endpointChainsNumberThreshold)
	}
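
	// Adding three more endpoints pushes the total over
	// endpointChainsNumberThreshold, so comments should now be elided.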
	fp.OnEndpointSliceAdd(makeTestEndpointSlice("ns3", "svc3", 1, func(eps *discovery.EndpointSlice) {
		eps.AddressType = discovery.AddressTypeIPv4
		eps.Endpoints = []discovery.Endpoint{{
			Addresses: []string{"203.0.113.4"},
		}, {
			Addresses: []string{"203.0.113.8"},
		}, {
			Addresses: []string{"203.0.113.12"},
		}}
		eps.Ports = []discovery.EndpointPort{{
			Name: utilpointer.StringPtr("p8081"),
			Port: utilpointer.Int32(8081),
			Protocol: &tcpProtocol,
		}}
	}))
	fp.syncProxyRules()

	expectedEndpoints += 3
	firstEndpoint, numEndpoints, numComments = countEndpointsAndComments(fp.iptablesData.String(), "10.0.0.0")
	assert.Equal(t, "-A KUBE-SEP-DKGQUZGBKLTPAR56 -m tcp -p tcp -j DNAT --to-destination 10.0.0.0:80", firstEndpoint)
	if numEndpoints != expectedEndpoints {
		t.Errorf("Found wrong number of endpoints: expected %d, got %d", expectedEndpoints, numEndpoints)
	}
	if numComments != 0 {
		t.Errorf("numComments (%d) != 0 when numEndpoints (%d) > threshold (%d)", numComments, numEndpoints, endpointChainsNumberThreshold)
	}
}
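
// TestNoEndpointsMetric verifies that SyncProxyRulesNoLocalEndpointsTotal is
// incremented (per "internal"/"external" label) only when a service pins
// traffic locally via internalTrafficPolicy or externalTrafficPolicy and has
// endpoints, but none of them are on this node.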
func TestNoEndpointsMetric(t *testing.T) {
	type endpoint struct {
		ip string
		hostname string
	}

	internalTrafficPolicyLocal := v1.ServiceInternalTrafficPolicyLocal
	externalTrafficPolicyLocal := v1.ServiceExternalTrafficPolicyTypeLocal

	metrics.RegisterMetrics()
	testCases := []struct {
		name string
		internalTrafficPolicy *v1.ServiceInternalTrafficPolicyType
		externalTrafficPolicy v1.ServiceExternalTrafficPolicyType
		endpoints []endpoint
		expectedSyncProxyRulesNoLocalEndpointsTotalInternal int
		expectedSyncProxyRulesNoLocalEndpointsTotalExternal int
	}{
		{
			name: "internalTrafficPolicy is set and there are local endpoints",
			internalTrafficPolicy: &internalTrafficPolicyLocal,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
		},
		{
			name: "externalTrafficPolicy is set and there are local endpoints",
			externalTrafficPolicy: externalTrafficPolicyLocal,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
		},
		{
			name: "both policies are set and there are local endpoints",
			internalTrafficPolicy: &internalTrafficPolicyLocal,
			externalTrafficPolicy: externalTrafficPolicyLocal,
			endpoints: []endpoint{
				{"10.0.1.1", testHostname},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
		},
		{
			name: "internalTrafficPolicy is set and there are no local endpoints",
			internalTrafficPolicy: &internalTrafficPolicyLocal,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
		},
		{
			name: "externalTrafficPolicy is set and there are no local endpoints",
			externalTrafficPolicy: externalTrafficPolicyLocal,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
		},
		{
			name: "both policies are set and there are no local endpoints",
			internalTrafficPolicy: &internalTrafficPolicyLocal,
			externalTrafficPolicy: externalTrafficPolicyLocal,
			endpoints: []endpoint{
				{"10.0.1.1", "host0"},
				{"10.0.1.2", "host1"},
				{"10.0.1.3", "host2"},
			},
			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 1,
			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 1,
		},
		{
			name: "both policies are set and there are no endpoints at all",
			internalTrafficPolicy: &internalTrafficPolicyLocal,
			externalTrafficPolicy: externalTrafficPolicyLocal,
			endpoints: []endpoint{},
			expectedSyncProxyRulesNoLocalEndpointsTotalInternal: 0,
			expectedSyncProxyRulesNoLocalEndpointsTotalExternal: 0,
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ServiceInternalTrafficPolicy, true)()
			ipt := iptablestest.NewFake()
			fp := NewFakeProxier(ipt)
			fp.OnServiceSynced()
			fp.OnEndpointSlicesSynced()

			serviceName := "svc1"
			namespaceName := "ns1"

			svc := &v1.Service{
				ObjectMeta: metav1.ObjectMeta{Name: serviceName, Namespace: namespaceName},
				Spec: v1.ServiceSpec{
					ClusterIP: "172.30.1.1",
					Selector: map[string]string{"foo": "bar"},
					Ports: []v1.ServicePort{{Name: "", Port: 80, Protocol: v1.ProtocolTCP, NodePort: 123}},
				},
			}
			if tc.internalTrafficPolicy != nil {
				svc.Spec.InternalTrafficPolicy = tc.internalTrafficPolicy
			}
			if tc.externalTrafficPolicy != "" {
				svc.Spec.Type = v1.ServiceTypeNodePort
				svc.Spec.ExternalTrafficPolicy = tc.externalTrafficPolicy
			}

			fp.OnServiceAdd(svc)

			tcpProtocol := v1.ProtocolTCP
			endpointSlice := &discovery.EndpointSlice{
				ObjectMeta: metav1.ObjectMeta{
					Name: fmt.Sprintf("%s-1", serviceName),
					Namespace: namespaceName,
					Labels: map[string]string{discovery.LabelServiceName: serviceName},
				},
				Ports: []discovery.EndpointPort{{
					Name: utilpointer.StringPtr(""),
					Port: utilpointer.Int32Ptr(80),
					Protocol: &tcpProtocol,
				}},
				AddressType: discovery.AddressTypeIPv4,
			}
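			// Build the slice's endpoints from the test case, marking each one
			// Ready and attaching its (possibly remote) node name.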
			for _, ep := range tc.endpoints {
				endpointSlice.Endpoints = append(endpointSlice.Endpoints, discovery.Endpoint{
					Addresses: []string{ep.ip},
					Conditions: discovery.EndpointConditions{Ready: utilpointer.BoolPtr(true)},
					NodeName: utilpointer.StringPtr(ep.hostname),
				})
			}

			fp.OnEndpointSliceAdd(endpointSlice)
			fp.syncProxyRules()
			syncProxyRulesNoLocalEndpointsTotalInternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("internal"))
			if err != nil {
				t.Errorf("failed to get %s value, err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
			}

			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal != int(syncProxyRulesNoLocalEndpointsTotalInternal) {
				t.Errorf("sync_proxy_rules_no_endpoints_total metric mismatch(internal): got=%d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalInternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalInternal)
			}

			syncProxyRulesNoLocalEndpointsTotalExternal, err := testutil.GetGaugeMetricValue(metrics.SyncProxyRulesNoLocalEndpointsTotal.WithLabelValues("external"))
			if err != nil {
				t.Errorf("failed to get %s value(external), err: %v", metrics.SyncProxyRulesNoLocalEndpointsTotal.Name, err)
			}

			if tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal != int(syncProxyRulesNoLocalEndpointsTotalExternal) {
t.Errorf("sync_proxy_rules_no_endpoints_total metric mismatch(internal): got=%d, expected %d", int(syncProxyRulesNoLocalEndpointsTotalExternal), tc.expectedSyncProxyRulesNoLocalEndpointsTotalExternal)
|
|
			}
		})
	}
}