move endpointslice reconciler to staging endpointslice repo
@@ -1,15 +0,0 @@
# See the OWNERS docs at https://go.k8s.io/owners

approvers:
- bowei
- freehan
- MrHohn
- thockin
- sig-network-approvers
reviewers:
- robscott
- freehan
- bowei
- sig-network-reviewers
labels:
- sig/network
@@ -1,304 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoint

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"reflect"
	"sort"

	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/conversion"
	"k8s.io/apimachinery/pkg/labels"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/sets"
	v1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/util/hash"
)

// semanticIgnoreResourceVersion does semantic deep equality checks for objects
// but excludes ResourceVersion of ObjectReference. They are used when comparing
// endpoints in Endpoints and EndpointSlice objects to avoid unnecessary updates
// caused by Pod resourceVersion change.
var semanticIgnoreResourceVersion = conversion.EqualitiesOrDie(
	func(a, b v1.ObjectReference) bool {
		a.ResourceVersion = ""
		b.ResourceVersion = ""
		return a == b
	},
)
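
// Illustrative sketch (not from the original source): with the equality func
// above registered, two ObjectReferences that differ only in ResourceVersion
// compare as equal, so a Pod update that changes nothing but its
// resourceVersion will not force an Endpoints/EndpointSlice write.
//
//	refA := v1.ObjectReference{Kind: "Pod", Name: "pod0", ResourceVersion: "1"}
//	refB := v1.ObjectReference{Kind: "Pod", Name: "pod0", ResourceVersion: "2"}
//	semanticIgnoreResourceVersion.DeepEqual(refA, refB) // true
//	refB.Name = "pod1"
//	semanticIgnoreResourceVersion.DeepEqual(refA, refB) // false: a real change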

// GetPodServiceMemberships returns a set of Service keys for Services that have
// a selector matching the given pod.
func GetPodServiceMemberships(serviceLister v1listers.ServiceLister, pod *v1.Pod) (sets.String, error) {
	set := sets.String{}
	services, err := serviceLister.Services(pod.Namespace).List(labels.Everything())
	if err != nil {
		return set, err
	}

	for _, service := range services {
		if service.Spec.Selector == nil {
			// if the service has a nil selector this means selectors match nothing, not everything.
			continue
		}
		key, err := controller.KeyFunc(service)
		if err != nil {
			return nil, err
		}
		if labels.ValidatedSetSelector(service.Spec.Selector).Matches(labels.Set(pod.Labels)) {
			set.Insert(key)
		}
	}
	return set, nil
}

// PortMapKey is used to uniquely identify groups of endpoint ports.
type PortMapKey string

// NewPortMapKey generates a PortMapKey from endpoint ports.
func NewPortMapKey(endpointPorts []discovery.EndpointPort) PortMapKey {
	sort.Sort(portsInOrder(endpointPorts))
	return PortMapKey(DeepHashObjectToString(endpointPorts))
}

// DeepHashObjectToString creates a unique hash string from a go object.
func DeepHashObjectToString(objectToWrite interface{}) string {
	hasher := md5.New()
	hash.DeepHashObject(hasher, objectToWrite)
	return hex.EncodeToString(hasher.Sum(nil)[0:])
}
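
// Illustrative sketch (not from the original source): because NewPortMapKey
// sorts the ports before hashing, the key is insensitive to port order, so
// two slices containing the same ports always land in the same group.
//
//	http := discovery.EndpointPort{Name: pointer.String("http"), Port: pointer.Int32(80)}
//	https := discovery.EndpointPort{Name: pointer.String("https"), Port: pointer.Int32(443)}
//	keyA := NewPortMapKey([]discovery.EndpointPort{http, https})
//	keyB := NewPortMapKey([]discovery.EndpointPort{https, http})
//	// keyA == keyB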

// ShouldPodBeInEndpoints returns true if a specified pod should be in an
// Endpoints or EndpointSlice resource. Terminating pods are only included if
// includeTerminating is true.
func ShouldPodBeInEndpoints(pod *v1.Pod, includeTerminating bool) bool {
	// "Terminal" describes when a Pod is complete (in a succeeded or failed phase).
	// This is distinct from the "Terminating" condition which represents when a Pod
	// is being terminated (metadata.deletionTimestamp is non nil).
	if podutil.IsPodTerminal(pod) {
		return false
	}

	if len(pod.Status.PodIP) == 0 && len(pod.Status.PodIPs) == 0 {
		return false
	}

	if !includeTerminating && pod.DeletionTimestamp != nil {
		return false
	}

	return true
}

// ShouldSetHostname returns true if the Hostname attribute should be set on an
// Endpoints Address or EndpointSlice Endpoint.
func ShouldSetHostname(pod *v1.Pod, svc *v1.Service) bool {
	return len(pod.Spec.Hostname) > 0 && pod.Spec.Subdomain == svc.Name && svc.Namespace == pod.Namespace
}

// podEndpointsChanged returns two boolean values. The first is true if the pod has
// changed in a way that may change existing endpoints. The second value is true if the
// pod has changed in a way that may affect which Services it matches.
func podEndpointsChanged(oldPod, newPod *v1.Pod) (bool, bool) {
	// Check if the pod labels have changed, indicating a possible
	// change in the service membership
	labelsChanged := false
	if !reflect.DeepEqual(newPod.Labels, oldPod.Labels) ||
		!hostNameAndDomainAreEqual(newPod, oldPod) {
		labelsChanged = true
	}

	// If the pod's deletion timestamp is set, remove endpoint from ready address.
	if newPod.DeletionTimestamp != oldPod.DeletionTimestamp {
		return true, labelsChanged
	}
	// If the pod's readiness has changed, the associated endpoint address
	// will move from the unready endpoints set to the ready endpoints.
	// So for the purposes of an endpoint, a readiness change on a pod
	// means we have a changed pod.
	if podutil.IsPodReady(oldPod) != podutil.IsPodReady(newPod) {
		return true, labelsChanged
	}

	// Check if the pod IPs have changed
	if len(oldPod.Status.PodIPs) != len(newPod.Status.PodIPs) {
		return true, labelsChanged
	}
	for i := range oldPod.Status.PodIPs {
		if oldPod.Status.PodIPs[i].IP != newPod.Status.PodIPs[i].IP {
			return true, labelsChanged
		}
	}

	// Endpoints may also reference a pod's Name, Namespace, UID, and NodeName, but
	// the first three are immutable, and NodeName is immutable once initially set,
	// which happens before the pod gets an IP.

	return false, labelsChanged
}

// GetServicesToUpdateOnPodChange returns a set of Service keys for Services
// that have potentially been affected by a change to this pod.
func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, old, cur interface{}) sets.String {
	newPod := cur.(*v1.Pod)
	oldPod := old.(*v1.Pod)
	if newPod.ResourceVersion == oldPod.ResourceVersion {
		// Periodic resync will send update events for all known pods.
		// Two different versions of the same pod will always have different RVs
		return sets.String{}
	}

	podChanged, labelsChanged := podEndpointsChanged(oldPod, newPod)

	// If both the pod and labels are unchanged, no update is needed
	if !podChanged && !labelsChanged {
		return sets.String{}
	}

	services, err := GetPodServiceMemberships(serviceLister, newPod)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("unable to get pod %s/%s's service memberships: %v", newPod.Namespace, newPod.Name, err))
		return sets.String{}
	}

	if labelsChanged {
		oldServices, err := GetPodServiceMemberships(serviceLister, oldPod)
		if err != nil {
			utilruntime.HandleError(fmt.Errorf("unable to get pod %s/%s's service memberships: %v", oldPod.Namespace, oldPod.Name, err))
		}
		services = determineNeededServiceUpdates(oldServices, services, podChanged)
	}

	return services
}

// GetPodFromDeleteAction returns a pointer to a pod if one can be derived from
// obj (could be a *v1.Pod, or a DeletionFinalStateUnknown marker item).
func GetPodFromDeleteAction(obj interface{}) *v1.Pod {
	if pod, ok := obj.(*v1.Pod); ok {
		// Enqueue all the services that the pod used to be a member of.
		// This is the same thing we do when we add a pod.
		return pod
	}
	// If we reached here it means the pod was deleted but its final state is unrecorded.
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("couldn't get object from tombstone %#v", obj))
		return nil
	}
	pod, ok := tombstone.Obj.(*v1.Pod)
	if !ok {
		utilruntime.HandleError(fmt.Errorf("tombstone contained object that is not a Pod: %#v", obj))
		return nil
	}
	return pod
}

func hostNameAndDomainAreEqual(pod1, pod2 *v1.Pod) bool {
	return pod1.Spec.Hostname == pod2.Spec.Hostname &&
		pod1.Spec.Subdomain == pod2.Spec.Subdomain
}

func determineNeededServiceUpdates(oldServices, services sets.String, podChanged bool) sets.String {
	if podChanged {
		// if the labels and pod changed, all services need to be updated
		services = services.Union(oldServices)
	} else {
		// if only the labels changed, services not common to both the new
		// and old service set (the disjunctive union) need to be updated
		services = services.Difference(oldServices).Union(oldServices.Difference(services))
	}
	return services
}
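
// Worked example (not from the original source): with oldServices = {a, b, c}
// and services = {b, c, d}, a label-only change returns the symmetric
// difference {a, d}, while a change that also affected the pod's endpoints
// returns the union {a, b, c, d}.
//
//	old := sets.NewString("a", "b", "c")
//	cur := sets.NewString("b", "c", "d")
//	determineNeededServiceUpdates(old, cur, false) // {"a", "d"}
//	determineNeededServiceUpdates(old, cur, true)  // {"a", "b", "c", "d"}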

// portsInOrder helps sort endpoint ports in a consistent way for hashing.
type portsInOrder []discovery.EndpointPort

func (sl portsInOrder) Len() int      { return len(sl) }
func (sl portsInOrder) Swap(i, j int) { sl[i], sl[j] = sl[j], sl[i] }
func (sl portsInOrder) Less(i, j int) bool {
	h1 := DeepHashObjectToString(sl[i])
	h2 := DeepHashObjectToString(sl[j])
	return h1 < h2
}

// EndpointsEqualBeyondHash returns true if endpoints have equal attributes
// but excludes equality checks that would have already been covered with
// endpoint hashing (see hashEndpoint func for more info) and ignores difference
// in ResourceVersion of TargetRef.
func EndpointsEqualBeyondHash(ep1, ep2 *discovery.Endpoint) bool {
	if stringPtrChanged(ep1.NodeName, ep2.NodeName) {
		return false
	}

	if stringPtrChanged(ep1.Zone, ep2.Zone) {
		return false
	}

	if boolPtrChanged(ep1.Conditions.Ready, ep2.Conditions.Ready) {
		return false
	}

	if boolPtrChanged(ep1.Conditions.Serving, ep2.Conditions.Serving) {
		return false
	}

	if boolPtrChanged(ep1.Conditions.Terminating, ep2.Conditions.Terminating) {
		return false
	}

	if !semanticIgnoreResourceVersion.DeepEqual(ep1.TargetRef, ep2.TargetRef) {
		return false
	}

	return true
}

// boolPtrChanged returns true if a set of bool pointers have different values.
func boolPtrChanged(ptr1, ptr2 *bool) bool {
	if (ptr1 == nil) != (ptr2 == nil) {
		return true
	}
	if ptr1 != nil && ptr2 != nil && *ptr1 != *ptr2 {
		return true
	}
	return false
}

// stringPtrChanged returns true if a set of string pointers have different values.
func stringPtrChanged(ptr1, ptr2 *string) bool {
	if (ptr1 == nil) != (ptr2 == nil) {
		return true
	}
	if ptr1 != nil && ptr2 != nil && *ptr1 != *ptr2 {
		return true
	}
	return false
}

// EndpointSubsetsEqualIgnoreResourceVersion returns true if EndpointSubsets
// have equal attributes but excludes ResourceVersion of Pod.
func EndpointSubsetsEqualIgnoreResourceVersion(subsets1, subsets2 []v1.EndpointSubset) bool {
	return semanticIgnoreResourceVersion.DeepEqual(subsets1, subsets2)
}
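
// Illustrative sketch (not from the original source): a subsets diff in which
// only a TargetRef's ResourceVersion moved counts as "no change", so the
// controller can skip the API write. "es" is a hypothetical existing subset.
//
//	a := []v1.EndpointSubset{*es}
//	b := []v1.EndpointSubset{*es.DeepCopy()}
//	b[0].Addresses[0].TargetRef.ResourceVersion = "999"
//	EndpointSubsetsEqualIgnoreResourceVersion(a, b) // true: only the RV moved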
@@ -1,924 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoint

import (
	"fmt"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/utils/pointer"
)

func TestDetermineNeededServiceUpdates(t *testing.T) {
	testCases := []struct {
		name  string
		a     sets.String
		b     sets.String
		union sets.String
		xor   sets.String
	}{
		{
			name:  "no services changed",
			a:     sets.NewString("a", "b", "c"),
			b:     sets.NewString("a", "b", "c"),
			xor:   sets.NewString(),
			union: sets.NewString("a", "b", "c"),
		},
		{
			name:  "all old services removed, new services added",
			a:     sets.NewString("a", "b", "c"),
			b:     sets.NewString("d", "e", "f"),
			xor:   sets.NewString("a", "b", "c", "d", "e", "f"),
			union: sets.NewString("a", "b", "c", "d", "e", "f"),
		},
		{
			name:  "all old services removed, no new services added",
			a:     sets.NewString("a", "b", "c"),
			b:     sets.NewString(),
			xor:   sets.NewString("a", "b", "c"),
			union: sets.NewString("a", "b", "c"),
		},
		{
			name:  "no old services, but new services added",
			a:     sets.NewString(),
			b:     sets.NewString("a", "b", "c"),
			xor:   sets.NewString("a", "b", "c"),
			union: sets.NewString("a", "b", "c"),
		},
		{
			name:  "one service removed, one service added, two unchanged",
			a:     sets.NewString("a", "b", "c"),
			b:     sets.NewString("b", "c", "d"),
			xor:   sets.NewString("a", "d"),
			union: sets.NewString("a", "b", "c", "d"),
		},
		{
			name:  "no services",
			a:     sets.NewString(),
			b:     sets.NewString(),
			xor:   sets.NewString(),
			union: sets.NewString(),
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			retval := determineNeededServiceUpdates(testCase.a, testCase.b, false)
			if !retval.Equal(testCase.xor) {
				t.Errorf("%s (with podChanged=false): expected: %v got: %v", testCase.name, testCase.xor.List(), retval.List())
			}

			retval = determineNeededServiceUpdates(testCase.a, testCase.b, true)
			if !retval.Equal(testCase.union) {
				t.Errorf("%s (with podChanged=true): expected: %v got: %v", testCase.name, testCase.union.List(), retval.List())
			}
		})
	}
}

func TestShouldPodBeInEndpoints(t *testing.T) {
	testCases := []struct {
		name               string
		pod                *v1.Pod
		expected           bool
		includeTerminating bool
	}{
		// Pod should not be in endpoints:
		{
			name: "Failed pod with Never RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
				Status: v1.PodStatus{
					Phase: v1.PodFailed,
					PodIP: "1.2.3.4",
				},
			},
			expected: false,
		},
		{
			name: "Succeeded pod with Never RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
				Status: v1.PodStatus{
					Phase: v1.PodSucceeded,
					PodIP: "1.2.3.4",
				},
			},
			expected: false,
		},
		{
			name: "Succeeded pod with OnFailure RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyOnFailure,
				},
				Status: v1.PodStatus{
					Phase: v1.PodSucceeded,
					PodIP: "1.2.3.4",
				},
			},
			expected: false,
		},
		{
			name: "Empty Pod IPs, Running pod with OnFailure RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
				Status: v1.PodStatus{
					Phase:  v1.PodRunning,
					PodIP:  "",
					PodIPs: []v1.PodIP{},
				},
			},
			expected: false,
		},
		{
			name: "Terminating Pod with includeTerminating=false",
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: &metav1.Time{
						Time: time.Now(),
					},
				},
				Spec: v1.PodSpec{},
				Status: v1.PodStatus{
					Phase: v1.PodRunning,
					PodIP: "1.2.3.4",
				},
			},
			expected: false,
		},
		{
			name: "Failed pod with Always RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyAlways,
				},
				Status: v1.PodStatus{
					Phase: v1.PodFailed,
					PodIP: "1.2.3.4",
				},
			},
			expected: false,
		},
		// Pod should be in endpoints:
		{
			name: "Pending pod with Never RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
				Status: v1.PodStatus{
					Phase: v1.PodPending,
					PodIP: "1.2.3.4",
				},
			},
			expected: true,
		},
		{
			name: "Unknown pod with OnFailure RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyOnFailure,
				},
				Status: v1.PodStatus{
					Phase: v1.PodUnknown,
					PodIP: "1.2.3.4",
				},
			},
			expected: true,
		},
		{
			name: "Running pod with Never RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
				Status: v1.PodStatus{
					Phase: v1.PodRunning,
					PodIP: "1.2.3.4",
				},
			},
			expected: true,
		},
		{
			name: "Multiple Pod IPs, Running pod with OnFailure RestartPolicy",
			pod: &v1.Pod{
				Spec: v1.PodSpec{
					RestartPolicy: v1.RestartPolicyNever,
				},
				Status: v1.PodStatus{
					Phase:  v1.PodRunning,
					PodIPs: []v1.PodIP{{IP: "1.2.3.4"}, {IP: "1234::5678:0000:0000:9abc:def0"}},
				},
			},
			expected: true,
		},
		{
			name: "Terminating Pod with includeTerminating=true",
			pod: &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					DeletionTimestamp: &metav1.Time{
						Time: time.Now(),
					},
				},
				Spec: v1.PodSpec{},
				Status: v1.PodStatus{
					Phase: v1.PodRunning,
					PodIP: "1.2.3.4",
				},
			},
			expected:           true,
			includeTerminating: true,
		},
	}

	for _, test := range testCases {
		t.Run(test.name, func(t *testing.T) {
			result := ShouldPodBeInEndpoints(test.pod, test.includeTerminating)
			if result != test.expected {
				t.Errorf("expected: %t, got: %t", test.expected, result)
			}
		})
	}
}

func TestShouldSetHostname(t *testing.T) {
	testCases := map[string]struct {
		pod      *v1.Pod
		service  *v1.Service
		expected bool
	}{
		"all matching": {
			pod:      genSimplePod("ns", "foo", "svc-name"),
			service:  genSimpleSvc("ns", "svc-name"),
			expected: true,
		},
		"all matching, hostname not set": {
			pod:      genSimplePod("ns", "", "svc-name"),
			service:  genSimpleSvc("ns", "svc-name"),
			expected: false,
		},
		"all set, different name/subdomain": {
			pod:      genSimplePod("ns", "hostname", "subdomain"),
			service:  genSimpleSvc("ns", "name"),
			expected: false,
		},
		"all set, different namespace": {
			pod:      genSimplePod("ns1", "hostname", "svc-name"),
			service:  genSimpleSvc("ns2", "svc-name"),
			expected: false,
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			result := ShouldSetHostname(testCase.pod, testCase.service)
			if result != testCase.expected {
				t.Errorf("expected: %t, got: %t", testCase.expected, result)
			}
		})
	}
}

func genSimplePod(namespace, hostname, subdomain string) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: namespace,
		},
		Spec: v1.PodSpec{
			Hostname:  hostname,
			Subdomain: subdomain,
		},
	}
}

func genSimpleSvc(namespace, name string) *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
	}
}

func TestGetPodServiceMemberships(t *testing.T) {
	fakeInformerFactory := informers.NewSharedInformerFactory(&fake.Clientset{}, 0*time.Second)
	for i := 0; i < 3; i++ {
		service := &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("service-%d", i),
				Namespace: "test",
			},
			Spec: v1.ServiceSpec{
				Selector: map[string]string{
					"app": fmt.Sprintf("test-%d", i),
				},
			},
		}
		fakeInformerFactory.Core().V1().Services().Informer().GetStore().Add(service)
	}
	var pods []*v1.Pod
	for i := 0; i < 5; i++ {
		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: "test",
				Name:      fmt.Sprintf("test-pod-%d", i),
				Labels: map[string]string{
					"app":   fmt.Sprintf("test-%d", i),
					"label": fmt.Sprintf("label-%d", i),
				},
			},
		}
		pods = append(pods, pod)
	}

	tests := []struct {
		name   string
		pod    *v1.Pod
		expect sets.String
	}{
		{
			name:   "get service memberships for pod-0",
			pod:    pods[0],
			expect: sets.NewString("test/service-0"),
		},
		{
			name:   "get service memberships for pod-1",
			pod:    pods[1],
			expect: sets.NewString("test/service-1"),
		},
		{
			name:   "get service memberships for pod-2",
			pod:    pods[2],
			expect: sets.NewString("test/service-2"),
		},
		{
			name:   "get service memberships for pod-3",
			pod:    pods[3],
			expect: sets.NewString(),
		},
		{
			name:   "get service memberships for pod-4",
			pod:    pods[4],
			expect: sets.NewString(),
		},
	}
	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			services, err := GetPodServiceMemberships(fakeInformerFactory.Core().V1().Services().Lister(), test.pod)
			if err != nil {
				t.Errorf("Error from GetPodServiceMemberships: %v", err)
			} else if !services.Equal(test.expect) {
				t.Errorf("Expected services %v, but got %v", test.expect, services)
			}
		})
	}
}

func BenchmarkGetPodServiceMemberships(b *testing.B) {
	// init fake service informer.
	fakeInformerFactory := informers.NewSharedInformerFactory(&fake.Clientset{}, 0*time.Second)
	for i := 0; i < 1000; i++ {
		service := &v1.Service{
			ObjectMeta: metav1.ObjectMeta{
				Name:      fmt.Sprintf("service-%d", i),
				Namespace: "test",
			},
			Spec: v1.ServiceSpec{
				Selector: map[string]string{
					"app": fmt.Sprintf("test-%d", i),
				},
			},
		}
		fakeInformerFactory.Core().V1().Services().Informer().GetStore().Add(service)
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "test",
			Name:      "test-pod-0",
			Labels: map[string]string{
				"app": "test-0",
			},
		},
	}

	expect := sets.NewString("test/service-0")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		services, err := GetPodServiceMemberships(fakeInformerFactory.Core().V1().Services().Lister(), pod)
		if err != nil {
			b.Fatalf("Error from GetPodServiceMemberships(): %v", err)
		}
		if len(services) != len(expect) {
			b.Errorf("Expected services size %d, but got: %d", len(expect), len(services))
		}
	}
}

func Test_podChanged(t *testing.T) {
	testCases := []struct {
		testName      string
		modifier      func(*v1.Pod, *v1.Pod)
		podChanged    bool
		labelsChanged bool
	}{
		{
			testName:      "no changes",
			modifier:      func(old, new *v1.Pod) {},
			podChanged:    false,
			labelsChanged: false,
		}, {
			testName: "change NodeName",
			modifier: func(old, new *v1.Pod) {
				new.Spec.NodeName = "changed"
			},
			// NodeName can only change before the pod has an IP, and we don't care about the
			// pod yet at that point so we ignore this change
			podChanged:    false,
			labelsChanged: false,
		}, {
			testName: "change ResourceVersion",
			modifier: func(old, new *v1.Pod) {
				new.ObjectMeta.ResourceVersion = "changed"
			},
			// ResourceVersion is intentionally ignored if nothing else changed
			podChanged:    false,
			labelsChanged: false,
		}, {
			testName: "add primary IPv4",
			modifier: func(old, new *v1.Pod) {
				new.Status.PodIP = "1.2.3.4"
				new.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "modify primary IPv4",
			modifier: func(old, new *v1.Pod) {
				old.Status.PodIP = "1.2.3.4"
				old.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}}
				new.Status.PodIP = "2.3.4.5"
				new.Status.PodIPs = []v1.PodIP{{IP: "2.3.4.5"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "add primary IPv6",
			modifier: func(old, new *v1.Pod) {
				new.Status.PodIP = "fd00:10:96::1"
				new.Status.PodIPs = []v1.PodIP{{IP: "fd00:10:96::1"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "modify primary IPv6",
			modifier: func(old, new *v1.Pod) {
				old.Status.PodIP = "fd00:10:96::1"
				old.Status.PodIPs = []v1.PodIP{{IP: "fd00:10:96::1"}}
				new.Status.PodIP = "fd00:10:96::2"
				new.Status.PodIPs = []v1.PodIP{{IP: "fd00:10:96::2"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "add secondary IP",
			modifier: func(old, new *v1.Pod) {
				old.Status.PodIP = "1.2.3.4"
				old.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}}
				new.Status.PodIP = "1.2.3.4"
				new.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}, {IP: "fd00:10:96::1"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "modify secondary IP",
			modifier: func(old, new *v1.Pod) {
				old.Status.PodIP = "1.2.3.4"
				old.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}, {IP: "fd00:10:96::1"}}
				new.Status.PodIP = "1.2.3.4"
				new.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}, {IP: "fd00:10:96::2"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "remove secondary IP",
			modifier: func(old, new *v1.Pod) {
				old.Status.PodIP = "1.2.3.4"
				old.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}, {IP: "fd00:10:96::1"}}
				new.Status.PodIP = "1.2.3.4"
				new.Status.PodIPs = []v1.PodIP{{IP: "1.2.3.4"}}
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "change readiness",
			modifier: func(old, new *v1.Pod) {
				new.Status.Conditions[0].Status = v1.ConditionTrue
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "mark for deletion",
			modifier: func(old, new *v1.Pod) {
				now := metav1.NewTime(time.Now().UTC())
				new.ObjectMeta.DeletionTimestamp = &now
			},
			podChanged:    true,
			labelsChanged: false,
		}, {
			testName: "add label",
			modifier: func(old, new *v1.Pod) {
				new.Labels["label"] = "new"
			},
			podChanged:    false,
			labelsChanged: true,
		}, {
			testName: "modify label",
			modifier: func(old, new *v1.Pod) {
				old.Labels["label"] = "old"
				new.Labels["label"] = "new"
			},
			podChanged:    false,
			labelsChanged: true,
		}, {
			testName: "remove label",
			modifier: func(old, new *v1.Pod) {
				old.Labels["label"] = "old"
			},
			podChanged:    false,
			labelsChanged: true,
		},
	}

	orig := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: "test",
			Name:      "pod",
			Labels:    map[string]string{"foo": "bar"},
		},
		Status: v1.PodStatus{
			Conditions: []v1.PodCondition{
				{Type: v1.PodReady, Status: v1.ConditionFalse},
			},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.testName, func(t *testing.T) {
			old := orig.DeepCopy()
			new := old.DeepCopy()
			tc.modifier(old, new)

			podChanged, labelsChanged := podEndpointsChanged(old, new)
			if podChanged != tc.podChanged {
				t.Errorf("Expected podChanged to be %t, got %t", tc.podChanged, podChanged)
			}
			if labelsChanged != tc.labelsChanged {
				t.Errorf("Expected labelsChanged to be %t, got %t", tc.labelsChanged, labelsChanged)
			}
		})
	}
}

func TestEndpointsEqualBeyondHash(t *testing.T) {
	tests := []struct {
		name     string
		ep1      *discovery.Endpoint
		ep2      *discovery.Endpoint
		expected bool
	}{
		{
			name: "No change",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				NodeName:  pointer.String("node-1"),
			},
			expected: true,
		},
		{
			name: "NodeName changed",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				NodeName:  pointer.String("node-2"),
			},
			expected: false,
		},
		{
			name: "Zone changed",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-2"),
			},
			expected: false,
		},
		{
			name: "Ready condition changed",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(false),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			expected: false,
		},
		{
			name: "Serving condition changed from nil to true",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready:       pointer.Bool(true),
					Serving:     nil,
					Terminating: nil,
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready:       pointer.Bool(true),
					Serving:     pointer.Bool(true),
					Terminating: pointer.Bool(false),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			expected: false,
		},
		{
			name: "Serving condition changed from false to true",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready:       pointer.Bool(true),
					Serving:     pointer.Bool(false),
					Terminating: pointer.Bool(false),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready:       pointer.Bool(true),
					Serving:     pointer.Bool(true),
					Terminating: pointer.Bool(false),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			expected: false,
		},
		{
			name: "Pod name changed",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod1"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			expected: false,
		},
		{
			name: "Pod resourceVersion changed",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0", ResourceVersion: "1"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0", ResourceVersion: "2"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			expected: true,
		},
		{
			name: "Pod resourceVersion removed",
			ep1: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0", ResourceVersion: "1"},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			ep2: &discovery.Endpoint{
				Conditions: discovery.EndpointConditions{
					Ready: pointer.Bool(true),
				},
				Addresses: []string{"10.0.0.1"},
				TargetRef: &v1.ObjectReference{Kind: "Pod", Namespace: "default", Name: "pod0", ResourceVersion: ""},
				Zone:      pointer.String("zone-1"),
				NodeName:  pointer.String("node-1"),
			},
			expected: true,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := EndpointsEqualBeyondHash(tt.ep1, tt.ep2); got != tt.expected {
				t.Errorf("EndpointsEqualBeyondHash() = %v, want %v", got, tt.expected)
			}
		})
	}
}

func TestEndpointSubsetsEqualIgnoreResourceVersion(t *testing.T) {
	copyAndMutateEndpointSubset := func(orig *v1.EndpointSubset, mutator func(*v1.EndpointSubset)) *v1.EndpointSubset {
		newSubSet := orig.DeepCopy()
		mutator(newSubSet)
		return newSubSet
	}
	es1 := &v1.EndpointSubset{
		Addresses: []v1.EndpointAddress{
			{
				IP:        "1.1.1.1",
				TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1-1", Namespace: "ns", ResourceVersion: "1"},
			},
		},
		NotReadyAddresses: []v1.EndpointAddress{
			{
				IP:        "1.1.1.2",
				TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod1-2", Namespace: "ns2", ResourceVersion: "2"},
			},
		},
		Ports: []v1.EndpointPort{{Port: 8081, Protocol: "TCP"}},
	}
	es2 := &v1.EndpointSubset{
		Addresses: []v1.EndpointAddress{
			{
				IP:        "2.2.2.1",
				TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod2-1", Namespace: "ns", ResourceVersion: "3"},
			},
		},
		NotReadyAddresses: []v1.EndpointAddress{
			{
				IP:        "2.2.2.2",
				TargetRef: &v1.ObjectReference{Kind: "Pod", Name: "pod2-2", Namespace: "ns2", ResourceVersion: "4"},
			},
		},
		Ports: []v1.EndpointPort{{Port: 8082, Protocol: "TCP"}},
	}
	tests := []struct {
		name     string
		subsets1 []v1.EndpointSubset
		subsets2 []v1.EndpointSubset
		expected bool
	}{
		{
			name:     "Subsets removed",
			subsets1: []v1.EndpointSubset{*es1, *es2},
			subsets2: []v1.EndpointSubset{*es1},
			expected: false,
		},
		{
			name:     "Ready Pod IP changed",
			subsets1: []v1.EndpointSubset{*es1, *es2},
			subsets2: []v1.EndpointSubset{*copyAndMutateEndpointSubset(es1, func(es *v1.EndpointSubset) {
				es.Addresses[0].IP = "1.1.1.10"
			}), *es2},
			expected: false,
		},
		{
			name:     "NotReady Pod IP changed",
			subsets1: []v1.EndpointSubset{*es1, *es2},
			subsets2: []v1.EndpointSubset{*es1, *copyAndMutateEndpointSubset(es2, func(es *v1.EndpointSubset) {
				es.NotReadyAddresses[0].IP = "2.2.2.10"
			})},
			expected: false,
		},
		{
			name:     "Pod ResourceVersion changed",
			subsets1: []v1.EndpointSubset{*es1, *es2},
			subsets2: []v1.EndpointSubset{*es1, *copyAndMutateEndpointSubset(es2, func(es *v1.EndpointSubset) {
				es.Addresses[0].TargetRef.ResourceVersion = "100"
			})},
			expected: true,
		},
		{
			name:     "Pod ResourceVersion removed",
			subsets1: []v1.EndpointSubset{*es1, *es2},
			subsets2: []v1.EndpointSubset{*es1, *copyAndMutateEndpointSubset(es2, func(es *v1.EndpointSubset) {
				es.Addresses[0].TargetRef.ResourceVersion = ""
			})},
			expected: true,
		},
		{
			name:     "Ports changed",
			subsets1: []v1.EndpointSubset{*es1, *es2},
			subsets2: []v1.EndpointSubset{*es1, *copyAndMutateEndpointSubset(es1, func(es *v1.EndpointSubset) {
				es.Ports[0].Port = 8082
			})},
			expected: false,
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := EndpointSubsetsEqualIgnoreResourceVersion(tt.subsets1, tt.subsets2); got != tt.expected {
				t.Errorf("EndpointSubsetsEqualIgnoreResourceVersion() = %v, expected %v", got, tt.expected)
			}
		})
	}
}
@@ -1,161 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoint

import (
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// TriggerTimeTracker is used to compute an EndpointsLastChangeTriggerTime
// annotation. See the documentation for that annotation for more details.
//
// Please note that this util may compute a wrong EndpointsLastChangeTriggerTime
// if the same object changes multiple times between two consecutive syncs.
// We're aware of this limitation but we decided to accept it, as fixing it
// would require a major rewrite of the endpoint(Slice) controller and
// Informer framework. Such situations, i.e. frequent updates of the same object
// in a single sync period, should be relatively rare and therefore this util
// should provide a good approximation of the EndpointsLastChangeTriggerTime.
type TriggerTimeTracker struct {
	// ServiceStates is a map, indexed by Service object key, storing the last
	// known Service object state observed during the most recent call of the
	// ComputeEndpointLastChangeTriggerTime function.
	ServiceStates map[ServiceKey]ServiceState

	// mutex guarding the ServiceStates map.
	mutex sync.Mutex
}

// NewTriggerTimeTracker creates a new instance of the TriggerTimeTracker.
func NewTriggerTimeTracker() *TriggerTimeTracker {
	return &TriggerTimeTracker{
		ServiceStates: make(map[ServiceKey]ServiceState),
	}
}

// ServiceKey is a key uniquely identifying a Service.
type ServiceKey struct {
	// namespace, name composing a namespaced name - a unique identifier of every Service.
	Namespace, Name string
}

// ServiceState represents the state of a Service object that is known to this util.
type ServiceState struct {
	// lastServiceTriggerTime is the service trigger time observed most recently.
	lastServiceTriggerTime time.Time
	// lastPodTriggerTimes is a map (Pod name -> time) storing the pod trigger
	// times that were observed during the most recent call of the
	// ComputeEndpointLastChangeTriggerTime function.
	lastPodTriggerTimes map[string]time.Time
}

// ComputeEndpointLastChangeTriggerTime updates the state of the Service/Endpoint
// object being synced and returns the time that should be exported as the
// EndpointsLastChangeTriggerTime annotation.
//
// If the method returns a 'zero' time the EndpointsLastChangeTriggerTime
// annotation shouldn't be exported.
//
// Please note that this function may compute a wrong value if the same object
// (pod/service) changes multiple times between two consecutive syncs.
//
// Important: This method is goroutine-safe but only when called for different
// keys. The method shouldn't be called concurrently for the same key! This
// contract is fulfilled in the current implementation of the endpoint(slice)
// controller.
func (t *TriggerTimeTracker) ComputeEndpointLastChangeTriggerTime(
	namespace string, service *v1.Service, pods []*v1.Pod) time.Time {

	key := ServiceKey{Namespace: namespace, Name: service.Name}
	// As there won't be any concurrent calls for the same key, we need to guard
	// access only to the ServiceStates map.
	t.mutex.Lock()
	state, wasKnown := t.ServiceStates[key]
	t.mutex.Unlock()

	// Update the state before returning.
	defer func() {
		t.mutex.Lock()
		t.ServiceStates[key] = state
		t.mutex.Unlock()
	}()

	// minChangedTriggerTime is the min trigger time of all trigger times that
	// have changed since the last sync.
	var minChangedTriggerTime time.Time
	podTriggerTimes := make(map[string]time.Time)
	for _, pod := range pods {
		if podTriggerTime := getPodTriggerTime(pod); !podTriggerTime.IsZero() {
			podTriggerTimes[pod.Name] = podTriggerTime
			if podTriggerTime.After(state.lastPodTriggerTimes[pod.Name]) {
				// Pod trigger time has changed since the last sync, update minChangedTriggerTime.
				minChangedTriggerTime = min(minChangedTriggerTime, podTriggerTime)
			}
		}
	}
	serviceTriggerTime := getServiceTriggerTime(service)
	if serviceTriggerTime.After(state.lastServiceTriggerTime) {
		// Service trigger time has changed since the last sync, update minChangedTriggerTime.
		minChangedTriggerTime = min(minChangedTriggerTime, serviceTriggerTime)
	}

	state.lastPodTriggerTimes = podTriggerTimes
	state.lastServiceTriggerTime = serviceTriggerTime

	if !wasKnown {
		// New Service, use Service creationTimestamp.
		return service.CreationTimestamp.Time
	}

	// Regular update of endpoint objects, return min of changed trigger times.
	return minChangedTriggerTime
}
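
// Illustrative usage (not from the original source): the endpoint(slice)
// controller's per-Service sync loop is the only caller for a given key,
// which satisfies the concurrency contract above.
//
//	tracker := NewTriggerTimeTracker()
//	// First sync of a new Service: returns the Service's creationTimestamp.
//	triggerTime := tracker.ComputeEndpointLastChangeTriggerTime("ns", svc, pods)
//	// A later sync after a pod readiness flip returns the earliest trigger
//	// time that changed; a zero time means "don't export the annotation".
//	triggerTime = tracker.ComputeEndpointLastChangeTriggerTime("ns", svc, pods)
//	if !triggerTime.IsZero() {
//		// export triggerTime as the EndpointsLastChangeTriggerTime annotation
//	}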

// DeleteService deletes service state stored in this util.
func (t *TriggerTimeTracker) DeleteService(namespace, name string) {
	key := ServiceKey{Namespace: namespace, Name: name}
	t.mutex.Lock()
	defer t.mutex.Unlock()
	delete(t.ServiceStates, key)
}

// getPodTriggerTime returns the time of the pod change (trigger) that resulted
// or will result in the endpoint object change.
func getPodTriggerTime(pod *v1.Pod) (triggerTime time.Time) {
	if readyCondition := podutil.GetPodReadyCondition(pod.Status); readyCondition != nil {
		triggerTime = readyCondition.LastTransitionTime.Time
	}
	return triggerTime
}

// getServiceTriggerTime returns the time of the service change (trigger) that
// resulted or will result in the endpoint change.
func getServiceTriggerTime(service *v1.Service) (triggerTime time.Time) {
	return service.CreationTimestamp.Time
}

// min returns the minimum of currentMin and newValue, or newValue if currentMin is not set.
func min(currentMin, newValue time.Time) time.Time {
	if currentMin.IsZero() || newValue.Before(currentMin) {
		return newValue
	}
	return currentMin
}
@@ -1,204 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpoint

import (
	"runtime"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

var (
	t0 = time.Date(2019, 01, 01, 0, 0, 0, 0, time.UTC)
	t1 = t0.Add(time.Second)
	t2 = t1.Add(time.Second)
	t3 = t2.Add(time.Second)
	t4 = t3.Add(time.Second)
	t5 = t4.Add(time.Second)

	ttNamespace   = "ttNamespace1"
	ttServiceName = "my-service"
)

func TestNewServiceNoPods(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t2)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service).expect(t2)
}

func TestNewServiceExistingPods(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t3)
	pod1 := createPod(ttNamespace, "pod1", t0)
	pod2 := createPod(ttNamespace, "pod2", t1)
	pod3 := createPod(ttNamespace, "pod3", t5)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2, pod3).
		// Pods were created before service, but trigger time is the time when service was created.
		expect(t3)
}

func TestPodsAdded(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t0)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service).expect(t0)

	pod1 := createPod(ttNamespace, "pod1", t2)
	pod2 := createPod(ttNamespace, "pod2", t1)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expect(t1)
}

func TestPodsUpdated(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t0)
	pod1 := createPod(ttNamespace, "pod1", t1)
	pod2 := createPod(ttNamespace, "pod2", t2)
	pod3 := createPod(ttNamespace, "pod3", t3)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2, pod3).expect(t0)

	pod1 = createPod(ttNamespace, "pod1", t5)
	pod2 = createPod(ttNamespace, "pod2", t4)
	// pod3 doesn't change.
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2, pod3).expect(t4)
}

func TestPodsUpdatedNoOp(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t0)
	pod1 := createPod(ttNamespace, "pod1", t1)
	pod2 := createPod(ttNamespace, "pod2", t2)
	pod3 := createPod(ttNamespace, "pod3", t3)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2, pod3).expect(t0)

	// Nothing has changed.
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2, pod3).expectNil()
}

func TestPodDeletedThenAdded(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t0)
	pod1 := createPod(ttNamespace, "pod1", t1)
	pod2 := createPod(ttNamespace, "pod2", t2)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expect(t0)

	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1).expectNil()

	pod2 = createPod(ttNamespace, "pod2", t4)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expect(t4)
}

func TestServiceDeletedThenAdded(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t0)
	pod1 := createPod(ttNamespace, "pod1", t1)
	pod2 := createPod(ttNamespace, "pod2", t2)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expect(t0)

	tester.DeleteService(ttNamespace, ttServiceName)

	service = createService(ttNamespace, ttServiceName, t3)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expect(t3)
}

func TestServiceUpdatedNoPodChange(t *testing.T) {
	tester := newTester(t)

	service := createService(ttNamespace, ttServiceName, t0)
	pod1 := createPod(ttNamespace, "pod1", t1)
	pod2 := createPod(ttNamespace, "pod2", t2)
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expect(t0)

	// service's ports have changed.
	service.Spec = v1.ServiceSpec{
		Selector: map[string]string{},
		Ports:    []v1.ServicePort{{Port: 80, TargetPort: intstr.FromInt32(8080), Protocol: "TCP"}},
	}

	// Currently we're not able to calculate trigger time for service updates, hence the returned
	// value is a zero time.
	tester.whenComputeEndpointLastChangeTriggerTime(ttNamespace, service, pod1, pod2).expectNil()
}

// ------- Test Utils -------

type tester struct {
	*TriggerTimeTracker
	t *testing.T
}

func newTester(t *testing.T) *tester {
	return &tester{NewTriggerTimeTracker(), t}
}

func (t *tester) whenComputeEndpointLastChangeTriggerTime(
	namespace string, service *v1.Service, pods ...*v1.Pod) subject {
	return subject{t.ComputeEndpointLastChangeTriggerTime(namespace, service, pods), t.t}
}

type subject struct {
	got time.Time
	t   *testing.T
}

func (s subject) expect(expected time.Time) {
	s.doExpect(expected)
}

func (s subject) expectNil() {
	s.doExpect(time.Time{})
}

func (s subject) doExpect(expected time.Time) {
	if s.got != expected {
		_, fn, line, _ := runtime.Caller(2)
		s.t.Errorf("Wrong trigger time in %s:%d expected %s, got %s", fn, line, expected, s.got)
	}
}

func createPod(namespace, podName string, readyTime time.Time) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Namespace: namespace, Name: podName},
		Status: v1.PodStatus{Conditions: []v1.PodCondition{
			{
				Type:               v1.PodReady,
				Status:             v1.ConditionTrue,
				LastTransitionTime: metav1.NewTime(readyTime),
			},
		}},
	}
}

func createService(namespace, serviceName string, creationTime time.Time) *v1.Service {
	return &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Namespace:         namespace,
			Name:              serviceName,
			CreationTimestamp: metav1.NewTime(creationTime),
		},
	}
}
@@ -1,102 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointslice

import (
	"sort"

	discovery "k8s.io/api/discovery/v1"
	endpointutil "k8s.io/kubernetes/pkg/controller/util/endpoint"
)

// endpointHash is used to uniquely identify endpoints. Only including
// addresses, hostnames, and target references as unique identifiers allows us
// to do more in-place updates should attributes such as topology or conditions
// change.
type endpointHash string

type endpointHashObj struct {
	Addresses []string
	Hostname  string
	Namespace string
	Name      string
}

func hashEndpoint(endpoint *discovery.Endpoint) endpointHash {
	sort.Strings(endpoint.Addresses)
	hashObj := endpointHashObj{Addresses: endpoint.Addresses}
	if endpoint.Hostname != nil {
		hashObj.Hostname = *endpoint.Hostname
	}
	if endpoint.TargetRef != nil {
		hashObj.Namespace = endpoint.TargetRef.Namespace
		hashObj.Name = endpoint.TargetRef.Name
	}

	return endpointHash(endpointutil.DeepHashObjectToString(hashObj))
}
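
// Illustrative sketch (not part of the original file): because hashEndpoint
// covers only addresses, hostname, and target reference, two Endpoints that
// differ solely in their conditions hash to the same key, so the EndpointSet
// defined below treats them as the same endpoint and a reconciler can update
// it in place rather than remove and re-add it.
//
//	ready, notReady := true, false
//	a := &discovery.Endpoint{
//		Addresses:  []string{"10.0.0.1"},
//		Conditions: discovery.EndpointConditions{Ready: &ready},
//	}
//	b := &discovery.Endpoint{
//		Addresses:  []string{"10.0.0.1"},
//		Conditions: discovery.EndpointConditions{Ready: &notReady},
//	}
//	s := EndpointSet{}
//	s.Insert(a)
//	_ = s.Has(b) // true: conditions are excluded from the hash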

// EndpointSet provides simple methods for comparing sets of Endpoints.
type EndpointSet map[endpointHash]*discovery.Endpoint

// Insert adds items to the set.
func (s EndpointSet) Insert(items ...*discovery.Endpoint) EndpointSet {
	for _, item := range items {
		s[hashEndpoint(item)] = item
	}
	return s
}

// Delete removes the given items from the set.
func (s EndpointSet) Delete(items ...*discovery.Endpoint) EndpointSet {
	for _, item := range items {
		delete(s, hashEndpoint(item))
	}
	return s
}

// Has returns true if and only if item is contained in the set.
func (s EndpointSet) Has(item *discovery.Endpoint) bool {
	_, contained := s[hashEndpoint(item)]
	return contained
}

// Get returns the endpoint matching item's hash if it is contained in the set,
// or nil otherwise.
func (s EndpointSet) Get(item *discovery.Endpoint) *discovery.Endpoint {
	return s[hashEndpoint(item)]
}

// UnsortedList returns the slice with contents in random order.
func (s EndpointSet) UnsortedList() []*discovery.Endpoint {
	endpoints := make([]*discovery.Endpoint, 0, len(s))
	for _, endpoint := range s {
		endpoints = append(endpoints, endpoint)
	}
	return endpoints
}

// PopAny removes and returns a single element from the set, or (nil, false) if
// the set is empty.
func (s EndpointSet) PopAny() (*discovery.Endpoint, bool) {
	for _, endpoint := range s {
		s.Delete(endpoint)
		return endpoint, true
	}
	return nil, false
}

// Len returns the size of the set.
func (s EndpointSet) Len() int {
	return len(s)
}

@@ -1,191 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointslice

import (
	"sync"

	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	"k8s.io/apimachinery/pkg/types"
)

const (
	deletionExpected = -1
)

// GenerationsBySlice tracks expected EndpointSlice generations by EndpointSlice
// uid. A value of deletionExpected (-1) may be used here to indicate that we
// expect this EndpointSlice to be deleted.
type GenerationsBySlice map[types.UID]int64

// EndpointSliceTracker tracks EndpointSlices and their associated generation to
// help determine if a change to an EndpointSlice has been processed by the
// EndpointSlice controller.
type EndpointSliceTracker struct {
	// lock protects generationsByService.
	lock sync.Mutex
	// generationsByService tracks the generations of EndpointSlices for each
	// Service.
	generationsByService map[types.NamespacedName]GenerationsBySlice
}

// NewEndpointSliceTracker creates and initializes a new EndpointSliceTracker.
func NewEndpointSliceTracker() *EndpointSliceTracker {
	return &EndpointSliceTracker{
		generationsByService: map[types.NamespacedName]GenerationsBySlice{},
	}
}

// Has returns true if the EndpointSliceTracker has a generation for the
// provided EndpointSlice.
func (est *EndpointSliceTracker) Has(endpointSlice *discovery.EndpointSlice) bool {
	est.lock.Lock()
	defer est.lock.Unlock()

	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)
	if !ok {
		return false
	}
	_, ok = gfs[endpointSlice.UID]
	return ok
}

// ShouldSync returns true if this EndpointSliceTracker does not have a
// generation for the provided EndpointSlice, or if the provided
// EndpointSlice's generation is greater than the tracked one.
func (est *EndpointSliceTracker) ShouldSync(endpointSlice *discovery.EndpointSlice) bool {
	est.lock.Lock()
	defer est.lock.Unlock()

	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)
	if !ok {
		return true
	}
	g, ok := gfs[endpointSlice.UID]
	return !ok || endpointSlice.Generation > g
}
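
// Illustrative sketch (not part of the original file) of how a consumer uses
// the two methods above together: Update records the generation the controller
// last wrote, and ShouldSync then distinguishes the controller's own informer
// echo from a genuinely newer object. Assumes the usual metav1 import.
//
//	tracker := NewEndpointSliceTracker()
//	slice := &discovery.EndpointSlice{ObjectMeta: metav1.ObjectMeta{
//		Name: "example-1", Namespace: "ns", UID: "uid-1", Generation: 1,
//		Labels: map[string]string{discovery.LabelServiceName: "svc"},
//	}}
//	tracker.Update(slice)         // cache generation 1 after writing it
//	_ = tracker.ShouldSync(slice) // false: this is our own update echoed back
//	slice.Generation = 2          // someone else modified the slice
//	_ = tracker.ShouldSync(slice) // true: generation moved past what we wrote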

// StaleSlices returns true if any of the following are true:
// 1. One or more of the provided EndpointSlices have older generations than the
// corresponding tracked ones.
// 2. The tracker is expecting one or more of the provided EndpointSlices to be
// deleted. (EndpointSlices that have already been marked for deletion are ignored here.)
// 3. The tracker is tracking EndpointSlices that have not been provided.
func (est *EndpointSliceTracker) StaleSlices(service *v1.Service, endpointSlices []*discovery.EndpointSlice) bool {
	est.lock.Lock()
	defer est.lock.Unlock()

	nn := types.NamespacedName{Name: service.Name, Namespace: service.Namespace}
	gfs, ok := est.generationsByService[nn]
	if !ok {
		return false
	}
	providedSlices := map[types.UID]int64{}
	for _, endpointSlice := range endpointSlices {
		providedSlices[endpointSlice.UID] = endpointSlice.Generation
		g, ok := gfs[endpointSlice.UID]
		if ok && (g == deletionExpected || g > endpointSlice.Generation) {
			return true
		}
	}
	for uid, generation := range gfs {
		if generation == deletionExpected {
			continue
		}
		_, ok := providedSlices[uid]
		if !ok {
			return true
		}
	}
	return false
}
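
// Illustrative sketch (not part of the original file): the third condition
// above is what catches out-of-band deletions. If the tracker still has a
// generation for a slice that the lister no longer returns, the informer cache
// is behind and the sync should be retried once the cache catches up. Here
// slice and svc are assumed to be a labeled EndpointSlice and its matching
// *v1.Service, as in the earlier sketch.
//
//	tracker := NewEndpointSliceTracker()
//	tracker.Update(slice)
//	_ = tracker.StaleSlices(svc, []*discovery.EndpointSlice{}) // true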

// Update adds or updates the generation in this EndpointSliceTracker for the
// provided EndpointSlice.
func (est *EndpointSliceTracker) Update(endpointSlice *discovery.EndpointSlice) {
	est.lock.Lock()
	defer est.lock.Unlock()

	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)

	if !ok {
		gfs = GenerationsBySlice{}
		est.generationsByService[getServiceNN(endpointSlice)] = gfs
	}
	gfs[endpointSlice.UID] = endpointSlice.Generation
}

// DeleteService removes the set of generations tracked for the Service.
func (est *EndpointSliceTracker) DeleteService(namespace, name string) {
	est.lock.Lock()
	defer est.lock.Unlock()

	serviceNN := types.NamespacedName{Name: name, Namespace: namespace}
	delete(est.generationsByService, serviceNN)
}

// ExpectDeletion sets the generation to deletionExpected in this
// EndpointSliceTracker for the provided EndpointSlice.
func (est *EndpointSliceTracker) ExpectDeletion(endpointSlice *discovery.EndpointSlice) {
	est.lock.Lock()
	defer est.lock.Unlock()

	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)

	if !ok {
		gfs = GenerationsBySlice{}
		est.generationsByService[getServiceNN(endpointSlice)] = gfs
	}
	gfs[endpointSlice.UID] = deletionExpected
}

// HandleDeletion removes the generation in this EndpointSliceTracker for the
// provided EndpointSlice. It returns false only if the tracker had a
// generation other than deletionExpected recorded for the EndpointSlice;
// deletions that were expected, or of slices the tracker never saw, return
// true.
func (est *EndpointSliceTracker) HandleDeletion(endpointSlice *discovery.EndpointSlice) bool {
	est.lock.Lock()
	defer est.lock.Unlock()

	gfs, ok := est.GenerationsForSliceUnsafe(endpointSlice)

	if ok {
		g, ok := gfs[endpointSlice.UID]
		delete(gfs, endpointSlice.UID)
		if ok && g != deletionExpected {
			return false
		}
	}

	return true
}
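
// Illustrative sketch (not part of the original file) of the deletion
// handshake between the two methods above: the controller calls ExpectDeletion
// just before issuing the API delete, and the informer's delete handler calls
// HandleDeletion to tell an expected deletion apart from one initiated by
// another actor.
//
//	tracker.ExpectDeletion(slice) // we are about to delete this slice
//	// ... API delete is issued, informer delivers the delete event ...
//	if !tracker.HandleDeletion(slice) {
//		// Deletion was not ours: requeue the Service so it gets reconciled.
//	}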

// GenerationsForSliceUnsafe returns the generations for the Service
// corresponding to the provided EndpointSlice, and a bool to indicate if it
// exists. The lock must be held before calling this function.
func (est *EndpointSliceTracker) GenerationsForSliceUnsafe(endpointSlice *discovery.EndpointSlice) (GenerationsBySlice, bool) {
	serviceNN := getServiceNN(endpointSlice)
	generations, ok := est.generationsByService[serviceNN]
	return generations, ok
}

// getServiceNN returns a namespaced name for the Service corresponding to the
// provided EndpointSlice.
func getServiceNN(endpointSlice *discovery.EndpointSlice) types.NamespacedName {
	serviceName := endpointSlice.Labels[discovery.LabelServiceName]
	return types.NamespacedName{Name: serviceName, Namespace: endpointSlice.Namespace}
}

@@ -1,405 +0,0 @@
/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointslice

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	discovery "k8s.io/api/discovery/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func TestEndpointSliceTrackerUpdate(t *testing.T) {
	epSlice1 := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "example-1",
			Namespace:  "ns1",
			UID:        "original",
			Generation: 1,
			Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
		},
	}

	epSlice1DifferentNS := epSlice1.DeepCopy()
	epSlice1DifferentNS.Namespace = "ns2"
	epSlice1DifferentNS.UID = "diff-ns"

	epSlice1DifferentService := epSlice1.DeepCopy()
	epSlice1DifferentService.Labels[discovery.LabelServiceName] = "svc2"
	epSlice1DifferentService.UID = "diff-svc"

	epSlice1NewerGen := epSlice1.DeepCopy()
	epSlice1NewerGen.Generation = 2

	testCases := map[string]struct {
		updateParam      *discovery.EndpointSlice
		checksParam      *discovery.EndpointSlice
		expectHas        bool
		expectShouldSync bool
		expectGeneration int64
	}{
		"same slice": {
			updateParam:      epSlice1,
			checksParam:      epSlice1,
			expectHas:        true,
			expectShouldSync: false,
			expectGeneration: epSlice1.Generation,
		},
		"different namespace": {
			updateParam:      epSlice1,
			checksParam:      epSlice1DifferentNS,
			expectHas:        false,
			expectShouldSync: true,
			expectGeneration: epSlice1.Generation,
		},
		"different service": {
			updateParam:      epSlice1,
			checksParam:      epSlice1DifferentService,
			expectHas:        false,
			expectShouldSync: true,
			expectGeneration: epSlice1.Generation,
		},
		"newer generation": {
			updateParam:      epSlice1,
			checksParam:      epSlice1NewerGen,
			expectHas:        true,
			expectShouldSync: true,
			expectGeneration: epSlice1.Generation,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			esTracker := NewEndpointSliceTracker()
			esTracker.Update(tc.updateParam)
			if esTracker.Has(tc.checksParam) != tc.expectHas {
				t.Errorf("esTracker.Has(%+v) == %t, expected %t", tc.checksParam, esTracker.Has(tc.checksParam), tc.expectHas)
			}
			if esTracker.ShouldSync(tc.checksParam) != tc.expectShouldSync {
				t.Errorf("esTracker.ShouldSync(%+v) == %t, expected %t", tc.checksParam, esTracker.ShouldSync(tc.checksParam), tc.expectShouldSync)
			}
			serviceNN := types.NamespacedName{Namespace: epSlice1.Namespace, Name: "svc1"}
			gfs, ok := esTracker.generationsByService[serviceNN]
			if !ok {
				t.Fatalf("expected tracker to have generations for %s Service", serviceNN.Name)
			}
			generation, ok := gfs[epSlice1.UID]
			if !ok {
				t.Fatalf("expected tracker to have generation for %s EndpointSlice", epSlice1.Name)
			}
			if tc.expectGeneration != generation {
				t.Fatalf("expected generation to be %d, got %d", tc.expectGeneration, generation)
			}
		})
	}
}

func TestEndpointSliceTrackerStaleSlices(t *testing.T) {
	epSlice1 := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "example-1",
			Namespace:  "ns1",
			UID:        "original",
			Generation: 1,
			Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
		},
	}

	epSlice1NewerGen := epSlice1.DeepCopy()
	epSlice1NewerGen.Generation = 2

	epTerminatingSlice := epSlice1.DeepCopy()
	now := metav1.Now()
	epTerminatingSlice.DeletionTimestamp = &now

	testCases := []struct {
		name         string
		tracker      *EndpointSliceTracker
		serviceParam *v1.Service
		slicesParam  []*discovery.EndpointSlice
		expectNewer  bool
	}{{
		name: "empty tracker",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{},
		expectNewer:  false,
	}, {
		name: "empty slices",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{
				{Name: "svc1", Namespace: "ns1"}: {},
			},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{},
		expectNewer:  false,
	}, {
		name: "matching slices",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{
				{Name: "svc1", Namespace: "ns1"}: {
					epSlice1.UID: epSlice1.Generation,
				},
			},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{epSlice1},
		expectNewer:  false,
	}, {
		name: "newer slice in tracker",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{
				{Name: "svc1", Namespace: "ns1"}: {
					epSlice1.UID: epSlice1NewerGen.Generation,
				},
			},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{epSlice1},
		expectNewer:  true,
	}, {
		name: "newer slice in params",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{
				{Name: "svc1", Namespace: "ns1"}: {
					epSlice1.UID: epSlice1.Generation,
				},
			},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{epSlice1NewerGen},
		expectNewer:  false,
	}, {
		name: "slice in params is expected to be deleted",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{
				{Name: "svc1", Namespace: "ns1"}: {
					epSlice1.UID: deletionExpected,
				},
			},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{epSlice1},
		expectNewer:  true,
	}, {
		name: "slice in tracker but not in params",
		tracker: &EndpointSliceTracker{
			generationsByService: map[types.NamespacedName]GenerationsBySlice{
				{Name: "svc1", Namespace: "ns1"}: {
					epSlice1.UID: epSlice1.Generation,
				},
			},
		},
		serviceParam: &v1.Service{ObjectMeta: metav1.ObjectMeta{Name: "svc1", Namespace: "ns1"}},
		slicesParam:  []*discovery.EndpointSlice{},
		expectNewer:  true,
	}}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			actualNewer := tc.tracker.StaleSlices(tc.serviceParam, tc.slicesParam)
			if actualNewer != tc.expectNewer {
				t.Errorf("Expected %t, got %t", tc.expectNewer, actualNewer)
			}
		})
	}
}

func TestEndpointSliceTrackerDeletion(t *testing.T) {
	epSlice1 := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "example-1",
			Namespace:  "ns1",
			UID:        "original",
			Generation: 1,
			Labels:     map[string]string{discovery.LabelServiceName: "svc1"},
		},
	}

	epSlice1DifferentNS := epSlice1.DeepCopy()
	epSlice1DifferentNS.Namespace = "ns2"
	epSlice1DifferentNS.UID = "diff-ns"

	epSlice1DifferentService := epSlice1.DeepCopy()
	epSlice1DifferentService.Labels[discovery.LabelServiceName] = "svc2"
	epSlice1DifferentService.UID = "diff-svc"

	epSlice1NewerGen := epSlice1.DeepCopy()
	epSlice1NewerGen.Generation = 2

	testCases := map[string]struct {
		expectDeletionParam        *discovery.EndpointSlice
		checksParam                *discovery.EndpointSlice
		deleteParam                *discovery.EndpointSlice
		expectHas                  bool
		expectShouldSync           bool
		expectedHandleDeletionResp bool
	}{
		"same slice": {
			expectDeletionParam:        epSlice1,
			checksParam:                epSlice1,
			deleteParam:                epSlice1,
			expectHas:                  true,
			expectShouldSync:           true,
			expectedHandleDeletionResp: true,
		},
		"different namespace": {
			expectDeletionParam:        epSlice1DifferentNS,
			checksParam:                epSlice1DifferentNS,
			deleteParam:                epSlice1DifferentNS,
			expectHas:                  true,
			expectShouldSync:           true,
			expectedHandleDeletionResp: false,
		},
		"different namespace, check original ep slice": {
			expectDeletionParam:        epSlice1DifferentNS,
			checksParam:                epSlice1,
			deleteParam:                epSlice1DifferentNS,
			expectHas:                  true,
			expectShouldSync:           false,
			expectedHandleDeletionResp: false,
		},
		"different service": {
			expectDeletionParam:        epSlice1DifferentService,
			checksParam:                epSlice1DifferentService,
			deleteParam:                epSlice1DifferentService,
			expectHas:                  true,
			expectShouldSync:           true,
			expectedHandleDeletionResp: false,
		},
		"expectDelete different service, check original ep slice, delete original": {
			expectDeletionParam:        epSlice1DifferentService,
			checksParam:                epSlice1,
			deleteParam:                epSlice1,
			expectHas:                  true,
			expectShouldSync:           false,
			expectedHandleDeletionResp: false,
		},
		"different generation": {
			expectDeletionParam:        epSlice1NewerGen,
			checksParam:                epSlice1NewerGen,
			deleteParam:                epSlice1NewerGen,
			expectHas:                  true,
			expectShouldSync:           true,
			expectedHandleDeletionResp: true,
		},
		"expectDelete different generation, check original ep slice, delete original": {
			expectDeletionParam:        epSlice1NewerGen,
			checksParam:                epSlice1,
			deleteParam:                epSlice1,
			expectHas:                  true,
			expectShouldSync:           true,
			expectedHandleDeletionResp: true,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			esTracker := NewEndpointSliceTracker()
			esTracker.Update(epSlice1)

			esTracker.ExpectDeletion(tc.expectDeletionParam)
			if esTracker.Has(tc.checksParam) != tc.expectHas {
				t.Errorf("esTracker.Has(%+v) == %t, expected %t", tc.checksParam, esTracker.Has(tc.checksParam), tc.expectHas)
			}
			if esTracker.ShouldSync(tc.checksParam) != tc.expectShouldSync {
				t.Errorf("esTracker.ShouldSync(%+v) == %t, expected %t", tc.checksParam, esTracker.ShouldSync(tc.checksParam), tc.expectShouldSync)
			}
			// HandleDeletion mutates the tracker, so capture the result once
			// rather than calling it again when reporting a failure.
			handleDeletionResp := esTracker.HandleDeletion(epSlice1)
			if handleDeletionResp != tc.expectedHandleDeletionResp {
				t.Errorf("esTracker.HandleDeletion(%+v) == %t, expected %t", epSlice1, handleDeletionResp, tc.expectedHandleDeletionResp)
			}
			if esTracker.Has(epSlice1) != false {
				t.Errorf("esTracker.Has(%+v) == %t, expected false", epSlice1, esTracker.Has(epSlice1))
			}
		})
	}
}

func TestEndpointSliceTrackerDeleteService(t *testing.T) {
	svcName1, svcNS1 := "svc1", "ns1"
	svcName2, svcNS2 := "svc2", "ns2"
	epSlice1 := &discovery.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:       "example-1",
			Namespace:  svcNS1,
			Generation: 1,
			Labels:     map[string]string{discovery.LabelServiceName: svcName1},
		},
	}

	testCases := map[string]struct {
		updateParam        *discovery.EndpointSlice
		deleteServiceParam *types.NamespacedName
		expectHas          bool
		expectShouldSync   bool
		expectGeneration   int64
	}{
		"same service": {
			updateParam:        epSlice1,
			deleteServiceParam: &types.NamespacedName{Namespace: svcNS1, Name: svcName1},
			expectHas:          false,
			expectShouldSync:   true,
		},
		"different namespace": {
			updateParam:        epSlice1,
			deleteServiceParam: &types.NamespacedName{Namespace: svcNS2, Name: svcName1},
			expectHas:          true,
			expectShouldSync:   false,
			expectGeneration:   epSlice1.Generation,
		},
		"different service": {
			updateParam:        epSlice1,
			deleteServiceParam: &types.NamespacedName{Namespace: svcNS1, Name: svcName2},
			expectHas:          true,
			expectShouldSync:   false,
			expectGeneration:   epSlice1.Generation,
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			esTracker := NewEndpointSliceTracker()
			esTracker.Update(tc.updateParam)
			esTracker.DeleteService(tc.deleteServiceParam.Namespace, tc.deleteServiceParam.Name)
			if esTracker.Has(tc.updateParam) != tc.expectHas {
				t.Errorf("esTracker.Has(%+v) == %t, expected %t", tc.updateParam, esTracker.Has(tc.updateParam), tc.expectHas)
			}
			if esTracker.ShouldSync(tc.updateParam) != tc.expectShouldSync {
				t.Errorf("esTracker.ShouldSync(%+v) == %t, expected %t", tc.updateParam, esTracker.ShouldSync(tc.updateParam), tc.expectShouldSync)
			}
			if tc.expectGeneration != 0 {
				serviceNN := types.NamespacedName{Namespace: epSlice1.Namespace, Name: "svc1"}
				gfs, ok := esTracker.generationsByService[serviceNN]
				if !ok {
					t.Fatalf("expected tracker to have generations for %s Service", serviceNN.Name)
				}
				generation, ok := gfs[epSlice1.UID]
				if !ok {
					t.Fatalf("expected tracker to have generation for %s EndpointSlice", epSlice1.Name)
				}
				if tc.expectGeneration != generation {
					t.Fatalf("expected generation to be %d, got %d", tc.expectGeneration, generation)
				}
			}
		})
	}
}

@@ -1,27 +0,0 @@
/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package endpointslice

import (
	discovery "k8s.io/api/discovery/v1"
)

// EndpointReady returns true if an Endpoint has the Ready condition set to
// true.
func EndpointReady(endpoint discovery.Endpoint) bool {
	return endpoint.Conditions.Ready != nil && *endpoint.Conditions.Ready
}
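
// Illustrative sketch (not part of the original file): Conditions.Ready is a
// *bool and therefore tri-state. The EndpointSlice API documentation suggests
// consumers may treat the unknown (nil) state as ready, but this helper
// deliberately counts only an explicit true as ready.
//
//	ready := true
//	_ = EndpointReady(discovery.Endpoint{
//		Conditions: discovery.EndpointConditions{Ready: &ready},
//	}) // true
//	_ = EndpointReady(discovery.Endpoint{}) // false: nil Ready is not ready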