@@ -17,6 +17,8 @@ limitations under the License.
package fuzzer

import (
"net/netip"

fuzz "github.com/google/gofuzz"
runtimeserializer "k8s.io/apimachinery/pkg/runtime/serializer"
"k8s.io/kubernetes/pkg/apis/networking"
@@ -74,5 +76,35 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
}
}
},
func(obj *networking.IPAddress, c fuzz.Continue) {
c.FuzzNoCustom(obj) // fuzz self without calling this function again
// length in bytes of the IP Family: IPv4: 4 bytes, IPv6: 16 bytes
boolean := []bool{false, true}
is6 := boolean[c.Rand.Intn(2)]
ip := generateRandomIP(is6, c)
obj.Name = ip
},
}
}

func generateRandomIP(is6 bool, c fuzz.Continue) string {
n := 4
if is6 {
n = 16
}
bytes := make([]byte, n)
for i := 0; i < n; i++ {
bytes[i] = uint8(c.Rand.Intn(255))
}

ip, ok := netip.AddrFromSlice(bytes)
if ok {
return ip.String()
}
// this should not happen, but it is better to
// return a valid IP address than nothing
if is6 {
return "2001:db8::1"
}
return "192.168.1.1"
}
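The fuzzer leans on netip.Addr.String() so that the generated names already satisfy the canonical-format rule enforced by the validation code later in this PR. A minimal standalone sketch of the same idea (hypothetical helper name, math/rand seeding instead of gofuzz's Continue):

package main

import (
	"fmt"
	"math/rand"
	"net/netip"
)

// randomCanonicalIP is a hypothetical stand-in for generateRandomIP above:
// it fills 4 or 16 random bytes and lets netip render the canonical string.
func randomCanonicalIP(r *rand.Rand, is6 bool) string {
	n := 4
	if is6 {
		n = 16
	}
	b := make([]byte, n)
	r.Read(b) // math/rand.Rand.Read never returns an error
	addr, ok := netip.AddrFromSlice(b)
	if !ok {
		return "192.168.1.1" // fallback, mirroring the fuzzer above
	}
	return addr.String()
}

func main() {
	r := rand.New(rand.NewSource(1))
	for i := 0; i < 3; i++ {
		ip := randomCanonicalIP(r, i%2 == 0)
		// String() output always round-trips through ParseAddr unchanged,
		// which is exactly the property ValidateIPAddressName checks.
		parsed, _ := netip.ParseAddr(ip)
		fmt.Println(ip, parsed.String() == ip)
	}
}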
@@ -23,7 +23,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/apis/networking"
"k8s.io/kubernetes/pkg/apis/networking/v1"
v1 "k8s.io/kubernetes/pkg/apis/networking/v1"
"k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
"k8s.io/kubernetes/pkg/apis/networking/v1beta1"
)

@@ -54,6 +54,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&IngressClassList{},
&ClusterCIDR{},
&ClusterCIDRList{},
&IPAddress{},
&IPAddressList{},
)
return nil
}
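The addKnownTypes change above is what makes the new kinds visible to a runtime.Scheme. A minimal illustration of that registration mechanism in isolation (a local scheme, not the real networking install wiring; it assumes the v1alpha1 types added by this PR are vendored):

package main

import (
	"fmt"

	"k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "networking.k8s.io", Version: "v1alpha1"}
	// Register the external types; the PR does the same for the internal
	// networking.IPAddress and networking.IPAddressList types.
	scheme.AddKnownTypes(gv,
		&v1alpha1.IPAddress{},
		&v1alpha1.IPAddressList{},
	)
	metav1.AddToGroupVersion(scheme, gv)

	gvks, _, err := scheme.ObjectKinds(&v1alpha1.IPAddress{})
	fmt.Println(gvks, err) // [networking.k8s.io/v1alpha1, Kind=IPAddress] <nil>
}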
@@ -18,6 +18,7 @@ package networking

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
api "k8s.io/kubernetes/pkg/apis/core"
)
@@ -699,3 +700,55 @@ type ClusterCIDRList struct {
// items is the list of ClusterCIDRs.
Items []ClusterCIDR
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs
// that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses.
// An IP address can be represented in different formats; to guarantee uniqueness of the IP,
// the name of the object is the IP address in canonical format: four decimal octets separated
// by dots with leading zeros suppressed for IPv4, and the representation defined by RFC 5952 for IPv6.
// Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1
// Invalid: 10.01.2.3 or 2001:db8:0:0:0::1
type IPAddress struct {
metav1.TypeMeta
// +optional
metav1.ObjectMeta
// +optional
Spec IPAddressSpec
}

// IPAddressSpec describes the attributes of an IP Address.
type IPAddressSpec struct {
// ParentRef references the resource that an IPAddress is attached to.
// An IPAddress must reference a parent object.
// +required
ParentRef *ParentReference
}

// ParentReference describes a reference to a parent object.
type ParentReference struct {
// Group is the group of the object being referenced.
Group string
// Resource is the resource of the object being referenced.
Resource string
// Namespace is the namespace of the object being referenced.
Namespace string
// Name is the name of the object being referenced.
Name string
// UID is the uid of the object being referenced.
// +optional
UID types.UID
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// IPAddressList contains a list of IPAddress.
type IPAddressList struct {
metav1.TypeMeta
// +optional
metav1.ListMeta

// Items is the list of IPAddresses.
Items []IPAddress
}
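To make the naming convention concrete, here is a hedged sketch of the object a ClusterIP allocation would produce for a Service (the field values are illustrative; in practice the objects are created by the allocator and repair controller added later in this PR):

package main

import (
	"fmt"

	v1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// The object name is the ClusterIP in canonical form; ParentRef points
	// back at the Service that owns the IP (the core group is the empty string).
	ip := v1alpha1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "10.96.0.10"},
		Spec: v1alpha1.IPAddressSpec{
			ParentRef: &v1alpha1.ParentReference{
				Group:     "",
				Resource:  "services",
				Namespace: "kube-system",
				Name:      "kube-dns",
			},
		},
	}
	fmt.Printf("%s -> %s/%s (%s)\n", ip.Name, ip.Spec.ParentRef.Namespace, ip.Spec.ParentRef.Name, ip.Spec.ParentRef.Resource)
}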
pkg/apis/networking/v1alpha1/zz_generated.conversion.go (generated, 137 lines changed)
@@ -28,6 +28,7 @@ import (
v1alpha1 "k8s.io/api/networking/v1alpha1"
conversion "k8s.io/apimachinery/pkg/conversion"
runtime "k8s.io/apimachinery/pkg/runtime"
types "k8s.io/apimachinery/pkg/types"
core "k8s.io/kubernetes/pkg/apis/core"
networking "k8s.io/kubernetes/pkg/apis/networking"
)
@@ -69,6 +70,46 @@ func RegisterConversions(s *runtime.Scheme) error {
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.IPAddress)(nil), (*networking.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_IPAddress_To_networking_IPAddress(a.(*v1alpha1.IPAddress), b.(*networking.IPAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddress)(nil), (*v1alpha1.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddress_To_v1alpha1_IPAddress(a.(*networking.IPAddress), b.(*v1alpha1.IPAddress), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.IPAddressList)(nil), (*networking.IPAddressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_IPAddressList_To_networking_IPAddressList(a.(*v1alpha1.IPAddressList), b.(*networking.IPAddressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddressList)(nil), (*v1alpha1.IPAddressList)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddressList_To_v1alpha1_IPAddressList(a.(*networking.IPAddressList), b.(*v1alpha1.IPAddressList), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.IPAddressSpec)(nil), (*networking.IPAddressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(a.(*v1alpha1.IPAddressSpec), b.(*networking.IPAddressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.IPAddressSpec)(nil), (*v1alpha1.IPAddressSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_IPAddressSpec_To_v1alpha1_IPAddressSpec(a.(*networking.IPAddressSpec), b.(*v1alpha1.IPAddressSpec), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*v1alpha1.ParentReference)(nil), (*networking.ParentReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_v1alpha1_ParentReference_To_networking_ParentReference(a.(*v1alpha1.ParentReference), b.(*networking.ParentReference), scope)
}); err != nil {
return err
}
if err := s.AddGeneratedConversionFunc((*networking.ParentReference)(nil), (*v1alpha1.ParentReference)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_networking_ParentReference_To_v1alpha1_ParentReference(a.(*networking.ParentReference), b.(*v1alpha1.ParentReference), scope)
}); err != nil {
return err
}
return nil
}

@@ -145,3 +186,99 @@ func autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *netw
func Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error {
return autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in, out, s)
}

func autoConvert_v1alpha1_IPAddress_To_networking_IPAddress(in *v1alpha1.IPAddress, out *networking.IPAddress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}

// Convert_v1alpha1_IPAddress_To_networking_IPAddress is an autogenerated conversion function.
func Convert_v1alpha1_IPAddress_To_networking_IPAddress(in *v1alpha1.IPAddress, out *networking.IPAddress, s conversion.Scope) error {
return autoConvert_v1alpha1_IPAddress_To_networking_IPAddress(in, out, s)
}

func autoConvert_networking_IPAddress_To_v1alpha1_IPAddress(in *networking.IPAddress, out *v1alpha1.IPAddress, s conversion.Scope) error {
out.ObjectMeta = in.ObjectMeta
if err := Convert_networking_IPAddressSpec_To_v1alpha1_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil {
return err
}
return nil
}

// Convert_networking_IPAddress_To_v1alpha1_IPAddress is an autogenerated conversion function.
func Convert_networking_IPAddress_To_v1alpha1_IPAddress(in *networking.IPAddress, out *v1alpha1.IPAddress, s conversion.Scope) error {
return autoConvert_networking_IPAddress_To_v1alpha1_IPAddress(in, out, s)
}

func autoConvert_v1alpha1_IPAddressList_To_networking_IPAddressList(in *v1alpha1.IPAddressList, out *networking.IPAddressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]networking.IPAddress)(unsafe.Pointer(&in.Items))
return nil
}

// Convert_v1alpha1_IPAddressList_To_networking_IPAddressList is an autogenerated conversion function.
func Convert_v1alpha1_IPAddressList_To_networking_IPAddressList(in *v1alpha1.IPAddressList, out *networking.IPAddressList, s conversion.Scope) error {
return autoConvert_v1alpha1_IPAddressList_To_networking_IPAddressList(in, out, s)
}

func autoConvert_networking_IPAddressList_To_v1alpha1_IPAddressList(in *networking.IPAddressList, out *v1alpha1.IPAddressList, s conversion.Scope) error {
out.ListMeta = in.ListMeta
out.Items = *(*[]v1alpha1.IPAddress)(unsafe.Pointer(&in.Items))
return nil
}

// Convert_networking_IPAddressList_To_v1alpha1_IPAddressList is an autogenerated conversion function.
func Convert_networking_IPAddressList_To_v1alpha1_IPAddressList(in *networking.IPAddressList, out *v1alpha1.IPAddressList, s conversion.Scope) error {
return autoConvert_networking_IPAddressList_To_v1alpha1_IPAddressList(in, out, s)
}

func autoConvert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(in *v1alpha1.IPAddressSpec, out *networking.IPAddressSpec, s conversion.Scope) error {
out.ParentRef = (*networking.ParentReference)(unsafe.Pointer(in.ParentRef))
return nil
}

// Convert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec is an autogenerated conversion function.
func Convert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(in *v1alpha1.IPAddressSpec, out *networking.IPAddressSpec, s conversion.Scope) error {
return autoConvert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(in, out, s)
}

func autoConvert_networking_IPAddressSpec_To_v1alpha1_IPAddressSpec(in *networking.IPAddressSpec, out *v1alpha1.IPAddressSpec, s conversion.Scope) error {
out.ParentRef = (*v1alpha1.ParentReference)(unsafe.Pointer(in.ParentRef))
return nil
}

// Convert_networking_IPAddressSpec_To_v1alpha1_IPAddressSpec is an autogenerated conversion function.
func Convert_networking_IPAddressSpec_To_v1alpha1_IPAddressSpec(in *networking.IPAddressSpec, out *v1alpha1.IPAddressSpec, s conversion.Scope) error {
return autoConvert_networking_IPAddressSpec_To_v1alpha1_IPAddressSpec(in, out, s)
}

func autoConvert_v1alpha1_ParentReference_To_networking_ParentReference(in *v1alpha1.ParentReference, out *networking.ParentReference, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
out.Namespace = in.Namespace
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}

// Convert_v1alpha1_ParentReference_To_networking_ParentReference is an autogenerated conversion function.
func Convert_v1alpha1_ParentReference_To_networking_ParentReference(in *v1alpha1.ParentReference, out *networking.ParentReference, s conversion.Scope) error {
return autoConvert_v1alpha1_ParentReference_To_networking_ParentReference(in, out, s)
}

func autoConvert_networking_ParentReference_To_v1alpha1_ParentReference(in *networking.ParentReference, out *v1alpha1.ParentReference, s conversion.Scope) error {
out.Group = in.Group
out.Resource = in.Resource
out.Namespace = in.Namespace
out.Name = in.Name
out.UID = types.UID(in.UID)
return nil
}

// Convert_networking_ParentReference_To_v1alpha1_ParentReference is an autogenerated conversion function.
func Convert_networking_ParentReference_To_v1alpha1_ParentReference(in *networking.ParentReference, out *v1alpha1.ParentReference, s conversion.Scope) error {
return autoConvert_networking_ParentReference_To_v1alpha1_ParentReference(in, out, s)
}
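These generated helpers are normally invoked through the scheme rather than called directly. A hedged sketch of a round-trip conversion, assuming it is built inside the Kubernetes repository and that the install package from this PR has registered both the internal and v1alpha1 types in legacyscheme.Scheme:

package main

import (
	"fmt"

	v1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/networking"
	_ "k8s.io/kubernetes/pkg/apis/networking/install" // registers types and conversions
)

func main() {
	external := &v1alpha1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "2001:db8::1"},
		Spec: v1alpha1.IPAddressSpec{
			ParentRef: &v1alpha1.ParentReference{Resource: "services", Namespace: "bar", Name: "foo"},
		},
	}
	internal := &networking.IPAddress{}
	// Scheme.Convert dispatches to the generated
	// Convert_v1alpha1_IPAddress_To_networking_IPAddress registered above.
	if err := legacyscheme.Scheme.Convert(external, internal, nil); err != nil {
		panic(err)
	}
	fmt.Println(internal.Name, internal.Spec.ParentRef.Resource)
}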
@@ -18,6 +18,7 @@ package validation

import (
"fmt"
"net/netip"
"strings"

v1 "k8s.io/api/core/v1"
@@ -741,3 +742,77 @@ func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldP

return allErrs
}

// ValidateIPAddressName validates that the name is the canonical textual representation of an IP address.
// IPAddress does not support generating names, so the prefix is not considered.
func ValidateIPAddressName(name string, prefix bool) []string {
var errs []string
ip, err := netip.ParseAddr(name)
if err != nil {
errs = append(errs, err.Error())
} else if ip.String() != name {
errs = append(errs, "not a valid ip in canonical format")

}
return errs
}

func ValidateIPAddress(ipAddress *networking.IPAddress) field.ErrorList {
allErrs := apivalidation.ValidateObjectMeta(&ipAddress.ObjectMeta, false, ValidateIPAddressName, field.NewPath("metadata"))
errs := validateIPAddressParentReference(ipAddress.Spec.ParentRef, field.NewPath("spec"))
allErrs = append(allErrs, errs...)
return allErrs

}

// validateIPAddressParentReference ensures that the IPAddress ParentReference exists and is valid.
func validateIPAddressParentReference(params *networking.ParentReference, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}

if params == nil {
allErrs = append(allErrs, field.Required(fldPath.Child("parentRef"), ""))
return allErrs
}

fldPath = fldPath.Child("parentRef")
// group is required, but the Core group used by Services is the empty value, so it cannot be enforced
if params.Group != "" {
for _, msg := range validation.IsDNS1123Subdomain(params.Group) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("group"), params.Group, msg))
}
}

// resource is required
if params.Resource == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("resource"), ""))
} else {
for _, msg := range pathvalidation.IsValidPathSegmentName(params.Resource) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("resource"), params.Resource, msg))
}
}

// name is required
if params.Name == "" {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else {
for _, msg := range pathvalidation.IsValidPathSegmentName(params.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), params.Name, msg))
}
}

// namespace is optional
if params.Namespace != "" {
for _, msg := range pathvalidation.IsValidPathSegmentName(params.Namespace) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), params.Namespace, msg))
}
}
return allErrs
}

// ValidateIPAddressUpdate tests if an update to an IPAddress is valid.
func ValidateIPAddressUpdate(update, old *networking.IPAddress) field.ErrorList {
var allErrs field.ErrorList
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.Spec.ParentRef, old.Spec.ParentRef, field.NewPath("spec").Child("parentRef"))...)
return allErrs
}
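The canonical-format rule is enforced purely by round-tripping the name through Go's parser: a name is accepted only if netip renders it back identically. A small self-contained illustration of which names pass (mirroring the Valid/Invalid examples in the type's documentation and the validation tests that follow):

package main

import (
	"fmt"
	"net/netip"
)

// isCanonical reports whether name is an IP address in canonical form,
// i.e. it parses and re-serializes to exactly the same string. This is the
// same check ValidateIPAddressName performs above.
func isCanonical(name string) bool {
	ip, err := netip.ParseAddr(name)
	return err == nil && ip.String() == name
}

func main() {
	for _, name := range []string{
		"192.168.1.5",            // true
		"10.01.2.3",              // false: leading zero in an octet
		"2001:db8::1",            // true
		"2001:db8:0:0:0::1",      // false: not the RFC 5952 form
		"2001:4860:4860:0::8888", // false: same reason (used in the tests below)
	} {
		fmt.Printf("%-24s %v\n", name, isCanonical(name))
	}
}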
@@ -2195,3 +2195,215 @@ func TestValidateClusterConfigUpdate(t *testing.T) {
})
}
}

func TestValidateIPAddress(t *testing.T) {
testCases := map[string]struct {
expectedErrors int
ipAddress *networking.IPAddress
}{
"empty-ipaddress-bad-name": {
expectedErrors: 1,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "test-name",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "",
Resource: "services",
Name: "foo",
Namespace: "bar",
},
},
},
},
"empty-ipaddress-bad-name-no-parent-reference": {
expectedErrors: 2,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "test-name",
},
},
},

"good-ipaddress": {
expectedErrors: 0,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.1.1",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "",
Resource: "services",
Name: "foo",
Namespace: "bar",
},
},
},
},
"good-ipaddress-gateway": {
expectedErrors: 0,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.1.1",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "gateway.networking.k8s.io",
Resource: "gateway",
Name: "foo",
Namespace: "bar",
},
},
},
},
"good-ipv6address": {
expectedErrors: 0,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "2001:4860:4860::8888",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "",
Resource: "services",
Name: "foo",
Namespace: "bar",
},
},
},
},
"non-canonical-ipv6address": {
expectedErrors: 1,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "2001:4860:4860:0::8888",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "",
Resource: "services",
Name: "foo",
Namespace: "bar",
},
},
},
},
"missing-ipaddress-reference": {
expectedErrors: 1,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.1.1",
},
},
},
"wrong-ipaddress-reference": {
expectedErrors: 1,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.1.1",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "custom.resource.com",
Resource: "services",
Name: "foo$%&",
Namespace: "",
},
},
},
},
"wrong-ipaddress-reference-multiple-errors": {
expectedErrors: 4,
ipAddress: &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.1.1",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: ".cust@m.resource.com",
Resource: "",
Name: "",
Namespace: "bar$$$$$%@",
},
},
},
},
}

for name, testCase := range testCases {
t.Run(name, func(t *testing.T) {
errs := ValidateIPAddress(testCase.ipAddress)
if len(errs) != testCase.expectedErrors {
t.Errorf("Expected %d errors, got %d errors: %v", testCase.expectedErrors, len(errs), errs)
}
})
}
}

func TestValidateIPAddressUpdate(t *testing.T) {
old := &networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.1.1",
ResourceVersion: "1",
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "custom.resource.com",
Resource: "services",
Name: "foo",
Namespace: "bar",
},
},
}

testCases := []struct {
name string
new func(svc *networking.IPAddress) *networking.IPAddress
expectErr bool
}{
{
name: "Successful update, no changes",
new: func(old *networking.IPAddress) *networking.IPAddress {
out := old.DeepCopy()
return out
},
expectErr: false,
},

{
name: "Failed update, update spec.ParentRef",
new: func(svc *networking.IPAddress) *networking.IPAddress {
out := svc.DeepCopy()
out.Spec.ParentRef = &networking.ParentReference{
Group: "custom.resource.com",
Resource: "Gateway",
Name: "foo",
Namespace: "bar",
}

return out
}, expectErr: true,
},
{
name: "Failed update, delete spec.ParentRef",
new: func(svc *networking.IPAddress) *networking.IPAddress {
out := svc.DeepCopy()
out.Spec.ParentRef = nil
return out
}, expectErr: true,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
err := ValidateIPAddressUpdate(testCase.new(old), old)
if !testCase.expectErr && err != nil {
t.Errorf("ValidateIPAddressUpdate must be successful for test '%s', got %v", testCase.name, err)
}
if testCase.expectErr && err == nil {
t.Errorf("ValidateIPAddressUpdate must return error for test: %s, but got nil", testCase.name)
}
})
}
}
pkg/apis/networking/zz_generated.deepcopy.go (generated, 97 lines changed)
@@ -154,6 +154,87 @@ func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddress) DeepCopyInto(out *IPAddress) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddress.
func (in *IPAddress) DeepCopy() *IPAddress {
if in == nil {
return nil
}
out := new(IPAddress)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddress) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressList) DeepCopyInto(out *IPAddressList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]IPAddress, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressList.
func (in *IPAddressList) DeepCopy() *IPAddressList {
if in == nil {
return nil
}
out := new(IPAddressList)
in.DeepCopyInto(out)
return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *IPAddressList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPAddressSpec) DeepCopyInto(out *IPAddressSpec) {
*out = *in
if in.ParentRef != nil {
in, out := &in.ParentRef, &out.ParentRef
*out = new(ParentReference)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPAddressSpec.
func (in *IPAddressSpec) DeepCopy() *IPAddressSpec {
if in == nil {
return nil
}
out := new(IPAddressSpec)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *IPBlock) DeepCopyInto(out *IPBlock) {
*out = *in
@@ -816,6 +897,22 @@ func (in *NetworkPolicyStatus) DeepCopy() *NetworkPolicyStatus {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ParentReference) DeepCopyInto(out *ParentReference) {
*out = *in
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParentReference.
func (in *ParentReference) DeepCopy() *ParentReference {
if in == nil {
return nil
}
out := new(ParentReference)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceBackendPort) DeepCopyInto(out *ServiceBackendPort) {
*out = *in
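Note how the generated IPAddressSpec.DeepCopyInto allocates a fresh ParentReference instead of copying the pointer: without that allocation the copy would alias the original. A toy illustration of the difference, using plain structs rather than the real API types:

package main

import "fmt"

type parentRef struct{ Name string }

type spec struct{ ParentRef *parentRef }

// shallowCopy just copies the struct, so both specs share one parentRef.
func shallowCopy(in spec) spec { return in }

// deepCopy mirrors the generated DeepCopyInto: allocate a new pointer target.
func deepCopy(in spec) spec {
	out := in
	if in.ParentRef != nil {
		p := *in.ParentRef
		out.ParentRef = &p
	}
	return out
}

func main() {
	orig := spec{ParentRef: &parentRef{Name: "foo"}}

	shallow := shallowCopy(orig)
	deep := deepCopy(orig)

	orig.ParentRef.Name = "mutated"
	fmt.Println(shallow.ParentRef.Name) // "mutated": aliased
	fmt.Println(deep.ParentRef.Name)    // "foo": independent copy
}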
@@ -33,9 +33,12 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/storage"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/klog/v2"
"k8s.io/kubernetes/pkg/controlplane/reconcilers"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/registry/core/rangeallocation"
corerest "k8s.io/kubernetes/pkg/registry/core/rest"
servicecontroller "k8s.io/kubernetes/pkg/registry/core/service/ipallocator/controller"
@@ -52,7 +55,8 @@ const (
// controller loops, which manage creating the "kubernetes" service and
// provide the IP repair check on service IPs
type Controller struct {
client kubernetes.Interface
client kubernetes.Interface
informers informers.SharedInformerFactory

ServiceClusterIPRegistry rangeallocation.RangeRegistry
ServiceClusterIPRange net.IPNet
@@ -98,7 +102,8 @@ func (c *completedConfig) NewBootstrapController(legacyRESTStorage corerest.Lega
}

return &Controller{
client: client,
client: client,
informers: c.ExtraConfig.VersionedInformers,

EndpointReconciler: c.ExtraConfig.EndpointReconcilerConfig.Reconciler,
EndpointInterval: c.ExtraConfig.EndpointReconcilerConfig.Interval,
@@ -150,7 +155,6 @@ func (c *Controller) Start() {
klog.Errorf("Error removing old endpoints from kubernetes service: %v", err)
}

repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval, c.client.CoreV1(), c.client.EventsV1(), &c.ServiceClusterIPRange, c.ServiceClusterIPRegistry, &c.SecondaryServiceClusterIPRange, c.SecondaryServiceClusterIPRegistry)
repairNodePorts := portallocatorcontroller.NewRepair(c.ServiceNodePortInterval, c.client.CoreV1(), c.client.EventsV1(), c.ServiceNodePortRange, c.ServiceNodePortRegistry)

// We start both repairClusterIPs and repairNodePorts to ensure repair
@@ -163,15 +167,37 @@ func (c *Controller) Start() {
// than 1 minute for backward compatibility of failing the whole
// apiserver if we can't repair them.
wg := sync.WaitGroup{}
wg.Add(2)
wg.Add(1)

runRepairClusterIPs := func(stopCh chan struct{}) {
repairClusterIPs.RunUntil(wg.Done, stopCh)
}
runRepairNodePorts := func(stopCh chan struct{}) {
repairNodePorts.RunUntil(wg.Done, stopCh)
}

wg.Add(1)
var runRepairClusterIPs func(stopCh chan struct{})
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
repairClusterIPs := servicecontroller.NewRepair(c.ServiceClusterIPInterval,
c.client.CoreV1(),
c.client.EventsV1(),
&c.ServiceClusterIPRange,
c.ServiceClusterIPRegistry,
&c.SecondaryServiceClusterIPRange,
c.SecondaryServiceClusterIPRegistry)
runRepairClusterIPs = func(stopCh chan struct{}) {
repairClusterIPs.RunUntil(wg.Done, stopCh)
}
} else {
repairClusterIPs := servicecontroller.NewRepairIPAddress(c.ServiceClusterIPInterval,
c.client,
&c.ServiceClusterIPRange,
&c.SecondaryServiceClusterIPRange,
c.informers.Core().V1().Services(),
c.informers.Networking().V1alpha1().IPAddresses(),
)
runRepairClusterIPs = func(stopCh chan struct{}) {
repairClusterIPs.RunUntil(wg.Done, stopCh)
}
}
c.runner = async.NewRunner(c.RunKubernetesService, runRepairClusterIPs, runRepairNodePorts)
c.runner.Start()
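Stripped of the apiserver plumbing, the pattern in Start() is: pick one of two repair implementations based on a feature gate, hand the chosen closure to the runner, and use a WaitGroup so startup can block until the first repair pass has finished. A minimal standalone sketch of that control flow (plain booleans and goroutines instead of the real feature gate and async.Runner):

package main

import (
	"fmt"
	"sync"
)

func main() {
	multiCIDRServiceAllocator := false // stands in for the MultiCIDRServiceAllocator gate

	var wg sync.WaitGroup
	wg.Add(1)

	var runRepairClusterIPs func(stopCh chan struct{})
	if !multiCIDRServiceAllocator {
		runRepairClusterIPs = func(stopCh chan struct{}) {
			// the legacy bitmap-based repair loop would run here
			fmt.Println("running bitmap ClusterIP repair")
			wg.Done()
		}
	} else {
		runRepairClusterIPs = func(stopCh chan struct{}) {
			// the IPAddress-object-based repair loop would run here
			fmt.Println("running IPAddress repair")
			wg.Done()
		}
	}

	stopCh := make(chan struct{})
	go runRepairClusterIPs(stopCh)

	wg.Wait() // Start() blocks like this until the first repair pass signals completion
	close(stopCh)
}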
@@ -590,6 +590,7 @@ func (m *Instance) InstallLegacyAPI(c *completedConfig, restOptionsGetter generi
ExtendExpiration: c.ExtraConfig.ExtendExpiration,
ServiceAccountMaxExpiration: c.ExtraConfig.ServiceAccountMaxExpiration,
APIAudiences: c.GenericConfig.Authentication.APIAudiences,
Informers: c.ExtraConfig.VersionedInformers,
}
legacyRESTStorage, apiGroupInfo, err := legacyRESTStorageProvider.NewLegacyRESTStorage(c.ExtraConfig.APIResourceConfigSource, restOptionsGetter)
if err != nil {

@@ -154,6 +154,7 @@ func TestLegacyRestStorageStrategies(t *testing.T) {
ServiceIPRange: apiserverCfg.ExtraConfig.ServiceIPRange,
ServiceNodePortRange: apiserverCfg.ExtraConfig.ServiceNodePortRange,
LoopbackClientConfig: apiserverCfg.GenericConfig.LoopbackClientConfig,
Informers: apiserverCfg.ExtraConfig.VersionedInformers,
}

_, apiGroupInfo, err := storageProvider.NewLegacyRESTStorage(serverstorage.NewResourceConfig(), apiserverCfg.GenericConfig.RESTOptionsGetter)

@@ -554,6 +554,13 @@ const (
// Enables the MultiCIDR Range allocator.
MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator"

// owner: @aojea
// kep: https://kep.k8s.io/1880
// alpha: v1.27
//
// Enables the dynamic configuration of Service IP ranges
MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator"

// owner: @rikatz
// kep: https://kep.k8s.io/2943
// alpha: v1.24
@@ -1041,6 +1048,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS

MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},

MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha},

NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},

NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta},
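The new gate follows the standard component-base featuregate pattern: it defaults to off while Alpha and is checked at the call sites shown above and below. A self-contained sketch of registering and flipping such a gate (a local gate instance, not Kubernetes' DefaultFeatureGate):

package main

import (
	"fmt"

	"k8s.io/component-base/featuregate"
)

const MultiCIDRServiceAllocator featuregate.Feature = "MultiCIDRServiceAllocator"

func main() {
	gate := featuregate.NewFeatureGate()
	// Alpha features default to false, exactly like the entry added to
	// defaultKubernetesFeatureGates above.
	if err := gate.Add(map[featuregate.Feature]featuregate.FeatureSpec{
		MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha},
	}); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(MultiCIDRServiceAllocator)) // false

	// Equivalent in spirit to passing --feature-gates=MultiCIDRServiceAllocator=true.
	if err := gate.Set("MultiCIDRServiceAllocator=true"); err != nil {
		panic(err)
	}
	fmt.Println(gate.Enabled(MultiCIDRServiceAllocator)) // true
}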
pkg/generated/openapi/zz_generated.openapi.go (generated, 167 lines changed)
@@ -734,6 +734,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
"k8s.io/api/networking/v1alpha1.ClusterCIDR": schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref),
"k8s.io/api/networking/v1alpha1.ClusterCIDRList": schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref),
"k8s.io/api/networking/v1alpha1.ClusterCIDRSpec": schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref),
"k8s.io/api/networking/v1alpha1.IPAddress": schema_k8sio_api_networking_v1alpha1_IPAddress(ref),
"k8s.io/api/networking/v1alpha1.IPAddressList": schema_k8sio_api_networking_v1alpha1_IPAddressList(ref),
"k8s.io/api/networking/v1alpha1.IPAddressSpec": schema_k8sio_api_networking_v1alpha1_IPAddressSpec(ref),
"k8s.io/api/networking/v1alpha1.ParentReference": schema_k8sio_api_networking_v1alpha1_ParentReference(ref),
"k8s.io/api/networking/v1beta1.HTTPIngressPath": schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref),
"k8s.io/api/networking/v1beta1.HTTPIngressRuleValue": schema_k8sio_api_networking_v1beta1_HTTPIngressRuleValue(ref),
"k8s.io/api/networking/v1beta1.Ingress": schema_k8sio_api_networking_v1beta1_Ingress(ref),
@@ -36714,6 +36718,169 @@ func schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref common.ReferenceCa
}
}

func schema_k8sio_api_networking_v1alpha1_IPAddress(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats; to guarantee uniqueness of the IP, the name of the object is the IP address in canonical format: four decimal octets separated by dots with leading zeros suppressed for IPv4, and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
},
},
"spec": {
SchemaProps: spec.SchemaProps{
Description: "spec is the desired state of the IPAddress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/networking/v1alpha1.IPAddressSpec"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/networking/v1alpha1.IPAddressSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
}
}

func schema_k8sio_api_networking_v1alpha1_IPAddressList(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "IPAddressList contains a list of IPAddress.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"kind": {
SchemaProps: spec.SchemaProps{
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
Type: []string{"string"},
Format: "",
},
},
"apiVersion": {
SchemaProps: spec.SchemaProps{
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
Type: []string{"string"},
Format: "",
},
},
"metadata": {
SchemaProps: spec.SchemaProps{
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
Default: map[string]interface{}{},
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
},
},
"items": {
SchemaProps: spec.SchemaProps{
Description: "items is the list of IPAddresses.",
Type: []string{"array"},
Items: &spec.SchemaOrArray{
Schema: &spec.Schema{
SchemaProps: spec.SchemaProps{
Default: map[string]interface{}{},
Ref: ref("k8s.io/api/networking/v1alpha1.IPAddress"),
},
},
},
},
},
},
Required: []string{"items"},
},
},
Dependencies: []string{
"k8s.io/api/networking/v1alpha1.IPAddress", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
}
}

func schema_k8sio_api_networking_v1alpha1_IPAddressSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "IPAddressSpec describes the attributes of an IP Address.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"parentRef": {
SchemaProps: spec.SchemaProps{
Description: "ParentRef references the resource that an IPAddress is attached to. An IPAddress must reference a parent object.",
Ref: ref("k8s.io/api/networking/v1alpha1.ParentReference"),
},
},
},
},
},
Dependencies: []string{
"k8s.io/api/networking/v1alpha1.ParentReference"},
}
}

func schema_k8sio_api_networking_v1alpha1_ParentReference(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
SchemaProps: spec.SchemaProps{
Description: "ParentReference describes a reference to a parent object.",
Type: []string{"object"},
Properties: map[string]spec.Schema{
"group": {
SchemaProps: spec.SchemaProps{
Description: "Group is the group of the object being referenced.",
Type: []string{"string"},
Format: "",
},
},
"resource": {
SchemaProps: spec.SchemaProps{
Description: "Resource is the resource of the object being referenced.",
Type: []string{"string"},
Format: "",
},
},
"namespace": {
SchemaProps: spec.SchemaProps{
Description: "Namespace is the namespace of the object being referenced.",
Type: []string{"string"},
Format: "",
},
},
"name": {
SchemaProps: spec.SchemaProps{
Description: "Name is the name of the object being referenced.",
Type: []string{"string"},
Format: "",
},
},
"uid": {
SchemaProps: spec.SchemaProps{
Description: "UID is the uid of the object being referenced.",
Type: []string{"string"},
Format: "",
},
},
},
},
},
}
}

func schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
return common.OpenAPIDefinition{
Schema: spec.Schema{
@@ -68,9 +68,10 @@ func NewStorageFactoryConfig() *StorageFactoryConfig {
//
// TODO (https://github.com/kubernetes/kubernetes/issues/108451): remove the override in 1.25.
// apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"),
networking.Resource("clustercidrs").WithVersion("v1alpha1"),
admissionregistration.Resource("validatingadmissionpolicies").WithVersion("v1alpha1"),
admissionregistration.Resource("validatingadmissionpolicybindings").WithVersion("v1alpha1"),
networking.Resource("clustercidrs").WithVersion("v1alpha1"),
networking.Resource("ipaddresses").WithVersion("v1alpha1"),
}

return &StorageFactoryConfig{

@@ -645,6 +645,14 @@ func AddHandlers(h printers.PrintHandler) {
}
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext)
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList)

ipAddressColumnDefinitions := []metav1.TableColumnDefinition{
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
{Name: "ParentRef", Type: "string", Description: networkingv1alpha1.IPAddressSpec{}.SwaggerDoc()["parentRef"]},
}

h.TableHandler(ipAddressColumnDefinitions, printIPAddress)
h.TableHandler(ipAddressColumnDefinitions, printIPAddressList)
}

// Pass ports=nil for all ports.
@@ -2779,6 +2787,41 @@ func printClusterCIDRList(list *networking.ClusterCIDRList, options printers.Gen
return rows, nil
}

func printIPAddress(obj *networking.IPAddress, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
}

parentRefName := "<none>"
if obj.Spec.ParentRef != nil {
gr := schema.GroupResource{
Group: obj.Spec.ParentRef.Group,
Resource: obj.Spec.ParentRef.Resource,
}
parentRefName = strings.ToLower(gr.String())
if obj.Spec.ParentRef.Namespace != "" {
parentRefName += "/" + obj.Spec.ParentRef.Namespace
}
parentRefName += "/" + obj.Spec.ParentRef.Name
}
age := translateTimestampSince(obj.CreationTimestamp)
row.Cells = append(row.Cells, obj.Name, parentRefName, age)

return []metav1.TableRow{row}, nil
}

func printIPAddressList(list *networking.IPAddressList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
rows := make([]metav1.TableRow, 0, len(list.Items))
for i := range list.Items {
r, err := printIPAddress(&list.Items[i], options)
if err != nil {
return nil, err
}
rows = append(rows, r...)
}
return rows, nil
}

func printScale(obj *autoscaling.Scale, options printers.GenerateOptions) ([]metav1.TableRow, error) {
row := metav1.TableRow{
Object: runtime.RawExtension{Object: obj},
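The ParentRef column is built as "<resource>.<group>/<namespace>/<name>", leaning on GroupResource.String(), which drops the dot and group when the group is empty, so core Services print as just "services/...". A small sketch of the formatting logic in isolation:

package main

import (
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// formatParentRef mirrors the cell-building logic in printIPAddress.
func formatParentRef(group, resource, namespace, name string) string {
	gr := schema.GroupResource{Group: group, Resource: resource}
	out := strings.ToLower(gr.String())
	if namespace != "" {
		out += "/" + namespace
	}
	return out + "/" + name
}

func main() {
	// A Service-owned IP (core group is empty): "services/bar/foo"
	fmt.Println(formatParentRef("", "services", "bar", "foo"))
	// A Gateway-owned IP: "gateway.gateway.networking.k8s.io/bar/foo"
	fmt.Println(formatParentRef("gateway.networking.k8s.io", "gateway", "bar", "foo"))
}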
@@ -6482,3 +6482,83 @@ func TestPrintClusterCIDRList(t *testing.T) {
}
}
}

func TestPrintIPAddress(t *testing.T) {
ip := networking.IPAddress{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.2.2",
CreationTimestamp: metav1.Time{Time: time.Now().AddDate(-10, 0, 0)},
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "mygroup",
Resource: "myresource",
Namespace: "mynamespace",
Name: "myname",
},
},
}
// Columns: Name, ParentRef, Age
expected := []metav1.TableRow{{Cells: []interface{}{"192.168.2.2", "myresource.mygroup/mynamespace/myname", "10y"}}}

rows, err := printIPAddress(&ip, printers.GenerateOptions{})
if err != nil {
t.Fatalf("Error generating table rows for IPAddress: %#v", err)
}
rows[0].Object.Object = nil
if !reflect.DeepEqual(expected, rows) {
t.Errorf("mismatch: %s", diff.ObjectReflectDiff(expected, rows))
}
}

func TestPrintIPAddressList(t *testing.T) {
ipList := networking.IPAddressList{
Items: []networking.IPAddress{
{
ObjectMeta: metav1.ObjectMeta{
Name: "192.168.2.2",
CreationTimestamp: metav1.Time{Time: time.Now().AddDate(-10, 0, 0)},
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "mygroup",
Resource: "myresource",
Namespace: "mynamespace",
Name: "myname",
},
},
}, {
ObjectMeta: metav1.ObjectMeta{
Name: "2001:db8::2",
CreationTimestamp: metav1.Time{Time: time.Now().AddDate(-5, 0, 0)},
},
Spec: networking.IPAddressSpec{
ParentRef: &networking.ParentReference{
Group: "mygroup2",
Resource: "myresource2",
Namespace: "mynamespace2",
Name: "myname2",
},
},
},
},
}
// Columns: Name, ParentRef, Age
expected := []metav1.TableRow{
{Cells: []interface{}{"192.168.2.2", "myresource.mygroup/mynamespace/myname", "10y"}},
{Cells: []interface{}{"2001:db8::2", "myresource2.mygroup2/mynamespace2/myname2", "5y1d"}},
}

rows, err := printIPAddressList(&ipList, printers.GenerateOptions{})
if err != nil {
t.Fatalf("Error generating table rows for IPAddress: %#v", err)
}
for i := range rows {
rows[i].Object.Object = nil

}
if !reflect.DeepEqual(expected, rows) {
t.Errorf("mismatch: %s", diff.ObjectReflectDiff(expected, rows))
}

}
@@ -36,11 +36,15 @@ import (
genericapiserver "k8s.io/apiserver/pkg/server"
serverstorage "k8s.io/apiserver/pkg/server/storage"
"k8s.io/apiserver/pkg/storage/etcd3"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/client-go/informers"
networkingv1alpha1client "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
policyclient "k8s.io/client-go/kubernetes/typed/policy/v1"
restclient "k8s.io/client-go/rest"
"k8s.io/kubernetes/pkg/api/legacyscheme"
api "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/cluster/ports"
"k8s.io/kubernetes/pkg/features"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/registry/core/componentstatus"
configmapstore "k8s.io/kubernetes/pkg/registry/core/configmap/storage"
@@ -90,6 +94,7 @@ type LegacyRESTStorageProvider struct {
APIAudiences authenticator.Audiences

LoopbackClientConfig *restclient.Config
Informers informers.SharedInformerFactory
}

// LegacyRESTStorage returns stateful information about particular instances of REST storage to
@@ -196,41 +201,64 @@ func (c LegacyRESTStorageProvider) NewLegacyRESTStorage(apiResourceConfigSource
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, err
}
var serviceClusterIPAllocator, secondaryServiceClusterIPAllocator ipallocator.Interface

serviceClusterIPAllocator, err := ipallocator.New(&serviceClusterIPRange, func(max int, rangeSpec string, offset int) (allocator.Interface, error) {
var mem allocator.Snapshottable
mem = allocator.NewAllocationMapWithOffset(max, rangeSpec, offset)
// TODO etcdallocator package to return a storage interface via the storageFactory
etcd, err := serviceallocator.NewEtcd(mem, "/ranges/serviceips", serviceStorageConfig.ForResource(api.Resource("serviceipallocations")))
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
serviceClusterIPAllocator, err = ipallocator.New(&serviceClusterIPRange, func(max int, rangeSpec string, offset int) (allocator.Interface, error) {
var mem allocator.Snapshottable
mem = allocator.NewAllocationMapWithOffset(max, rangeSpec, offset)
// TODO etcdallocator package to return a storage interface via the storageFactory
etcd, err := serviceallocator.NewEtcd(mem, "/ranges/serviceips", serviceStorageConfig.ForResource(api.Resource("serviceipallocations")))
if err != nil {
return nil, err
}
serviceClusterIPRegistry = etcd
return etcd, nil
})
if err != nil {
return nil, err
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, fmt.Errorf("cannot create cluster IP allocator: %v", err)
}
} else {
networkingv1alphaClient, err := networkingv1alpha1client.NewForConfig(c.LoopbackClientConfig)
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, err
}
serviceClusterIPAllocator, err = ipallocator.NewIPAllocator(&serviceClusterIPRange, networkingv1alphaClient, c.Informers.Networking().V1alpha1().IPAddresses())
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, fmt.Errorf("cannot create cluster IP allocator: %v", err)
}
serviceClusterIPRegistry = etcd
return etcd, nil
})
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, fmt.Errorf("cannot create cluster IP allocator: %v", err)
}

serviceClusterIPAllocator.EnableMetrics()
restStorage.ServiceClusterIPAllocator = serviceClusterIPRegistry

// allocator for secondary service ip range
var secondaryServiceClusterIPAllocator ipallocator.Interface
if c.SecondaryServiceIPRange.IP != nil {
var secondaryServiceClusterIPRegistry rangeallocation.RangeRegistry
secondaryServiceClusterIPAllocator, err = ipallocator.New(&c.SecondaryServiceIPRange, func(max int, rangeSpec string, offset int) (allocator.Interface, error) {
var mem allocator.Snapshottable
mem = allocator.NewAllocationMapWithOffset(max, rangeSpec, offset)
// TODO etcdallocator package to return a storage interface via the storageFactory
etcd, err := serviceallocator.NewEtcd(mem, "/ranges/secondaryserviceips", serviceStorageConfig.ForResource(api.Resource("serviceipallocations")))
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
secondaryServiceClusterIPAllocator, err = ipallocator.New(&c.SecondaryServiceIPRange, func(max int, rangeSpec string, offset int) (allocator.Interface, error) {
var mem allocator.Snapshottable
mem = allocator.NewAllocationMapWithOffset(max, rangeSpec, offset)
// TODO etcdallocator package to return a storage interface via the storageFactory
etcd, err := serviceallocator.NewEtcd(mem, "/ranges/secondaryserviceips", serviceStorageConfig.ForResource(api.Resource("serviceipallocations")))
if err != nil {
return nil, err
}
secondaryServiceClusterIPRegistry = etcd
return etcd, nil
})
if err != nil {
return nil, err
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, fmt.Errorf("cannot create cluster secondary IP allocator: %v", err)
}
} else {
networkingv1alphaClient, err := networkingv1alpha1client.NewForConfig(c.LoopbackClientConfig)
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, err
}
secondaryServiceClusterIPAllocator, err = ipallocator.NewIPAllocator(&c.SecondaryServiceIPRange, networkingv1alphaClient, c.Informers.Networking().V1alpha1().IPAddresses())
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, fmt.Errorf("cannot create cluster secondary IP allocator: %v", err)
}
secondaryServiceClusterIPRegistry = etcd
return etcd, nil
})
if err != nil {
return LegacyRESTStorage{}, genericapiserver.APIGroupInfo{}, fmt.Errorf("cannot create cluster secondary IP allocator: %v", err)
}
secondaryServiceClusterIPAllocator.EnableMetrics()
restStorage.SecondaryServiceClusterIPAllocator = secondaryServiceClusterIPRegistry
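Both the allocators above and the repair controller in the new file below need to decide, per IP, whether it belongs to the primary or the secondary Service CIDR. The controller keeps a small family-indexed map for that (networkByFamily); a standalone sketch of the same idea using only the standard library, with illustrative CIDR values:

package main

import (
	"fmt"
	"net"
)

func main() {
	// Primary and secondary Service CIDRs of a dual-stack cluster (illustrative values).
	_, primary, _ := net.ParseCIDR("10.96.0.0/16")
	_, secondary, _ := net.ParseCIDR("fd00:10:96::/112")

	// Index the configured ranges by IP family, like RepairIPAddress.networkByFamily.
	networkByFamily := map[bool]*net.IPNet{} // key: isIPv6
	networkByFamily[primary.IP.To4() == nil] = primary
	networkByFamily[secondary.IP.To4() == nil] = secondary

	for _, s := range []string{"10.96.0.10", "fd00:10:96::a", "192.168.1.1"} {
		ip := net.ParseIP(s)
		cidr := networkByFamily[ip.To4() == nil]
		// An IP outside its family's configured range is what the repair
		// loop reports as not matching the currently configured range.
		fmt.Printf("%-15s in %-18s: %v\n", s, cidr.String(), cidr.Contains(ip))
	}
}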
pkg/registry/core/service/ipallocator/controller/repairip.go (new file, 590 lines)
@@ -0,0 +1,590 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
networkinginformers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
networkinglisters "k8s.io/client-go/listers/networking/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
"k8s.io/utils/clock"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxRetries is the number of times a service will be retried before it is dropped out of the queue.
|
||||
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
|
||||
// sequence of delays between successive queuings of a service.
|
||||
//
|
||||
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
|
||||
maxRetries = 15
|
||||
workers = 5
|
||||
)
|
||||
|
||||
// Repair is a controller loop that examines all service ClusterIP allocations and logs any errors,
|
||||
// and then creates the accurate list of IPAddress objects for all allocated ClusterIPs.
|
||||
//
|
||||
// Handles:
|
||||
// * Duplicate ClusterIP assignments caused by operator action or undetected race conditions
|
||||
// * Allocations to services that were not actually created due to a crash or power loss
|
||||
// * Migrates Services from older versions of Kubernetes into the new ipallocator,
//   automatically creating the corresponding IPAddress objects
|
||||
// * IPAddress objects with wrong references or labels
|
||||
//
|
||||
// Logs about:
|
||||
// * ClusterIPs that do not match the currently configured range
|
||||
//
|
||||
// There is a one-to-one relation between Service ClusterIPs and IPAddresses.
|
||||
// The bidirectional relation is achieved using the following fields:
|
||||
// Service.Spec.ClusterIPs[i] == IPAddress.Name AND IPAddress.ParentRef == Service
|
||||
//
|
||||
// The controller uses two reconcile loops, one for Services and another for IPAddresses.
|
||||
// The Service reconcile loop verifies the bidirectional relation exists and is correct.
|
||||
// 1. Service_X [ClusterIP_X] <------> IPAddress_X [Ref:Service_X] ok
|
||||
// 2. Service_Y [ClusterIP_Y] <------> IPAddress_Y [Ref:GatewayA] !ok, wrong reference
|
||||
// 3. Service_Z [ClusterIP_Z] <------> !ok, missing IPAddress
|
||||
// 4. Service_A [ClusterIP_A] <------> IPAddress_A [Ref:Service_B] !ok, duplicate IPAddress
|
||||
// Service_B [ClusterIP_A] <------> only one service can verify the relation
|
||||
// The IPAddress reconcile loop checks that there are no orphan IPAddresses; the rest of the
// cases are covered by the Services loop
|
||||
// 1. <------> IPAddress_Z [Ref:Service_C] !ok, orphan IPAddress
|
||||
|
||||
type RepairIPAddress struct {
|
||||
client kubernetes.Interface
|
||||
interval time.Duration
|
||||
|
||||
networkByFamily map[netutils.IPFamily]*net.IPNet // networks we operate on, by their family
|
||||
|
||||
serviceLister corelisters.ServiceLister
|
||||
servicesSynced cache.InformerSynced
|
||||
|
||||
ipAddressLister networkinglisters.IPAddressLister
|
||||
ipAddressSynced cache.InformerSynced
|
||||
|
||||
svcQueue workqueue.RateLimitingInterface
|
||||
ipQueue workqueue.RateLimitingInterface
|
||||
workerLoopPeriod time.Duration
|
||||
|
||||
broadcaster events.EventBroadcaster
|
||||
recorder events.EventRecorder
|
||||
clock clock.Clock
|
||||
}
|
||||
|
||||
// NewRepairIPAddress creates a controller that periodically ensures that all clusterIPs are uniquely allocated across the cluster
|
||||
// and generates informational warnings for a cluster that is not in sync.
|
||||
func NewRepairIPAddress(interval time.Duration,
|
||||
client kubernetes.Interface,
|
||||
network *net.IPNet,
|
||||
secondaryNetwork *net.IPNet,
|
||||
serviceInformer coreinformers.ServiceInformer,
|
||||
ipAddressInformer networkinginformers.IPAddressInformer) *RepairIPAddress {
|
||||
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
|
||||
recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, "ipallocator-repair-controller")
|
||||
|
||||
networkByFamily := make(map[netutils.IPFamily]*net.IPNet)
|
||||
primary := netutils.IPFamilyOfCIDR(network)
|
||||
networkByFamily[primary] = network
|
||||
if secondaryNetwork != nil {
|
||||
secondary := netutils.IPFamilyOfCIDR(secondaryNetwork)
|
||||
networkByFamily[secondary] = secondaryNetwork
|
||||
}
|
||||
|
||||
r := &RepairIPAddress{
|
||||
interval: interval,
|
||||
client: client,
|
||||
networkByFamily: networkByFamily,
|
||||
serviceLister: serviceInformer.Lister(),
|
||||
servicesSynced: serviceInformer.Informer().HasSynced,
|
||||
ipAddressLister: ipAddressInformer.Lister(),
|
||||
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
|
||||
svcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "services"),
|
||||
ipQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"),
|
||||
workerLoopPeriod: time.Second,
|
||||
broadcaster: eventBroadcaster,
|
||||
recorder: recorder,
|
||||
clock: clock.RealClock{},
|
||||
}
|
||||
|
||||
serviceInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
r.svcQueue.Add(key)
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(old interface{}, new interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(new)
|
||||
if err == nil {
|
||||
r.svcQueue.Add(key)
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
|
||||
// key function.
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
r.svcQueue.Add(key)
|
||||
}
|
||||
},
|
||||
}, interval)
|
||||
|
||||
ipAddressInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
r.ipQueue.Add(key)
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(old interface{}, new interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(new)
|
||||
if err == nil {
|
||||
r.ipQueue.Add(key)
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
|
||||
// key function.
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
r.ipQueue.Add(key)
|
||||
}
|
||||
},
|
||||
}, interval)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
// RunUntil starts the controller until the provided stopCh is closed.
|
||||
func (r *RepairIPAddress) RunUntil(onFirstSuccess func(), stopCh chan struct{}) {
|
||||
defer r.ipQueue.ShutDown()
|
||||
defer r.svcQueue.ShutDown()
|
||||
r.broadcaster.StartRecordingToSink(stopCh)
|
||||
defer r.broadcaster.Shutdown()
|
||||
|
||||
klog.Info("Starting ipallocator-repair-controller")
|
||||
defer klog.Info("Shutting down ipallocator-repair-controller")
|
||||
|
||||
if !cache.WaitForNamedCacheSync("ipallocator-repair-controller", stopCh, r.ipAddressSynced, r.servicesSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
// First sync goes through all the Services and IPAddresses in the cache,
|
||||
// once synced, it signals the main loop and works using the handlers, since
|
||||
// it's less expensive and more optimal.
|
||||
if err := r.runOnce(); err != nil {
|
||||
runtime.HandleError(err)
|
||||
return
|
||||
}
|
||||
onFirstSuccess()
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(r.ipWorker, r.workerLoopPeriod, stopCh)
|
||||
go wait.Until(r.svcWorker, r.workerLoopPeriod, stopCh)
|
||||
}
|
||||
|
||||
<-stopCh
|
||||
}
|
||||
|
||||
// runOnce verifies the state of the ClusterIP allocations and returns an error if an unrecoverable problem occurs.
|
||||
func (r *RepairIPAddress) runOnce() error {
|
||||
return retry.RetryOnConflict(retry.DefaultBackoff, r.doRunOnce)
|
||||
}
|
||||
|
||||
// doRunOnce verifies the state of the ClusterIP allocations and returns an error if an unrecoverable problem occurs.
|
||||
func (r *RepairIPAddress) doRunOnce() error {
|
||||
services, err := r.serviceLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to refresh the service IP block: %v", err)
|
||||
}
|
||||
|
||||
// Check every Service's ClusterIP, and rebuild the state as we think it should be.
|
||||
for _, svc := range services {
|
||||
key, err := cache.MetaNamespaceKeyFunc(svc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = r.syncService(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We have checked that every Service has its corresponding IP.
|
||||
// Check that there is no IP created by the allocator without
|
||||
// a Service associated.
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
}).AsSelectorPreValidated()
|
||||
ipAddresses, err := r.ipAddressLister.List(ipLabelSelector)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to refresh the IPAddress block: %v", err)
|
||||
}
|
||||
// Check every IPAddress matches the corresponding Service, and rebuild the state as we think it should be.
|
||||
for _, ipAddress := range ipAddresses {
|
||||
key, err := cache.MetaNamespaceKeyFunc(ipAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = r.syncIPAddress(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) svcWorker() {
|
||||
for r.processNextWorkSvc() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) processNextWorkSvc() bool {
|
||||
eKey, quit := r.svcQueue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer r.svcQueue.Done(eKey)
|
||||
|
||||
err := r.syncService(eKey.(string))
|
||||
r.handleSvcErr(err, eKey)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) handleSvcErr(err error, key interface{}) {
|
||||
if err == nil {
|
||||
r.svcQueue.Forget(key)
|
||||
return
|
||||
}
|
||||
|
||||
if r.svcQueue.NumRequeues(key) < maxRetries {
|
||||
klog.V(2).InfoS("Error syncing Service, retrying", "service", key, "err", err)
|
||||
r.svcQueue.AddRateLimited(key)
|
||||
return
|
||||
}
|
||||
|
||||
klog.Warningf("Dropping Service %q out of the queue: %v", key, err)
|
||||
r.svcQueue.Forget(key)
|
||||
runtime.HandleError(err)
|
||||
}
|
||||
|
||||
// syncService reconciles the Service ClusterIPs to verify that each one has a corresponding IPAddress object associated
|
||||
func (r *RepairIPAddress) syncService(key string) error {
|
||||
var syncError error
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
svc, err := r.serviceLister.Services(namespace).Get(name)
|
||||
if err != nil {
|
||||
// nothing to do
|
||||
return nil
|
||||
}
|
||||
if !helper.IsServiceIPSet(svc) {
|
||||
// didn't need a ClusterIP
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, clusterIP := range svc.Spec.ClusterIPs {
|
||||
ip := netutils.ParseIPSloppy(clusterIP)
|
||||
if ip == nil {
|
||||
// ClusterIP is corrupt, ClusterIPs are already validated, but double checking here
|
||||
// in case there are some inconsistencies with the parsers
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotValid", "ClusterIPValidation", "Cluster IP %s is not a valid IP; please recreate Service", ip)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP %s for Service %s/%s is not a valid IP; please recreate Service", ip, svc.Namespace, svc.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
family := netutils.IPFamilyOf(ip)
|
||||
v1Family := getFamilyByIP(ip)
|
||||
network, ok := r.networkByFamily[family]
|
||||
if !ok {
|
||||
// this service is using an IPFamily no longer configured on cluster
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotValid", "ClusterIPValidation", "Cluster IP %s(%s) is of ip family that is no longer configured on cluster; please recreate Service", ip, v1Family)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is of ip family that is no longer configured on cluster; please recreate Service", v1Family, ip, svc.Namespace, svc.Name))
|
||||
continue
|
||||
}
|
||||
if !network.Contains(ip) {
|
||||
// ClusterIP is out of range
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPOutOfRange", "ClusterIPAllocation", "Cluster IP [%v]: %s is not within the configured Service CIDR %s; please recreate service", v1Family, ip, network.String())
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is not within the service CIDR %s; please recreate", v1Family, ip, svc.Namespace, svc.Name, network.String()))
|
||||
continue
|
||||
}
|
||||
|
||||
// Get the IPAddress object associated to the ClusterIP
|
||||
ipAddress, err := r.ipAddressLister.Get(ip.String())
|
||||
if apierrors.IsNotFound(err) {
|
||||
// ClusterIP doesn't seem to be allocated, create it.
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "Cluster IP [%v]: %s is not allocated; repairing", v1Family, ip)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is not allocated; repairing", v1Family, ip, svc.Namespace, svc.Name))
|
||||
_, err := r.client.NetworkingV1alpha1().IPAddresses().Create(context.Background(), newIPAddress(ip.String(), svc), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "UnknownError", "ClusterIPAllocation", "Unable to allocate ClusterIP [%v]: %s due to an unknown error", v1Family, ip)
|
||||
return fmt.Errorf("unable to allocate ClusterIP [%v]: %s for Service %s/%s due to an unknown error, will retry later: %v", v1Family, ip, svc.Namespace, svc.Name, err)
|
||||
}
|
||||
|
||||
// IPAddress that belongs to a Service must reference a Service
|
||||
if ipAddress.Spec.ParentRef.Group != "" ||
|
||||
ipAddress.Spec.ParentRef.Resource != "services" {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "the ClusterIP [%v]: %s for Service %s/%s has a wrong reference; repairing", v1Family, ip, svc.Namespace, svc.Name)
|
||||
if err := r.recreateIPAddress(ipAddress.Name, svc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// IPAddress that belongs to a Service must reference the current Service
|
||||
if ipAddress.Spec.ParentRef.Namespace != svc.Namespace ||
|
||||
ipAddress.Spec.ParentRef.Name != svc.Name {
|
||||
// verify that there are no two Services with the same IP, otherwise
|
||||
// it will keep deleting and recreating the same IPAddress changing the reference
|
||||
refService, err := r.serviceLister.Services(ipAddress.Spec.ParentRef.Namespace).Get(ipAddress.Spec.ParentRef.Name)
|
||||
if err != nil {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "the ClusterIP [%v]: %s for Service %s/%s has a wrong reference; repairing", v1Family, ip, svc.Namespace, svc.Name)
|
||||
if err := r.recreateIPAddress(ipAddress.Name, svc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
// the IPAddress is duplicated but the current Service is not the one referenced; the Service has to be recreated
|
||||
for _, clusterIP := range refService.Spec.ClusterIPs {
|
||||
if ipAddress.Name == clusterIP {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPAlreadyAllocated", "ClusterIPAllocation", "Cluster IP [%v]:%s was assigned to multiple services; please recreate service", family, ip)
|
||||
runtime.HandleError(fmt.Errorf("the cluster IP [%v]:%s for service %s/%s was assigned to other services %s/%s; please recreate", family, ip, svc.Namespace, svc.Name, refService.Namespace, refService.Name))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IPAddress must have the corresponding labels assigned by the allocator
|
||||
if !verifyIPAddressLabels(ipAddress) {
|
||||
if err := r.recreateIPAddress(ipAddress.Name, svc); err != nil {
|
||||
return err
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
}
|
||||
return syncError
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) recreateIPAddress(name string, svc *v1.Service) error {
|
||||
err := r.client.NetworkingV1alpha1().IPAddresses().Delete(context.Background(), name, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
_, err = r.client.NetworkingV1alpha1().IPAddresses().Create(context.Background(), newIPAddress(name, svc), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) ipWorker() {
|
||||
for r.processNextWorkIp() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) processNextWorkIp() bool {
|
||||
eKey, quit := r.ipQueue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer r.ipQueue.Done(eKey)
|
||||
|
||||
err := r.syncIPAddress(eKey.(string))
|
||||
r.handleIpErr(err, eKey)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) handleIpErr(err error, key interface{}) {
|
||||
if err == nil {
|
||||
r.ipQueue.Forget(key)
|
||||
return
|
||||
}
|
||||
|
||||
if r.ipQueue.NumRequeues(key) < maxRetries {
|
||||
klog.V(2).InfoS("Error syncing IPAddress, retrying", "ipaddress", key, "err", err)
|
||||
r.ipQueue.AddRateLimited(key)
|
||||
return
|
||||
}
|
||||
|
||||
klog.Warningf("Dropping Service %q out of the queue: %v", key, err)
|
||||
r.ipQueue.Forget(key)
|
||||
runtime.HandleError(err)
|
||||
}
|
||||
|
||||
// syncIPAddress verifies that the IPAddresses owned by the ipallocator controller reference an existing Service
|
||||
// to avoid leaking IPAddresses. IPAddresses that are owned by other controllers are not processed to avoid hotloops.
|
||||
// IPAddresses that reference Services and are part of a Service's ClusterIPs are validated in the syncService loop.
|
||||
func (r *RepairIPAddress) syncIPAddress(key string) error {
|
||||
ipAddress, err := r.ipAddressLister.Get(key)
|
||||
if err != nil {
|
||||
// nothing to do
|
||||
return nil
|
||||
}
|
||||
|
||||
// not managed by this controller
|
||||
if !managedByController(ipAddress) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// does not reference a Service but was created by the service allocator; something else has changed it, delete it
|
||||
if ipAddress.Spec.ParentRef.Group != "" ||
|
||||
ipAddress.Spec.ParentRef.Resource != "services" {
|
||||
runtime.HandleError(fmt.Errorf("IPAddress %s appears to have been modified, not referencing a Service %v: cleaning up", ipAddress.Name, ipAddress.Spec.ParentRef))
|
||||
r.recorder.Eventf(ipAddress, nil, v1.EventTypeWarning, "IPAddressNotAllocated", "IPAddressAllocation", "IPAddress %s appears to have been modified, not referencing a Service %v: cleaning up", ipAddress.Name, ipAddress.Spec.ParentRef)
|
||||
err := r.client.NetworkingV1alpha1().IPAddresses().Delete(context.Background(), ipAddress.Name, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
svc, err := r.serviceLister.Services(ipAddress.Spec.ParentRef.Namespace).Get(ipAddress.Spec.ParentRef.Name)
|
||||
if apierrors.IsNotFound(err) {
|
||||
// clean up any IPAddress without an owner reference if it was created more than 60 seconds ago (the default request timeout on the kube-apiserver)
|
||||
// This is required because during Service creation there is a window in which the IPAddress object already exists but the Service is still being created
|
||||
// Assume that CreationTimestamp exists.
|
||||
ipLifetime := r.clock.Now().Sub(ipAddress.CreationTimestamp.Time)
|
||||
gracePeriod := 60 * time.Second
|
||||
if ipLifetime > gracePeriod {
|
||||
runtime.HandleError(fmt.Errorf("IPAddress %s appears to have leaked: cleaning up", ipAddress.Name))
|
||||
r.recorder.Eventf(ipAddress, nil, v1.EventTypeWarning, "IPAddressNotAllocated", "IPAddressAllocation", "IPAddress: %s for Service %s/%s appears to have leaked: cleaning up", ipAddress.Name, ipAddress.Spec.ParentRef.Namespace, ipAddress.Spec.ParentRef.Name)
|
||||
err := r.client.NetworkingV1alpha1().IPAddresses().Delete(context.Background(), ipAddress.Name, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// requeue after the grace period
|
||||
r.ipQueue.AddAfter(key, gracePeriod-ipLifetime)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
runtime.HandleError(fmt.Errorf("unable to get parent Service for IPAddress %s due to an unknown error: %v", ipAddress, err))
|
||||
r.recorder.Eventf(ipAddress, nil, v1.EventTypeWarning, "UnknownError", "IPAddressAllocation", "Unable to get parent Service for IPAddress %s due to an unknown error", ipAddress)
|
||||
return err
|
||||
}
|
||||
// The Service exists; the Service loop has already checked that the Service-to-IPAddress relation is correct,
// but we also have to check the reverse, that the IPAddress-to-Service relation is correct
|
||||
for _, clusterIP := range svc.Spec.ClusterIPs {
|
||||
if ipAddress.Name == clusterIP {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
runtime.HandleError(fmt.Errorf("the IPAddress: %s for Service %s/%s has a wrong reference %#v; cleaning up", ipAddress.Name, svc.Name, svc.Namespace, ipAddress.Spec.ParentRef))
|
||||
r.recorder.Eventf(ipAddress, nil, v1.EventTypeWarning, "IPAddressWrongReference", "IPAddressAllocation", "IPAddress: %s for Service %s/%s has a wrong reference; cleaning up", ipAddress.Name, svc.Namespace, svc.Name)
|
||||
err = r.client.NetworkingV1alpha1().IPAddresses().Delete(context.Background(), ipAddress.Name, metav1.DeleteOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
func newIPAddress(name string, svc *v1.Service) *networkingv1alpha1.IPAddress {
|
||||
family := string(v1.IPv4Protocol)
|
||||
if netutils.IsIPv6String(name) {
|
||||
family = string(v1.IPv6Protocol)
|
||||
}
|
||||
return &networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: family,
|
||||
networkingv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
},
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: serviceToRef(svc),
|
||||
},
|
||||
}
|
||||
}
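// Illustrative sketch (not part of the original change): for a Service
// "bar/test-svc" with ClusterIP 10.0.1.1, newIPAddress produces an object named
// after the IP, labeled with its family and with this controller as manager.
//
//	svc := &v1.Service{ObjectMeta: metav1.ObjectMeta{Namespace: "bar", Name: "test-svc"}}
//	ipAddr := newIPAddress("10.0.1.1", svc)
//	// ipAddr.Name == "10.0.1.1"
//	// ipAddr.Labels[networkingv1alpha1.LabelIPAddressFamily] == "IPv4"
//	// ipAddr.Labels[networkingv1alpha1.LabelManagedBy] == ipallocator.ControllerName
//	// ipAddr.Spec.ParentRef references services "bar/test-svc"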
|
||||
|
||||
func serviceToRef(svc *v1.Service) *networkingv1alpha1.ParentReference {
|
||||
if svc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &networkingv1alpha1.ParentReference{
|
||||
Group: "",
|
||||
Resource: "services",
|
||||
Namespace: svc.Namespace,
|
||||
Name: svc.Name,
|
||||
UID: svc.UID,
|
||||
}
|
||||
}
|
||||
|
||||
func getFamilyByIP(ip net.IP) v1.IPFamily {
|
||||
if netutils.IsIPv6(ip) {
|
||||
return v1.IPv6Protocol
|
||||
}
|
||||
return v1.IPv4Protocol
|
||||
}
|
||||
|
||||
// managedByController returns true if the provided IPAddress
// is managed by this ipallocator controller.
|
||||
func managedByController(ip *networkingv1alpha1.IPAddress) bool {
|
||||
managedBy, ok := ip.Labels[networkingv1alpha1.LabelManagedBy]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
return managedBy == ipallocator.ControllerName
|
||||
}
|
||||
|
||||
func verifyIPAddressLabels(ip *networkingv1alpha1.IPAddress) bool {
|
||||
labelFamily, ok := ip.Labels[networkingv1alpha1.LabelIPAddressFamily]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
|
||||
family := string(v1.IPv4Protocol)
|
||||
if netutils.IsIPv6String(ip.Name) {
|
||||
family = string(v1.IPv6Protocol)
|
||||
}
|
||||
if family != labelFamily {
|
||||
return false
|
||||
}
|
||||
return managedByController(ip)
|
||||
}
|
@@ -0,0 +1,446 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package controller
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
var (
|
||||
serviceCIDRv4 = "10.0.0.0/16"
|
||||
serviceCIDRv6 = "2001:db8::/64"
|
||||
)
|
||||
|
||||
type fakeRepair struct {
|
||||
*RepairIPAddress
|
||||
serviceStore cache.Store
|
||||
ipAddressStore cache.Store
|
||||
}
|
||||
|
||||
func newFakeRepair() (*fake.Clientset, *fakeRepair) {
|
||||
fakeClient := fake.NewSimpleClientset()
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(fakeClient, 0*time.Second)
|
||||
serviceInformer := informerFactory.Core().V1().Services()
|
||||
serviceIndexer := serviceInformer.Informer().GetIndexer()
|
||||
|
||||
ipInformer := informerFactory.Networking().V1alpha1().IPAddresses()
|
||||
ipIndexer := ipInformer.Informer().GetIndexer()
|
||||
|
||||
fakeClient.PrependReactor("create", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
ip := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.IPAddress)
|
||||
err := ipIndexer.Add(ip)
|
||||
return false, ip, err
|
||||
}))
|
||||
fakeClient.PrependReactor("update", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
ip := action.(k8stesting.UpdateAction).GetObject().(*networkingv1alpha1.IPAddress)
|
||||
return false, ip, fmt.Errorf("IPAddress is immutable after creation")
|
||||
}))
|
||||
fakeClient.PrependReactor("delete", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
ip := action.(k8stesting.DeleteAction).GetName()
|
||||
err := ipIndexer.Delete(ip)
|
||||
return false, &networkingv1alpha1.IPAddress{}, err
|
||||
}))
|
||||
|
||||
_, primary, err := netutils.ParseCIDRSloppy(serviceCIDRv4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, secondary, err := netutils.ParseCIDRSloppy(serviceCIDRv6)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
r := NewRepairIPAddress(0*time.Second,
|
||||
fakeClient,
|
||||
primary,
|
||||
secondary,
|
||||
serviceInformer,
|
||||
ipInformer,
|
||||
)
|
||||
return fakeClient, &fakeRepair{r, serviceIndexer, ipIndexer}
|
||||
}
|
||||
|
||||
func TestRepairServiceIP(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
svcs []*v1.Service
|
||||
ipAddresses []*networkingv1alpha1.IPAddress
|
||||
expectedIPs []string
|
||||
actions [][]string // verb and resource
|
||||
events []string
|
||||
}{
|
||||
{
|
||||
name: "no changes needed single stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1"},
|
||||
actions: [][]string{},
|
||||
events: []string{},
|
||||
},
|
||||
{
|
||||
name: "no changes needed dual stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1", "2001:db8::10"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
newIPAddress("2001:db8::10", newService("test-svc", []string{"2001:db8::10"})),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1", "2001:db8::10"},
|
||||
actions: [][]string{},
|
||||
events: []string{},
|
||||
},
|
||||
// these two cases simulate migrating from bitmaps to IPAddress objects
|
||||
{
|
||||
name: "create IPAddress single stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
expectedIPs: []string{"10.0.1.1"},
|
||||
actions: [][]string{{"create", "ipaddresses"}},
|
||||
events: []string{"Warning ClusterIPNotAllocated Cluster IP [IPv4]: 10.0.1.1 is not allocated; repairing"},
|
||||
},
|
||||
{
|
||||
name: "create IPAddresses dual stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1", "2001:db8::10"})},
|
||||
expectedIPs: []string{"10.0.1.1", "2001:db8::10"},
|
||||
actions: [][]string{{"create", "ipaddresses"}, {"create", "ipaddresses"}},
|
||||
events: []string{
|
||||
"Warning ClusterIPNotAllocated Cluster IP [IPv4]: 10.0.1.1 is not allocated; repairing",
|
||||
"Warning ClusterIPNotAllocated Cluster IP [IPv6]: 2001:db8::10 is not allocated; repairing",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "reconcile IPAddress single stack wrong reference",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc2", []string{"10.0.1.1"})),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1"},
|
||||
actions: [][]string{{"delete", "ipaddresses"}, {"create", "ipaddresses"}},
|
||||
events: []string{"Warning ClusterIPNotAllocated the ClusterIP [IPv4]: 10.0.1.1 for Service bar/test-svc has a wrong reference; repairing"},
|
||||
},
|
||||
{
|
||||
name: "reconcile IPAddresses dual stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1", "2001:db8::10"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc2", []string{"10.0.1.1"})),
|
||||
newIPAddress("2001:db8::10", newService("test-svc2", []string{"2001:db8::10"})),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1", "2001:db8::10"},
|
||||
actions: [][]string{{"delete", "ipaddresses"}, {"create", "ipaddresses"}, {"delete", "ipaddresses"}, {"create", "ipaddresses"}},
|
||||
events: []string{
|
||||
"Warning ClusterIPNotAllocated the ClusterIP [IPv4]: 10.0.1.1 for Service bar/test-svc has a wrong reference; repairing",
|
||||
"Warning ClusterIPNotAllocated the ClusterIP [IPv6]: 2001:db8::10 for Service bar/test-svc has a wrong reference; repairing",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "one IP out of range",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"192.168.1.1", "2001:db8::10"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("192.168.1.1", newService("test-svc", []string{"192.168.1.1"})),
|
||||
newIPAddress("2001:db8::10", newService("test-svc", []string{"2001:db8::10"})),
|
||||
},
|
||||
expectedIPs: []string{"2001:db8::10"},
|
||||
actions: [][]string{},
|
||||
events: []string{"Warning ClusterIPOutOfRange Cluster IP [IPv4]: 192.168.1.1 is not within the configured Service CIDR 10.0.0.0/16; please recreate service"},
|
||||
},
|
||||
{
|
||||
name: "one IP orphan",
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
},
|
||||
actions: [][]string{{"delete", "ipaddresses"}},
|
||||
events: []string{"Warning IPAddressNotAllocated IPAddress: 10.0.1.1 for Service bar/test-svc appears to have leaked: cleaning up"},
|
||||
},
|
||||
{
|
||||
name: "Two IPAddresses referencing the same service",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
newIPAddress("10.0.1.2", newService("test-svc", []string{"10.0.1.1"})),
|
||||
},
|
||||
actions: [][]string{{"delete", "ipaddresses"}},
|
||||
events: []string{"Warning IPAddressWrongReference IPAddress: 10.0.1.2 for Service bar/test-svc has a wrong reference; cleaning up"},
|
||||
},
|
||||
{
|
||||
name: "Two Services with same ClusterIP",
|
||||
svcs: []*v1.Service{
|
||||
newService("test-svc", []string{"10.0.1.1"}),
|
||||
newService("test-svc2", []string{"10.0.1.1"}),
|
||||
},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc2", []string{"10.0.1.1"})),
|
||||
},
|
||||
events: []string{"Warning ClusterIPAlreadyAllocated Cluster IP [4]:10.0.1.1 was assigned to multiple services; please recreate service"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
|
||||
c, r := newFakeRepair()
|
||||
// override for testing
|
||||
r.servicesSynced = func() bool { return true }
|
||||
r.ipAddressSynced = func() bool { return true }
|
||||
recorder := events.NewFakeRecorder(100)
|
||||
r.recorder = recorder
|
||||
for _, svc := range test.svcs {
|
||||
err := r.serviceStore.Add(svc)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error trying to add Service %v object: %v", svc, err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, ip := range test.ipAddresses {
|
||||
ip.CreationTimestamp = metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
err := r.ipAddressStore.Add(ip)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error trying to add IPAddress %s object: %v", ip, err)
|
||||
}
|
||||
}
|
||||
|
||||
err := r.runOnce()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for _, ip := range test.expectedIPs {
|
||||
_, err := r.ipAddressLister.Get(ip)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error trying to get IPAddress %s object: %v", ip, err)
|
||||
}
|
||||
}
|
||||
|
||||
expectAction(t, c.Actions(), test.actions)
|
||||
expectEvents(t, recorder.Events, test.events)
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestRepairIPAddress_syncIPAddress(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ip *networkingv1alpha1.IPAddress
|
||||
actions [][]string // verb and resource
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "correct ipv4 address",
|
||||
ip: &networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "10.0.1.1",
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(v1.IPv4Protocol),
|
||||
networkingv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
},
|
||||
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: &networkingv1alpha1.ParentReference{
|
||||
Group: "",
|
||||
Resource: "services",
|
||||
Name: "foo",
|
||||
Namespace: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "correct ipv6 address",
|
||||
ip: &networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "2001:db8::11",
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(v1.IPv6Protocol),
|
||||
networkingv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
},
|
||||
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: &networkingv1alpha1.ParentReference{
|
||||
Group: "",
|
||||
Resource: "services",
|
||||
Name: "foo",
|
||||
Namespace: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "not managed by this controller",
|
||||
ip: &networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "2001:db8::11",
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(v1.IPv6Protocol),
|
||||
networkingv1alpha1.LabelManagedBy: "controller-foo",
|
||||
},
|
||||
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: &networkingv1alpha1.ParentReference{
|
||||
Group: "networking.gateway.k8s.io",
|
||||
Resource: "gateway",
|
||||
Name: "foo",
|
||||
Namespace: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "out of range",
|
||||
ip: &networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "fd00:db8::11",
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(v1.IPv6Protocol),
|
||||
networkingv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
},
|
||||
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: &networkingv1alpha1.ParentReference{
|
||||
Group: "",
|
||||
Resource: "services",
|
||||
Name: "foo",
|
||||
Namespace: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "leaked ip",
|
||||
ip: &networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "10.0.1.1",
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(v1.IPv6Protocol),
|
||||
networkingv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
},
|
||||
CreationTimestamp: metav1.Date(2012, 1, 1, 0, 0, 0, 0, time.UTC),
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: &networkingv1alpha1.ParentReference{
|
||||
Group: "",
|
||||
Resource: "services",
|
||||
Name: "noexist",
|
||||
Namespace: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
actions: [][]string{{"delete", "ipaddresses"}},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
c, r := newFakeRepair()
|
||||
err := r.ipAddressStore.Add(tt.ip)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = r.serviceStore.Add(newService("foo", []string{tt.ip.Name}))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// override for testing
|
||||
r.servicesSynced = func() bool { return true }
|
||||
r.ipAddressSynced = func() bool { return true }
|
||||
recorder := events.NewFakeRecorder(100)
|
||||
r.recorder = recorder
|
||||
if err := r.syncIPAddress(tt.ip.Name); (err != nil) != tt.wantErr {
|
||||
t.Errorf("RepairIPAddress.syncIPAddress() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
expectAction(t, c.Actions(), tt.actions)
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func newService(name string, ips []string) *v1.Service {
|
||||
if len(ips) == 0 {
|
||||
return nil
|
||||
}
|
||||
svc := &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{Namespace: "bar", Name: name},
|
||||
Spec: v1.ServiceSpec{
|
||||
ClusterIP: ips[0],
|
||||
ClusterIPs: ips,
|
||||
Type: v1.ServiceTypeClusterIP,
|
||||
},
|
||||
}
|
||||
return svc
|
||||
}
|
||||
|
||||
func expectAction(t *testing.T, actions []k8stesting.Action, expected [][]string) {
|
||||
t.Helper()
|
||||
if len(actions) != len(expected) {
|
||||
t.Fatalf("Expected at least %d actions, got %d \ndiff: %v", len(expected), len(actions), cmp.Diff(expected, actions))
|
||||
}
|
||||
|
||||
for i, action := range actions {
|
||||
verb := expected[i][0]
|
||||
if action.GetVerb() != verb {
|
||||
t.Errorf("Expected action %d verb to be %s, got %s", i, verb, action.GetVerb())
|
||||
}
|
||||
resource := expected[i][1]
|
||||
if action.GetResource().Resource != resource {
|
||||
t.Errorf("Expected action %d resource to be %s, got %s", i, resource, action.GetResource().Resource)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func expectEvents(t *testing.T, actual <-chan string, expected []string) {
|
||||
t.Helper()
|
||||
c := time.After(wait.ForeverTestTimeout)
|
||||
for _, e := range expected {
|
||||
select {
|
||||
case a := <-actual:
|
||||
if e != a {
|
||||
t.Errorf("Expected event %q, got %q", e, a)
|
||||
return
|
||||
}
|
||||
case <-c:
|
||||
t.Errorf("Expected event %q, got nothing", e)
|
||||
// continue iterating to print all expected events
|
||||
}
|
||||
}
|
||||
for {
|
||||
select {
|
||||
case a := <-actual:
|
||||
t.Errorf("Unexpected event: %q", a)
|
||||
default:
|
||||
return // No more events, as expected.
|
||||
}
|
||||
}
|
||||
}
|
566
pkg/registry/core/service/ipallocator/ipallocator.go
Normal file
@@ -0,0 +1,566 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipallocator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/netip"
|
||||
"time"
|
||||
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
networkingv1alpha1informers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||
networkingv1alpha1client "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
|
||||
networkingv1alpha1listers "k8s.io/client-go/listers/networking/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/klog/v2"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
netutils "k8s.io/utils/net"
|
||||
utiltrace "k8s.io/utils/trace"
|
||||
)
|
||||
|
||||
const ControllerName = "ipallocator.k8s.io"
|
||||
|
||||
// Allocator implements the current ipallocator interface using the IPAddress
// API object and an informer as the backend.
|
||||
type Allocator struct {
|
||||
cidr *net.IPNet
|
||||
prefix netip.Prefix
|
||||
firstAddress netip.Addr // first IP address within the range
|
||||
offsetAddress netip.Addr // IP address that delimits the upper and lower subranges
|
||||
lastAddress netip.Addr // last IP address within the range
|
||||
family api.IPFamily // family is the IP family of this range
|
||||
|
||||
rangeOffset int // subdivides the assigned IP range to prefer dynamic allocation from the upper range
|
||||
size uint64 // cap the total number of IPs available to maxInt64
|
||||
|
||||
client networkingv1alpha1client.NetworkingV1alpha1Interface
|
||||
ipAddressLister networkingv1alpha1listers.IPAddressLister
|
||||
ipAddressSynced cache.InformerSynced
|
||||
|
||||
// metrics is a metrics recorder that can be disabled
|
||||
metrics metricsRecorderInterface
|
||||
metricLabel string
|
||||
|
||||
rand *rand.Rand
|
||||
}
|
||||
|
||||
var _ Interface = &Allocator{}
|
||||
|
||||
// NewIPAllocator returns an IP allocator associated with a network range
// that uses IPAddress objects to track the assigned IP addresses,
|
||||
// using an informer cache as storage.
|
||||
func NewIPAllocator(
|
||||
cidr *net.IPNet,
|
||||
client networkingv1alpha1client.NetworkingV1alpha1Interface,
|
||||
ipAddressInformer networkingv1alpha1informers.IPAddressInformer,
|
||||
) (*Allocator, error) {
|
||||
prefix, err := netip.ParsePrefix(cidr.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if prefix.Addr().Is6() && prefix.Bits() < 64 {
|
||||
return nil, fmt.Errorf("shortest allowed prefix length for service CIDR is 64, got %d", prefix.Bits())
|
||||
}
|
||||
|
||||
// TODO: use the utils/net function once it is available
|
||||
size := hostsPerNetwork(cidr)
|
||||
var family api.IPFamily
|
||||
if netutils.IsIPv6CIDR(cidr) {
|
||||
family = api.IPv6Protocol
|
||||
} else {
|
||||
family = api.IPv4Protocol
|
||||
}
|
||||
// Caching the first, offset and last addresses allows us to optimize
|
||||
// the search loops by using the netip.Addr iterator instead
|
||||
// of having to do conversions with IP addresses.
|
||||
// Don't allocate the network's ".0" address.
|
||||
ipFirst := prefix.Masked().Addr().Next()
|
||||
// Use the broadcast address as last address for IPv6
|
||||
ipLast, err := broadcastAddress(prefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// For IPv4 don't use the network's broadcast address
|
||||
if family == api.IPv4Protocol {
|
||||
ipLast = ipLast.Prev()
|
||||
}
|
||||
// KEP-3070: Reserve Service IP Ranges For Dynamic and Static IP Allocation
|
||||
// calculate the subrange offset
|
||||
rangeOffset := calculateRangeOffset(cidr)
|
||||
offsetAddress, err := addOffsetAddress(ipFirst, uint64(rangeOffset))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
a := &Allocator{
|
||||
cidr: cidr,
|
||||
prefix: prefix,
|
||||
firstAddress: ipFirst,
|
||||
lastAddress: ipLast,
|
||||
rangeOffset: rangeOffset,
|
||||
offsetAddress: offsetAddress,
|
||||
size: size,
|
||||
family: family,
|
||||
client: client,
|
||||
ipAddressLister: ipAddressInformer.Lister(),
|
||||
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
|
||||
metrics: &emptyMetricsRecorder{}, // disabled by default
|
||||
metricLabel: cidr.String(),
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
|
||||
return a, nil
|
||||
}
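// Illustrative wiring sketch (an assumption, not code from this change):
// the allocator is expected to be built from a typed networking/v1alpha1
// client and the shared IPAddress informer. Here "client" stands for a
// kubernetes.Interface and "informers" for k8s.io/client-go/informers.
//
//	_, cidr, _ := netutils.ParseCIDRSloppy("10.0.0.0/16")
//	factory := informers.NewSharedInformerFactory(client, 0)
//	alloc, err := NewIPAllocator(cidr,
//		client.NetworkingV1alpha1(),
//		factory.Networking().V1alpha1().IPAddresses())
//	if err != nil {
//		// handle the error
//	}
//	_ = alloc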
|
||||
|
||||
func (a *Allocator) createIPAddress(name string, svc *api.Service, scope string) error {
|
||||
ipAddress := networkingv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(a.IPFamily()),
|
||||
networkingv1alpha1.LabelManagedBy: ControllerName,
|
||||
},
|
||||
},
|
||||
Spec: networkingv1alpha1.IPAddressSpec{
|
||||
ParentRef: serviceToRef(svc),
|
||||
},
|
||||
}
|
||||
_, err := a.client.IPAddresses().Create(context.Background(), &ipAddress, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
// update metrics
|
||||
a.metrics.incrementAllocationErrors(a.metricLabel, scope)
|
||||
if apierrors.IsAlreadyExists(err) {
|
||||
return ErrAllocated
|
||||
}
|
||||
return err
|
||||
}
|
||||
// update metrics
|
||||
a.metrics.incrementAllocations(a.metricLabel, scope)
|
||||
a.metrics.setAllocated(a.metricLabel, a.Used())
|
||||
a.metrics.setAvailable(a.metricLabel, a.Free())
|
||||
return nil
|
||||
}
|
||||
|
||||
// Allocate attempts to reserve the provided IP. ErrNotInRange or
|
||||
// ErrAllocated will be returned if the IP is not valid for this range
|
||||
// or has already been reserved. ErrFull will be returned if there
|
||||
// are no addresses left.
|
||||
// Only for testing, it will fail to create the IPAddress object because
|
||||
// the Service reference is required.
|
||||
func (a *Allocator) Allocate(ip net.IP) error {
|
||||
return a.AllocateService(nil, ip)
|
||||
}
|
||||
|
||||
// AllocateService attempts to reserve the provided IP. ErrNotInRange or
|
||||
// ErrAllocated will be returned if the IP is not valid for this range
|
||||
// or has already been reserved. ErrFull will be returned if there
|
||||
// are no addresses left.
|
||||
func (a *Allocator) AllocateService(svc *api.Service, ip net.IP) error {
|
||||
return a.allocateService(svc, ip, dryRunFalse)
|
||||
}
|
||||
|
||||
func (a *Allocator) allocateService(svc *api.Service, ip net.IP, dryRun bool) error {
|
||||
if !a.ipAddressSynced() {
|
||||
return fmt.Errorf("allocator not ready")
|
||||
}
|
||||
addr, err := netip.ParseAddr(ip.String())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// check address is within the range of available addresses
|
||||
if addr.Less(a.firstAddress) || // requested address is lower than the first address in the subnet
|
||||
a.lastAddress.Less(addr) { // the last address in the subnet is lower than the requested address
|
||||
if !dryRun {
|
||||
// update metrics
|
||||
a.metrics.incrementAllocationErrors(a.metricLabel, "static")
|
||||
}
|
||||
return &ErrNotInRange{ip, a.prefix.String()}
|
||||
}
|
||||
if dryRun {
|
||||
return nil
|
||||
}
|
||||
return a.createIPAddress(ip.String(), svc, "static")
|
||||
}
|
||||
|
||||
// AllocateNext returns an IP address that wasn't allocated yet.
|
||||
// Only for testing, it will fail to create the IPAddress object because
|
||||
// the Service reference is required.
|
||||
func (a *Allocator) AllocateNext() (net.IP, error) {
|
||||
return a.AllocateNextService(nil)
|
||||
}
|
||||
|
||||
// AllocateNextService returns an IP address that wasn't allocated yet.
|
||||
func (a *Allocator) AllocateNextService(svc *api.Service) (net.IP, error) {
|
||||
return a.allocateNextService(svc, dryRunFalse)
|
||||
}
|
||||
|
||||
// allocateNextService tries to allocate a free IP address within the subnet.
|
||||
// If the subnet is big enough, it partitions the subnet into two subranges,
|
||||
// delimited by a.rangeOffset.
|
||||
// It tries to allocate a free IP address from the upper subnet first and
|
||||
// falls back to the lower subnet.
|
||||
// It starts allocating from a random IP within each range.
|
||||
func (a *Allocator) allocateNextService(svc *api.Service, dryRun bool) (net.IP, error) {
|
||||
if !a.ipAddressSynced() {
|
||||
return nil, fmt.Errorf("allocator not ready")
|
||||
}
|
||||
if dryRun {
|
||||
// Don't bother finding a free value. It's racy and not worth the
|
||||
// effort to plumb any further.
|
||||
return a.CIDR().IP, nil
|
||||
}
|
||||
|
||||
trace := utiltrace.New("allocate dynamic ClusterIP address")
|
||||
defer trace.LogIfLong(500 * time.Millisecond)
|
||||
|
||||
// rand.Int63n panics for n <= 0 so we need to avoid problems when
|
||||
// converting from uint64 to int64
|
||||
rangeSize := a.size - uint64(a.rangeOffset)
|
||||
var offset uint64
|
||||
switch {
|
||||
case rangeSize >= math.MaxInt64:
|
||||
offset = rand.Uint64()
|
||||
case rangeSize == 0:
|
||||
return net.IP{}, ErrFull
|
||||
default:
|
||||
offset = uint64(a.rand.Int63n(int64(rangeSize)))
|
||||
}
|
||||
iterator := ipIterator(a.offsetAddress, a.lastAddress, offset)
|
||||
ip, err := a.allocateFromRange(iterator, svc)
|
||||
if err == nil {
|
||||
return ip, nil
|
||||
}
|
||||
// check the lower range
|
||||
if a.rangeOffset != 0 {
|
||||
offset = uint64(a.rand.Intn(a.rangeOffset))
|
||||
iterator = ipIterator(a.firstAddress, a.offsetAddress.Prev(), offset)
|
||||
ip, err = a.allocateFromRange(iterator, svc)
|
||||
if err == nil {
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
// update metrics
|
||||
a.metrics.incrementAllocationErrors(a.metricLabel, "dynamic")
|
||||
return net.IP{}, ErrFull
|
||||
}
|
||||
|
||||
// ipIterator returns a function that iterates over all the IP addresses
// in the range defined by the first and last address (both included).
// It starts iterating at the position defined by the offset, wrapping around,
// and returns an invalid address to indicate it has finished.
|
||||
func ipIterator(first netip.Addr, last netip.Addr, offset uint64) func() netip.Addr {
|
||||
// There are no modulo operations for IP addresses
|
||||
modulo := func(addr netip.Addr) netip.Addr {
|
||||
if addr.Compare(last) == 1 {
|
||||
return first
|
||||
}
|
||||
return addr
|
||||
}
|
||||
next := func(addr netip.Addr) netip.Addr {
|
||||
return modulo(addr.Next())
|
||||
}
|
||||
start, err := addOffsetAddress(first, offset)
|
||||
if err != nil {
|
||||
return func() netip.Addr { return netip.Addr{} }
|
||||
}
|
||||
start = modulo(start)
|
||||
ip := start
|
||||
seen := false
|
||||
return func() netip.Addr {
|
||||
value := ip
|
||||
// is the last or the first iteration
|
||||
if value == start {
|
||||
if seen {
|
||||
return netip.Addr{}
|
||||
}
|
||||
seen = true
|
||||
}
|
||||
ip = next(ip)
|
||||
return value
|
||||
}
|
||||
|
||||
}
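// Illustrative sketch (not part of the original change): iterating a small
// range with an offset wraps around and visits every address exactly once.
//
//	first := netip.MustParseAddr("192.168.0.1")
//	last := netip.MustParseAddr("192.168.0.4")
//	it := ipIterator(first, last, 2) // start at 192.168.0.3
//	for ip := it(); ip.IsValid(); ip = it() {
//		fmt.Println(ip) // .3, .4, .1, .2, then an invalid Addr ends the loop
//	}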
|
||||
|
||||
// allocateFromRange allocates a free IP address from the range of
|
||||
// IPs between the first and last address (both included), starting
|
||||
// from the start address.
|
||||
// TODO: this is a linear search, it can be optimized.
|
||||
func (a *Allocator) allocateFromRange(iterator func() netip.Addr, svc *api.Service) (net.IP, error) {
|
||||
for {
|
||||
ip := iterator()
|
||||
if !ip.IsValid() {
|
||||
break
|
||||
}
|
||||
name := ip.String()
|
||||
_, err := a.ipAddressLister.Get(name)
|
||||
// continue if the ip already exists
|
||||
if err == nil {
|
||||
continue
|
||||
}
|
||||
if !apierrors.IsNotFound(err) {
|
||||
klog.Infof("unexpected error: %v", err)
|
||||
continue
|
||||
}
|
||||
// address is not present on the cache, try to allocate it
|
||||
err = a.createIPAddress(name, svc, "dynamic")
|
||||
// an error can happen if there is a race and our informer was not updated
|
||||
// swallow the error and try with the next IP address
|
||||
if err != nil {
|
||||
klog.Infof("can not create IPAddress %s: %v", name, err)
|
||||
continue
|
||||
}
|
||||
return ip.AsSlice(), nil
|
||||
}
|
||||
return net.IP{}, ErrFull
|
||||
}
|
||||
|
||||
// Release releases the IP back to the pool. Releasing an
|
||||
// unallocated IP or an IP out of the range is a no-op and
|
||||
// returns no error.
|
||||
func (a *Allocator) Release(ip net.IP) error {
|
||||
return a.release(ip, dryRunFalse)
|
||||
}
|
||||
|
||||
func (a *Allocator) release(ip net.IP, dryRun bool) error {
|
||||
if !a.ipAddressSynced() {
|
||||
return fmt.Errorf("allocator not ready")
|
||||
}
|
||||
if dryRun {
|
||||
return nil
|
||||
}
|
||||
name := ip.String()
|
||||
// Try to Delete the IPAddress independently of the cache state.
|
||||
// The error is ignored for compatibility reasons.
|
||||
err := a.client.IPAddresses().Delete(context.Background(), name, metav1.DeleteOptions{})
|
||||
if err == nil {
|
||||
// update metrics
|
||||
a.metrics.setAllocated(a.metricLabel, a.Used())
|
||||
a.metrics.setAvailable(a.metricLabel, a.Free())
|
||||
return nil
|
||||
}
|
||||
klog.Infof("error releasing ip %s : %v", name, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
// ForEach executes the function on each allocated IP
|
||||
// This is required to satisfy the Allocator Interface only
|
||||
func (a *Allocator) ForEach(f func(net.IP)) {
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(a.IPFamily()),
|
||||
networkingv1alpha1.LabelManagedBy: ControllerName,
|
||||
}).AsSelectorPreValidated()
|
||||
ips, err := a.ipAddressLister.List(ipLabelSelector)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, ip := range ips {
|
||||
f(netutils.ParseIPSloppy(ip.Name))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *Allocator) CIDR() net.IPNet {
|
||||
return *a.cidr
|
||||
}
|
||||
|
||||
// for testing
|
||||
func (a *Allocator) Has(ip net.IP) bool {
|
||||
// convert IP to name
|
||||
name := ip.String()
|
||||
ipAddress, err := a.client.IPAddresses().Get(context.Background(), name, metav1.GetOptions{})
|
||||
if err != nil || len(ipAddress.Name) == 0 {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *Allocator) IPFamily() api.IPFamily {
|
||||
return a.family
|
||||
}
|
||||
|
||||
// for testing
|
||||
func (a *Allocator) Used() int {
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(a.IPFamily()),
|
||||
networkingv1alpha1.LabelManagedBy: ControllerName,
|
||||
}).AsSelectorPreValidated()
|
||||
ips, err := a.ipAddressLister.List(ipLabelSelector)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return len(ips)
|
||||
}
|
||||
|
||||
// for testing
|
||||
func (a *Allocator) Free() int {
|
||||
return int(a.size) - a.Used()
|
||||
}
|
||||
|
||||
// Destroy is a no-op; it exists only to satisfy the allocator Interface.
|
||||
func (a *Allocator) Destroy() {
|
||||
}
|
||||
|
||||
// DryRun returns a shim around the allocator that does not persist any state.
|
||||
func (a *Allocator) DryRun() Interface {
|
||||
return dryRunAllocator{a}
|
||||
}
|
||||
|
||||
// EnableMetrics registers the allocator metrics and enables recording them.
|
||||
func (a *Allocator) EnableMetrics() {
|
||||
registerMetrics()
|
||||
a.metrics = &metricsRecorder{}
|
||||
}
|
||||
|
||||
// dryRunAllocator is a shim to satisfy Interface without persisting state.
|
||||
type dryRunAllocator struct {
|
||||
real *Allocator
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) Allocate(ip net.IP) error {
|
||||
return dry.real.allocateService(nil, ip, dryRunTrue)
|
||||
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) AllocateNext() (net.IP, error) {
|
||||
return dry.real.allocateNextService(nil, dryRunTrue)
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) Release(ip net.IP) error {
|
||||
return dry.real.release(ip, dryRunTrue)
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) ForEach(cb func(net.IP)) {
|
||||
dry.real.ForEach(cb)
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) CIDR() net.IPNet {
|
||||
return dry.real.CIDR()
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) IPFamily() api.IPFamily {
|
||||
return dry.real.IPFamily()
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) DryRun() Interface {
|
||||
return dry
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) Has(ip net.IP) bool {
|
||||
return dry.real.Has(ip)
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) Destroy() {
|
||||
}
|
||||
|
||||
func (dry dryRunAllocator) EnableMetrics() {
|
||||
}
|
||||
|
||||
// addOffsetAddress returns the address at the provided offset within the subnet
|
||||
// TODO: move it to k8s.io/utils/net, this is the same as current AddIPOffset()
|
||||
// but using netip.Addr instead of net.IP
|
||||
func addOffsetAddress(address netip.Addr, offset uint64) (netip.Addr, error) {
|
||||
addressBig := big.NewInt(0).SetBytes(address.AsSlice())
|
||||
r := big.NewInt(0).Add(addressBig, big.NewInt(int64(offset)))
|
||||
addr, ok := netip.AddrFromSlice(r.Bytes())
|
||||
if !ok {
|
||||
return netip.Addr{}, fmt.Errorf("invalid address %v", r.Bytes())
|
||||
}
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// hostsPerNetwork returns the number of available hosts in a subnet.
|
||||
// The max number is limited by the size of an uint64.
|
||||
// Number of hosts is calculated with the formula:
|
||||
// IPv4: 2^x - 2, excluding the network and broadcast addresses
|
||||
// IPv6: 2^x - 1, excluding the network address
|
||||
// where x is the number of host bits in the subnet.
|
||||
func hostsPerNetwork(subnet *net.IPNet) uint64 {
|
||||
ones, bits := subnet.Mask.Size()
|
||||
// this checks that we are not overflowing an uint64
|
||||
if bits-ones >= 64 {
|
||||
return math.MaxUint64
|
||||
}
|
||||
max := uint64(1) << uint(bits-ones)
|
||||
// Don't use the network's ".0" address, but don't underflow below 0 either
|
||||
if max == 0 {
|
||||
return 0
|
||||
}
|
||||
max--
|
||||
if netutils.IsIPv4CIDR(subnet) {
|
||||
// Don't use the IPv4 network's broadcast address
|
||||
if max == 0 {
|
||||
return 0
|
||||
}
|
||||
max--
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
// broadcastAddress returns the broadcast address of the subnet
|
||||
// The broadcast address is obtained by setting all the host bits
|
||||
// in a subnet to 1.
|
||||
// network 192.168.0.0/24 : subnet bits 24 host bits 32 - 24 = 8
|
||||
// broadcast address 192.168.0.255
|
||||
func broadcastAddress(subnet netip.Prefix) (netip.Addr, error) {
|
||||
base := subnet.Masked().Addr()
|
||||
bytes := base.AsSlice()
|
||||
// get all the host bits from the subnet
|
||||
n := 8*len(bytes) - subnet.Bits()
|
||||
// set all the host bits to 1
|
||||
for i := len(bytes) - 1; i >= 0 && n > 0; i-- {
|
||||
if n >= 8 {
|
||||
bytes[i] = 0xff
|
||||
n -= 8
|
||||
} else {
|
||||
mask := ^uint8(0) >> (8 - n)
|
||||
bytes[i] |= mask
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
addr, ok := netip.AddrFromSlice(bytes)
|
||||
if !ok {
|
||||
return netip.Addr{}, fmt.Errorf("invalid address %v", bytes)
|
||||
}
|
||||
return addr, nil
|
||||
}
|
||||
|
||||
// serviceToRef obtains the parent reference for a Service
|
||||
func serviceToRef(svc *api.Service) *networkingv1alpha1.ParentReference {
|
||||
if svc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
return &networkingv1alpha1.ParentReference{
|
||||
Group: "",
|
||||
Resource: "services",
|
||||
Namespace: svc.Namespace,
|
||||
Name: svc.Name,
|
||||
UID: svc.UID,
|
||||
}
|
||||
}
|
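The byte manipulation in addOffsetAddress, hostsPerNetwork and broadcastAddress above is compact and easy to get subtly wrong. The following standalone sketch (my own illustration, not part of this diff) reproduces the broadcast-address masking and the big.Int offset arithmetic on a sample prefix so the arithmetic can be checked in isolation; note that netip.AddrFromSlice on big.Int bytes only works here because the sample address has no leading zero bytes.

package main

import (
	"fmt"
	"math/big"
	"net/netip"
)

func main() {
	subnet := netip.MustParsePrefix("192.168.0.0/24")

	// Broadcast address: set every host bit of the masked base address to 1,
	// mirroring broadcastAddress above.
	bytes := subnet.Masked().Addr().AsSlice()
	hostBits := 8*len(bytes) - subnet.Bits()
	for i := len(bytes) - 1; i >= 0 && hostBits > 0; i-- {
		if hostBits >= 8 {
			bytes[i] = 0xff
			hostBits -= 8
		} else {
			bytes[i] |= ^uint8(0) >> (8 - hostBits)
			break
		}
	}
	broadcast, _ := netip.AddrFromSlice(bytes)
	fmt.Println(broadcast) // 192.168.0.255

	// Offset arithmetic with math/big, mirroring addOffsetAddress above.
	base := big.NewInt(0).SetBytes(subnet.Masked().Addr().AsSlice())
	sum := big.NewInt(0).Add(base, big.NewInt(10))
	tenth, _ := netip.AddrFromSlice(sum.Bytes())
	fmt.Println(tenth) // 192.168.0.10
}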
pkg/registry/core/service/ipallocator/ipallocator_test.go (new file)
@@ -0,0 +1,921 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipallocator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"net"
|
||||
"net/netip"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
"k8s.io/component-base/metrics/testutil"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
func newTestAllocator(cidr *net.IPNet) (*Allocator, error) {
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(client, 0*time.Second)
|
||||
ipInformer := informerFactory.Networking().V1alpha1().IPAddresses()
|
||||
ipStore := ipInformer.Informer().GetIndexer()
|
||||
|
||||
client.PrependReactor("create", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
ip := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.IPAddress)
|
||||
_, exists, err := ipStore.GetByKey(ip.Name)
|
||||
if exists && err == nil {
|
||||
return false, nil, fmt.Errorf("ip already exists")
|
||||
}
|
||||
ip.Generation = 1
|
||||
err = ipStore.Add(ip)
|
||||
return false, ip, err
|
||||
}))
|
||||
client.PrependReactor("delete", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
name := action.(k8stesting.DeleteAction).GetName()
|
||||
obj, exists, err := ipStore.GetByKey(name)
|
||||
ip := &networkingv1alpha1.IPAddress{}
|
||||
if exists && err == nil {
|
||||
ip = obj.(*networkingv1alpha1.IPAddress)
|
||||
err = ipStore.Delete(ip)
|
||||
}
|
||||
return false, ip, err
|
||||
}))
|
||||
|
||||
c, err := NewIPAllocator(cidr, client.NetworkingV1alpha1(), ipInformer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.ipAddressSynced = func() bool { return true }
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func TestAllocateIPAllocator(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
cidr string
|
||||
family api.IPFamily
|
||||
free int
|
||||
released string
|
||||
outOfRange []string
|
||||
alreadyAllocated string
|
||||
}{
|
||||
{
|
||||
name: "IPv4",
|
||||
cidr: "192.168.1.0/24",
|
||||
free: 254,
|
||||
released: "192.168.1.5",
|
||||
outOfRange: []string{
|
||||
"192.168.0.1", // not in 192.168.1.0/24
|
||||
"192.168.1.0", // reserved (base address)
|
||||
"192.168.1.255", // reserved (broadcast address)
|
||||
"192.168.2.2", // not in 192.168.1.0/24
|
||||
},
|
||||
alreadyAllocated: "192.168.1.1",
|
||||
},
|
||||
{
|
||||
name: "IPv6",
|
||||
cidr: "2001:db8:1::/116",
|
||||
free: 4095,
|
||||
released: "2001:db8:1::5",
|
||||
outOfRange: []string{
|
||||
"2001:db8::1", // not in 2001:db8:1::/48
|
||||
"2001:db8:1::", // reserved (base address)
|
||||
"2001:db8:2::2", // not in 2001:db8:1::/48
|
||||
},
|
||||
alreadyAllocated: "2001:db8:1::1",
|
||||
},
|
||||
}
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy(tc.cidr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
if f := r.Free(); f != tc.free {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, tc.free, f)
|
||||
}
|
||||
|
||||
if f := r.Used(); f != 0 {
|
||||
t.Errorf("[%s]: wrong used: expected %d, got %d", tc.name, 0, f)
|
||||
}
|
||||
found := sets.NewString()
|
||||
count := 0
|
||||
for r.Free() > 0 {
|
||||
ip, err := r.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("[%s] error @ free: %d used: %d count: %d: %v", tc.name, r.Free(), r.Used(), count, err)
|
||||
}
|
||||
count++
|
||||
//if !cidr.Contains(ip) {
|
||||
// t.Fatalf("[%s] allocated %s which is outside of %s", tc.name, ip, cidr)
|
||||
//}
|
||||
if found.Has(ip.String()) {
|
||||
t.Fatalf("[%s] allocated %s twice @ %d", tc.name, ip, count)
|
||||
}
|
||||
found.Insert(ip.String())
|
||||
}
|
||||
if _, err := r.AllocateNext(); err == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !found.Has(tc.released) {
|
||||
t.Fatalf("not allocated address to be releases %s found %d", tc.released, len(found))
|
||||
}
|
||||
released := netutils.ParseIPSloppy(tc.released)
|
||||
if err := r.Release(released); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if f := r.Free(); f != 1 {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, 1, f)
|
||||
}
|
||||
if f := r.Used(); f != (tc.free - 1) {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, tc.free-1, f)
|
||||
}
|
||||
ip, err := r.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !released.Equal(ip) {
|
||||
t.Errorf("[%s] unexpected %s : %s", tc.name, ip, released)
|
||||
}
|
||||
|
||||
if err := r.Release(released); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, outOfRange := range tc.outOfRange {
|
||||
err = r.Allocate(netutils.ParseIPSloppy(outOfRange))
|
||||
if err == nil {
|
||||
t.Fatalf("unexpacted allocating of %s", outOfRange)
|
||||
}
|
||||
}
|
||||
if err := r.Allocate(netutils.ParseIPSloppy(tc.alreadyAllocated)); err == nil {
|
||||
t.Fatalf("unexpected allocation of %s", tc.alreadyAllocated)
|
||||
}
|
||||
if f := r.Free(); f != 1 {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, 1, f)
|
||||
}
|
||||
if f := r.Used(); f != (tc.free - 1) {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, tc.free-1, f)
|
||||
}
|
||||
if err := r.Allocate(released); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if f := r.Free(); f != 0 {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, 0, f)
|
||||
}
|
||||
if f := r.Used(); f != tc.free {
|
||||
t.Errorf("[%s] wrong free: expected %d, got %d", tc.name, tc.free, f)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocateTinyIPAllocator(t *testing.T) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/32")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
|
||||
if f := r.Free(); f != 0 {
|
||||
t.Errorf("free: %d", f)
|
||||
}
|
||||
if _, err := r.AllocateNext(); err == nil {
|
||||
t.Error(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocateReservedIPAllocator(t *testing.T) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/25")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
// allocate all addresses on the dynamic block
|
||||
// subnet /25 = 128 addresses; the reserved static block is min(max(16,128/16),256) = 16, the rest is allocated dynamically
|
||||
dynamicOffset := calculateRangeOffset(cidr)
|
||||
dynamicBlockSize := int(r.size) - dynamicOffset
|
||||
for i := 0; i < dynamicBlockSize; i++ {
|
||||
_, err := r.AllocateNext()
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error trying to allocate: %v", err)
|
||||
}
|
||||
}
|
||||
for i := dynamicOffset; i < int(r.size); i++ {
|
||||
ip := fmt.Sprintf("192.168.1.%d", i+1)
|
||||
if !r.Has(netutils.ParseIPSloppy(ip)) {
|
||||
t.Errorf("IP %s expected to be allocated", ip)
|
||||
}
|
||||
}
|
||||
if f := r.Free(); f != dynamicOffset {
|
||||
t.Errorf("expected %d free addresses, got %d", dynamicOffset, f)
|
||||
}
|
||||
// allocate all addresses on the static block
|
||||
for i := 0; i < dynamicOffset; i++ {
|
||||
ip := fmt.Sprintf("192.168.1.%d", i+1)
|
||||
if err := r.Allocate(netutils.ParseIPSloppy(ip)); err != nil {
|
||||
t.Errorf("Unexpected error trying to allocate IP %s: %v", ip, err)
|
||||
}
|
||||
}
|
||||
if f := r.Free(); f != 0 {
|
||||
t.Errorf("expected free equal to 0 got: %d", f)
|
||||
}
|
||||
// release one allocated address and allocate a new one in its place
|
||||
if err := r.Release(netutils.ParseIPSloppy("192.168.1.10")); err != nil {
|
||||
t.Fatalf("Unexpected error trying to release ip 192.168.1.10: %v", err)
|
||||
}
|
||||
if _, err := r.AllocateNext(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if f := r.Free(); f != 0 {
|
||||
t.Errorf("expected free equal to 0 got: %d", f)
|
||||
}
|
||||
}
|
||||
|
||||
func TestAllocateSmallIPAllocator(t *testing.T) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.240/30")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
|
||||
if f := r.Free(); f != 2 {
|
||||
t.Errorf("expected free equal to 2 got: %d", f)
|
||||
}
|
||||
found := sets.NewString()
|
||||
for i := 0; i < 2; i++ {
|
||||
ip, err := r.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatalf("error allocating %s try %d : %v", ip, i, err)
|
||||
}
|
||||
if found.Has(ip.String()) {
|
||||
t.Fatalf("address %s has been already allocated", ip)
|
||||
}
|
||||
found.Insert(ip.String())
|
||||
}
|
||||
for s := range found {
|
||||
if !r.Has(netutils.ParseIPSloppy(s)) {
|
||||
t.Fatalf("missing: %s", s)
|
||||
}
|
||||
if err := r.Allocate(netutils.ParseIPSloppy(s)); err == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
if f := r.Free(); f != 0 {
|
||||
t.Errorf("expected free equal to 0 got: %d", f)
|
||||
}
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
if ip, err := r.AllocateNext(); err == nil {
|
||||
t.Fatalf("suddenly became not-full: %s", ip.String())
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestForEachIPAllocator(t *testing.T) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy("192.168.1.0/24")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testCases := []sets.String{
|
||||
sets.NewString(),
|
||||
sets.NewString("192.168.1.1"),
|
||||
sets.NewString("192.168.1.1", "192.168.1.254"),
|
||||
sets.NewString("192.168.1.1", "192.168.1.128", "192.168.1.254"),
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
|
||||
for ips := range tc {
|
||||
ip := netutils.ParseIPSloppy(ips)
|
||||
if err := r.Allocate(ip); err != nil {
|
||||
t.Errorf("[%d] error allocating IP %v: %v", i, ip, err)
|
||||
}
|
||||
if !r.Has(ip) {
|
||||
t.Errorf("[%d] expected IP %v allocated", i, ip)
|
||||
}
|
||||
}
|
||||
calls := sets.NewString()
|
||||
r.ForEach(func(ip net.IP) {
|
||||
calls.Insert(ip.String())
|
||||
})
|
||||
if len(calls) != len(tc) {
|
||||
t.Errorf("[%d] expected %d calls, got %d", i, len(tc), len(calls))
|
||||
}
|
||||
if !calls.Equal(tc) {
|
||||
t.Errorf("[%d] expected calls to equal testcase: %v vs %v", i, calls.List(), tc.List())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIPAllocatorClusterIPMetrics(t *testing.T) {
|
||||
clearMetrics()
|
||||
// create IPv4 allocator
|
||||
cidrIPv4 := "10.0.0.0/24"
|
||||
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
|
||||
a, err := newTestAllocator(clusterCIDRv4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
a.EnableMetrics()
|
||||
// create IPv6 allocator
|
||||
cidrIPv6 := "2001:db8::/112"
|
||||
_, clusterCIDRv6, _ := netutils.ParseCIDRSloppy(cidrIPv6)
|
||||
b, err := newTestAllocator(clusterCIDRv6)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error creating CidrSet: %v", err)
|
||||
}
|
||||
b.EnableMetrics()
|
||||
|
||||
// Check initial state
|
||||
em := testMetrics{
|
||||
free: 0,
|
||||
used: 0,
|
||||
allocated: 0,
|
||||
errors: 0,
|
||||
}
|
||||
expectMetrics(t, cidrIPv4, em)
|
||||
em = testMetrics{
|
||||
free: 0,
|
||||
used: 0,
|
||||
allocated: 0,
|
||||
errors: 0,
|
||||
}
|
||||
expectMetrics(t, cidrIPv6, em)
|
||||
|
||||
// allocate 2 IPv4 addresses
|
||||
found := sets.NewString()
|
||||
for i := 0; i < 2; i++ {
|
||||
ip, err := a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if found.Has(ip.String()) {
|
||||
t.Fatalf("already reserved: %s", ip)
|
||||
}
|
||||
found.Insert(ip.String())
|
||||
}
|
||||
|
||||
em = testMetrics{
|
||||
free: 252,
|
||||
used: 2,
|
||||
allocated: 2,
|
||||
errors: 0,
|
||||
}
|
||||
expectMetrics(t, cidrIPv4, em)
|
||||
|
||||
// try to allocate the same IP addresses
|
||||
for s := range found {
|
||||
if !a.Has(netutils.ParseIPSloppy(s)) {
|
||||
t.Fatalf("missing: %s", s)
|
||||
}
|
||||
if err := a.Allocate(netutils.ParseIPSloppy(s)); err != ErrAllocated {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
em = testMetrics{
|
||||
free: 252,
|
||||
used: 2,
|
||||
allocated: 2,
|
||||
errors: 2,
|
||||
}
|
||||
expectMetrics(t, cidrIPv4, em)
|
||||
|
||||
// release the addresses allocated
|
||||
for s := range found {
|
||||
if !a.Has(netutils.ParseIPSloppy(s)) {
|
||||
t.Fatalf("missing: %s", s)
|
||||
}
|
||||
if err := a.Release(netutils.ParseIPSloppy(s)); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
em = testMetrics{
|
||||
free: 254,
|
||||
used: 0,
|
||||
allocated: 2,
|
||||
errors: 2,
|
||||
}
|
||||
expectMetrics(t, cidrIPv4, em)
|
||||
|
||||
// allocate 264 addresses for each allocator
|
||||
// the full range and 10 more (254 + 10 = 264) for IPv4
|
||||
for i := 0; i < 264; i++ {
|
||||
a.AllocateNext()
|
||||
b.AllocateNext()
|
||||
}
|
||||
em = testMetrics{
|
||||
free: 0,
|
||||
used: 254,
|
||||
allocated: 256, // this is a counter, we already had 2 allocations and we did 254 more
|
||||
errors: 12,
|
||||
}
|
||||
expectMetrics(t, cidrIPv4, em)
|
||||
em = testMetrics{
|
||||
free: 65271, // the IPv6 clusterIP range is capped to 2^16 addresses and the broadcast address is considered valid
|
||||
used: 264,
|
||||
allocated: 264,
|
||||
errors: 0,
|
||||
}
|
||||
expectMetrics(t, cidrIPv6, em)
|
||||
}
|
||||
|
||||
func TestIPAllocatorClusterIPAllocatedMetrics(t *testing.T) {
|
||||
clearMetrics()
|
||||
// create IPv4 allocator
|
||||
cidrIPv4 := "10.0.0.0/25"
|
||||
_, clusterCIDRv4, _ := netutils.ParseCIDRSloppy(cidrIPv4)
|
||||
a, err := newTestAllocator(clusterCIDRv4)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
a.EnableMetrics()
|
||||
|
||||
em := testMetrics{
|
||||
free: 0,
|
||||
used: 0,
|
||||
allocated: 0,
|
||||
errors: 0,
|
||||
}
|
||||
expectMetrics(t, cidrIPv4, em)
|
||||
|
||||
// allocate 2 dynamic IPv4 addresses
|
||||
found := sets.NewString()
|
||||
for i := 0; i < 2; i++ {
|
||||
ip, err := a.AllocateNext()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if found.Has(ip.String()) {
|
||||
t.Fatalf("already reserved: %s", ip)
|
||||
}
|
||||
found.Insert(ip.String())
|
||||
}
|
||||
|
||||
dynamic_allocated, err := testutil.GetCounterMetricValue(clusterIPAllocations.WithLabelValues(cidrIPv4, "dynamic"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to get %s value, err: %v", clusterIPAllocations.Name, err)
|
||||
}
|
||||
if dynamic_allocated != 2 {
|
||||
t.Fatalf("Expected 2 received %f", dynamic_allocated)
|
||||
}
|
||||
|
||||
// try to allocate the same IP addresses
|
||||
for s := range found {
|
||||
if !a.Has(netutils.ParseIPSloppy(s)) {
|
||||
t.Fatalf("missing: %s", s)
|
||||
}
|
||||
if err := a.Allocate(netutils.ParseIPSloppy(s)); err != ErrAllocated {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
static_errors, err := testutil.GetCounterMetricValue(clusterIPAllocationErrors.WithLabelValues(cidrIPv4, "static"))
|
||||
if err != nil {
|
||||
t.Errorf("failed to get %s value, err: %v", clusterIPAllocationErrors.Name, err)
|
||||
}
|
||||
if static_errors != 2 {
|
||||
t.Fatalf("Expected 2 received %f", dynamic_allocated)
|
||||
}
|
||||
}
|
||||
|
||||
func Test_addOffsetAddress(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
address netip.Addr
|
||||
offset uint64
|
||||
want netip.Addr
|
||||
}{
|
||||
{
|
||||
name: "IPv4 offset 0",
|
||||
address: netip.MustParseAddr("192.168.0.0"),
|
||||
offset: 0,
|
||||
want: netip.MustParseAddr("192.168.0.0"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 offset 0 not nibble boundary",
|
||||
address: netip.MustParseAddr("192.168.0.11"),
|
||||
offset: 0,
|
||||
want: netip.MustParseAddr("192.168.0.11"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 offset 1",
|
||||
address: netip.MustParseAddr("192.168.0.0"),
|
||||
offset: 1,
|
||||
want: netip.MustParseAddr("192.168.0.1"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 offset 1 not nibble boundary",
|
||||
address: netip.MustParseAddr("192.168.0.11"),
|
||||
offset: 1,
|
||||
want: netip.MustParseAddr("192.168.0.12"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 offset 1",
|
||||
address: netip.MustParseAddr("fd00:1:2:3::"),
|
||||
offset: 1,
|
||||
want: netip.MustParseAddr("fd00:1:2:3::1"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 offset 1 not nibble boundary",
|
||||
address: netip.MustParseAddr("fd00:1:2:3::a"),
|
||||
offset: 1,
|
||||
want: netip.MustParseAddr("fd00:1:2:3::b"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 offset last",
|
||||
address: netip.MustParseAddr("192.168.0.0"),
|
||||
offset: 255,
|
||||
want: netip.MustParseAddr("192.168.0.255"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 offset last",
|
||||
address: netip.MustParseAddr("fd00:1:2:3::"),
|
||||
offset: 0x7FFFFFFFFFFFFFFF,
|
||||
want: netip.MustParseAddr("fd00:1:2:3:7FFF:FFFF:FFFF:FFFF"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 offset middle",
|
||||
address: netip.MustParseAddr("192.168.0.0"),
|
||||
offset: 128,
|
||||
want: netip.MustParseAddr("192.168.0.128"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 offset 255",
|
||||
address: netip.MustParseAddr("2001:db8:1::101"),
|
||||
offset: 255,
|
||||
want: netip.MustParseAddr("2001:db8:1::200"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 offset 1025",
|
||||
address: netip.MustParseAddr("fd00:1:2:3::"),
|
||||
offset: 1025,
|
||||
want: netip.MustParseAddr("fd00:1:2:3::401"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := addOffsetAddress(tt.address, tt.offset)
|
||||
if !reflect.DeepEqual(got, tt.want) || err != nil {
|
||||
t.Errorf("offsetAddress() = %v, want %v", got, tt.want)
|
||||
}
|
||||
// double check to avoid mistakes on the hardcoded values
|
||||
// avoid large numbers or the test will time out
|
||||
if tt.offset < 2048 {
|
||||
want := tt.address
|
||||
var i uint64
|
||||
for i = 0; i < tt.offset; i++ {
|
||||
want = want.Next()
|
||||
}
|
||||
if !reflect.DeepEqual(got, want) || err != nil {
|
||||
t.Errorf("addOffsetAddress() = %v, want %v", got, want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_broadcastAddress(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
subnet netip.Prefix
|
||||
want netip.Addr
|
||||
}{
|
||||
{
|
||||
name: "ipv4",
|
||||
subnet: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
want: netip.MustParseAddr("192.168.0.255"),
|
||||
},
|
||||
{
|
||||
name: "ipv4 no nibble boundary",
|
||||
subnet: netip.MustParsePrefix("10.0.0.0/12"),
|
||||
want: netip.MustParseAddr("10.15.255.255"),
|
||||
},
|
||||
{
|
||||
name: "ipv6",
|
||||
subnet: netip.MustParsePrefix("fd00:1:2:3::/64"),
|
||||
want: netip.MustParseAddr("fd00:1:2:3:FFFF:FFFF:FFFF:FFFF"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got, err := broadcastAddress(tt.subnet); !reflect.DeepEqual(got, tt.want) || err != nil {
|
||||
t.Errorf("broadcastAddress() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_hostsPerNetwork(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
cidr string
|
||||
addrs uint64
|
||||
}{
|
||||
{
|
||||
name: "supported IPv4 cidr",
|
||||
cidr: "192.168.1.0/24",
|
||||
addrs: 254,
|
||||
},
|
||||
{
|
||||
name: "single IPv4 host",
|
||||
cidr: "192.168.1.0/32",
|
||||
addrs: 0,
|
||||
},
|
||||
{
|
||||
name: "small IPv4 cidr",
|
||||
cidr: "192.168.1.0/31",
|
||||
addrs: 0,
|
||||
},
|
||||
{
|
||||
name: "very large IPv4 cidr",
|
||||
cidr: "0.0.0.0/1",
|
||||
addrs: math.MaxInt32 - 1,
|
||||
},
|
||||
{
|
||||
name: "full IPv4 range",
|
||||
cidr: "0.0.0.0/0",
|
||||
addrs: math.MaxUint32 - 1,
|
||||
},
|
||||
{
|
||||
name: "supported IPv6 cidr",
|
||||
cidr: "2001:db2::/112",
|
||||
addrs: 65535,
|
||||
},
|
||||
{
|
||||
name: "single IPv6 host",
|
||||
cidr: "2001:db8::/128",
|
||||
addrs: 0,
|
||||
},
|
||||
{
|
||||
name: "small IPv6 cidr",
|
||||
cidr: "2001:db8::/127",
|
||||
addrs: 1,
|
||||
},
|
||||
{
|
||||
name: "largest IPv6 for Int64",
|
||||
cidr: "2001:db8::/65",
|
||||
addrs: math.MaxInt64,
|
||||
},
|
||||
{
|
||||
name: "largest IPv6 for Uint64",
|
||||
cidr: "2001:db8::/64",
|
||||
addrs: math.MaxUint64,
|
||||
},
|
||||
{
|
||||
name: "very large IPv6 cidr",
|
||||
cidr: "2001:db8::/1",
|
||||
addrs: math.MaxUint64,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy(tc.cidr)
|
||||
if err != nil {
|
||||
t.Errorf("failed to parse cidr for test %s, unexpected error: '%s'", tc.name, err)
|
||||
}
|
||||
if size := hostsPerNetwork(cidr); size != tc.addrs {
|
||||
t.Errorf("test %s failed. %s should have a range size of %d, got %d",
|
||||
tc.name, tc.cidr, tc.addrs, size)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ipIterator(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
first netip.Addr
|
||||
last netip.Addr
|
||||
offset uint64
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "start from first address small range",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.2"),
|
||||
offset: 0,
|
||||
want: []string{"192.168.0.1", "192.168.0.2"},
|
||||
}, {
|
||||
name: "start from last address small range",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.2"),
|
||||
offset: 1,
|
||||
want: []string{"192.168.0.2", "192.168.0.1"},
|
||||
}, {
|
||||
name: "start from offset out of range address small range",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.2"),
|
||||
offset: 10,
|
||||
want: []string{"192.168.0.1", "192.168.0.2"},
|
||||
}, {
|
||||
name: "start from first address",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.7"),
|
||||
offset: 0,
|
||||
want: []string{"192.168.0.1", "192.168.0.2", "192.168.0.3", "192.168.0.4", "192.168.0.5", "192.168.0.6", "192.168.0.7"},
|
||||
}, {
|
||||
name: "start from middle address",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.7"),
|
||||
offset: 2,
|
||||
want: []string{"192.168.0.3", "192.168.0.4", "192.168.0.5", "192.168.0.6", "192.168.0.7", "192.168.0.1", "192.168.0.2"},
|
||||
}, {
|
||||
name: "start from last address",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.7"),
|
||||
offset: 6,
|
||||
want: []string{"192.168.0.7", "192.168.0.1", "192.168.0.2", "192.168.0.3", "192.168.0.4", "192.168.0.5", "192.168.0.6"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := []string{}
|
||||
iterator := ipIterator(tt.first, tt.last, tt.offset)
|
||||
|
||||
for {
|
||||
ip := iterator()
|
||||
if !ip.IsValid() {
|
||||
break
|
||||
}
|
||||
got = append(got, ip.String())
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("ipIterator() = %v, want %v", got, tt.want)
|
||||
}
|
||||
// check the iterator is fully stopped
|
||||
for i := 0; i < 5; i++ {
|
||||
if ip := iterator(); ip.IsValid() {
|
||||
t.Errorf("iterator should not return more addresses: %v", ip)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ipIterator_Number(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
first netip.Addr
|
||||
last netip.Addr
|
||||
offset uint64
|
||||
want uint64
|
||||
}{
|
||||
{
|
||||
name: "start from first address small range",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.2"),
|
||||
offset: 0,
|
||||
want: 2,
|
||||
}, {
|
||||
name: "start from last address small range",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.2"),
|
||||
offset: 1,
|
||||
want: 2,
|
||||
}, {
|
||||
name: "start from offset out of range small range",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.2"),
|
||||
offset: 10,
|
||||
want: 2,
|
||||
}, {
|
||||
name: "start from first address",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.7"),
|
||||
offset: 0,
|
||||
want: 7,
|
||||
}, {
|
||||
name: "start from middle address",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.7"),
|
||||
offset: 2,
|
||||
want: 7,
|
||||
}, {
|
||||
name: "start from last address",
|
||||
first: netip.MustParseAddr("192.168.0.1"),
|
||||
last: netip.MustParseAddr("192.168.0.7"),
|
||||
offset: 6,
|
||||
want: 7,
|
||||
}, {
|
||||
name: "start from first address large range",
|
||||
first: netip.MustParseAddr("2001:db8:1::101"),
|
||||
last: netip.MustParseAddr("2001:db8:1::fff"),
|
||||
offset: 0,
|
||||
want: 3839,
|
||||
}, {
|
||||
name: "start from address in the middle",
|
||||
first: netip.MustParseAddr("2001:db8:1::101"),
|
||||
last: netip.MustParseAddr("2001:db8:1::fff"),
|
||||
offset: 255,
|
||||
want: 3839,
|
||||
}, {
|
||||
name: "start from last address",
|
||||
first: netip.MustParseAddr("2001:db8:1::101"),
|
||||
last: netip.MustParseAddr("2001:db8:1::fff"),
|
||||
offset: 3838,
|
||||
want: 3839,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var got uint64
|
||||
iterator := ipIterator(tt.first, tt.last, tt.offset)
|
||||
|
||||
for {
|
||||
ip := iterator()
|
||||
if !ip.IsValid() {
|
||||
break
|
||||
}
|
||||
got++
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("ipIterator() = %d, want %d", got, tt.want)
|
||||
}
|
||||
// check the iterator is fully stopped
|
||||
for i := 0; i < 5; i++ {
|
||||
if ip := iterator(); ip.IsValid() {
|
||||
t.Errorf("iterator should not return more addresses: %v", ip)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIPAllocatorAllocateNextIPv4Size1048574(b *testing.B) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy("10.0.0.0/12")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
r.AllocateNext()
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkIPAllocatorAllocateNextIPv6Size65535(b *testing.B) {
|
||||
_, cidr, err := netutils.ParseCIDRSloppy("fd00::/120")
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
r, err := newTestAllocator(cidr)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
defer r.Destroy()
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
r.AllocateNext()
|
||||
}
|
||||
}
|
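Test_ipIterator and Test_ipIterator_Number above pin down the wrap-around contract of the allocator's iterator: start at first+offset, wrap after last, and stop after one full pass. The sketch below is only an illustration of that contract (the PR's ipIterator is defined elsewhere in this diff; the names here are my own) and is useful for reasoning about the expected sequences in the test tables.

package main

import (
	"fmt"
	"net/netip"
)

func wrapIterator(first, last netip.Addr, offset uint64) func() netip.Addr {
	// Walk forward offset steps; an offset past the end of the range falls back
	// to the first address, matching the "out of range" test cases above.
	start := first
	for i := uint64(0); i < offset; i++ {
		if start == last {
			start = first
			break
		}
		start = start.Next()
	}
	current, started := start, false
	return func() netip.Addr {
		if started && current == start {
			return netip.Addr{} // exhausted: every address was produced exactly once
		}
		started = true
		ip := current
		if current == last {
			current = first // wrap around after the last address
		} else {
			current = current.Next()
		}
		return ip
	}
}

func main() {
	next := wrapIterator(netip.MustParseAddr("192.168.0.1"), netip.MustParseAddr("192.168.0.7"), 2)
	for ip := next(); ip.IsValid(); ip = next() {
		fmt.Println(ip) // .3 .4 .5 .6 .7 .1 .2
	}
}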
@@ -18,14 +18,17 @@ package storage
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/klog/v2"
|
||||
apiservice "k8s.io/kubernetes/pkg/api/service"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/apis/core/validation"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/portallocator"
|
||||
netutils "k8s.io/utils/net"
|
||||
@@ -399,7 +402,18 @@ func (al *Allocators) allocIPs(service *api.Service, toAlloc map[api.IPFamily]st
|
||||
allocator = allocator.DryRun()
|
||||
}
|
||||
if ip == "" {
|
||||
allocatedIP, err := allocator.AllocateNext()
|
||||
var allocatedIP net.IP
|
||||
var err error
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
svcAllocator, ok := allocator.(*ipallocator.Allocator)
|
||||
if ok {
|
||||
allocatedIP, err = svcAllocator.AllocateNextService(service)
|
||||
} else {
|
||||
allocatedIP, err = allocator.AllocateNext()
|
||||
}
|
||||
} else {
|
||||
allocatedIP, err = allocator.AllocateNext()
|
||||
}
|
||||
if err != nil {
|
||||
return allocated, errors.NewInternalError(fmt.Errorf("failed to allocate a serviceIP: %v", err))
|
||||
}
|
||||
@@ -409,7 +423,18 @@ func (al *Allocators) allocIPs(service *api.Service, toAlloc map[api.IPFamily]st
|
||||
if parsedIP == nil {
|
||||
return allocated, errors.NewInternalError(fmt.Errorf("failed to parse service IP %q", ip))
|
||||
}
|
||||
if err := allocator.Allocate(parsedIP); err != nil {
|
||||
var err error
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
svcAllocator, ok := allocator.(*ipallocator.Allocator)
|
||||
if ok {
|
||||
err = svcAllocator.AllocateService(service, parsedIP)
|
||||
} else {
|
||||
err = allocator.Allocate(parsedIP)
|
||||
}
|
||||
} else {
|
||||
err = allocator.Allocate(parsedIP)
|
||||
}
|
||||
if err != nil {
|
||||
el := field.ErrorList{field.Invalid(field.NewPath("spec", "clusterIPs"), service.Spec.ClusterIPs, fmt.Sprintf("failed to allocate IP %v: %v", ip, err))}
|
||||
return allocated, errors.NewInvalid(api.Kind("Service"), service.Name, el)
|
||||
}
|
||||
|
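The hunks above repeat the same feature-gate check and type assertion for both the "allocate next" and the "allocate this specific IP" paths. A small helper could keep the two call sites symmetrical; the sketch below is a hypothetical refactor, not part of the diff, and it assumes the imports already present in this file (ipallocator, utilfeature, features, api, net).

// allocateClusterIP is a hypothetical helper: with ip == nil it allocates the next
// free address, otherwise it allocates the given address, preferring the
// Service-aware methods when the MultiCIDRServiceAllocator gate is enabled.
func allocateClusterIP(allocator ipallocator.Interface, svc *api.Service, ip net.IP) (net.IP, error) {
	if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
		if svcAllocator, ok := allocator.(*ipallocator.Allocator); ok {
			if ip == nil {
				return svcAllocator.AllocateNextService(svc)
			}
			return ip, svcAllocator.AllocateService(svc, ip)
		}
	}
	if ip == nil {
		return allocator.AllocateNext()
	}
	return ip, allocator.Allocate(ip)
}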
pkg/registry/networking/ipaddress/doc.go (new file)
@@ -0,0 +1,17 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipaddress // import "k8s.io/kubernetes/pkg/registry/networking/ipaddress"
|
pkg/registry/networking/ipaddress/storage/storage.go (new file)
@@ -0,0 +1,63 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
|
||||
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/networking/ipaddress"
|
||||
)
|
||||
|
||||
// REST implements a RESTStorage for IPAddress against etcd
|
||||
type REST struct {
|
||||
*genericregistry.Store
|
||||
}
|
||||
|
||||
// NewREST returns a RESTStorage object that will work against IPAddress objects.
|
||||
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
|
||||
store := &genericregistry.Store{
|
||||
NewFunc: func() runtime.Object { return &networking.IPAddress{} },
|
||||
NewListFunc: func() runtime.Object { return &networking.IPAddressList{} },
|
||||
DefaultQualifiedResource: networking.Resource("ipaddresses"),
|
||||
SingularQualifiedResource: networking.Resource("ipaddress"),
|
||||
|
||||
CreateStrategy: ipaddress.Strategy,
|
||||
UpdateStrategy: ipaddress.Strategy,
|
||||
DeleteStrategy: ipaddress.Strategy,
|
||||
|
||||
TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
|
||||
}
|
||||
options := &generic.StoreOptions{RESTOptions: optsGetter}
|
||||
if err := store.CompleteWithOptions(options); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &REST{store}, nil
|
||||
}
|
||||
|
||||
// Implement ShortNamesProvider.
|
||||
var _ rest.ShortNamesProvider = &REST{}
|
||||
|
||||
// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource.
|
||||
func (r *REST) ShortNames() []string {
|
||||
return []string{"ip"}
|
||||
}
|
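For context, once the storage above is wired into the networking.k8s.io/v1alpha1 group (see the apiserver hunk later in this diff), IPAddress objects become accessible through the generated typed client, and the "ip" short name allows `kubectl get ip`. The sketch below is only an illustration of that client-side view; the clientset wiring is standard client-go boilerplate, and details such as ParentRef being a pointer field follow the v1alpha1 types added by this PR, so treat them as assumptions.

package main

import (
	"context"
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Standard out-of-cluster client setup using the default kubeconfig location.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}

	ip := &networkingv1alpha1.IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.1.5"}, // the name must be the canonical IP
		Spec: networkingv1alpha1.IPAddressSpec{
			ParentRef: &networkingv1alpha1.ParentReference{
				Resource:  "services",
				Namespace: "default",
				Name:      "demo",
			},
		},
	}
	created, err := clientset.NetworkingV1alpha1().IPAddresses().Create(context.TODO(), ip, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created IPAddress", created.Name)
}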
pkg/registry/networking/ipaddress/strategy.go (new file)
@@ -0,0 +1,110 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipaddress
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
"k8s.io/kubernetes/pkg/apis/networking/validation"
|
||||
)
|
||||
|
||||
// ipAddressStrategy implements verification logic for IPAddress objects.
|
||||
type ipAddressStrategy struct {
|
||||
runtime.ObjectTyper
|
||||
names.NameGenerator
|
||||
}
|
||||
|
||||
// noopNameGenerator does not generate names, it just returns the base.
|
||||
type noopNameGenerator struct{}
|
||||
|
||||
func (noopNameGenerator) GenerateName(base string) string {
|
||||
return base
|
||||
}
|
||||
|
||||
// Strategy is the default logic that applies when creating and updating IPAddress objects.
|
||||
var Strategy = ipAddressStrategy{legacyscheme.Scheme, noopNameGenerator{}}
|
||||
|
||||
// Strategy should implement rest.RESTCreateStrategy
|
||||
var _ rest.RESTCreateStrategy = Strategy
|
||||
|
||||
// Strategy should implement rest.RESTUpdateStrategy
|
||||
var _ rest.RESTUpdateStrategy = Strategy
|
||||
|
||||
// NamespaceScoped returns false because IPAddress is a cluster-scoped resource.
|
||||
func (ipAddressStrategy) NamespaceScoped() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// PrepareForCreate prepares an IPAddress for creation; there is currently no status or generated data to clear.
|
||||
func (ipAddressStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
|
||||
_ = obj.(*networking.IPAddress)
|
||||
|
||||
}
|
||||
|
||||
// PrepareForUpdate clears fields that are not allowed to be set by end users on update.
|
||||
func (ipAddressStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
|
||||
newIPAddress := obj.(*networking.IPAddress)
|
||||
oldIPAddress := old.(*networking.IPAddress)
|
||||
|
||||
_, _ = newIPAddress, oldIPAddress
|
||||
}
|
||||
|
||||
// Validate validates a new IPAddress.
|
||||
func (ipAddressStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
|
||||
ipAddress := obj.(*networking.IPAddress)
|
||||
err := validation.ValidateIPAddress(ipAddress)
|
||||
return err
|
||||
}
|
||||
|
||||
// Canonicalize normalizes the object after validation.
|
||||
func (ipAddressStrategy) Canonicalize(obj runtime.Object) {
|
||||
}
|
||||
|
||||
// AllowCreateOnUpdate is false for IPAddress; this means POST is needed to create one.
|
||||
func (ipAddressStrategy) AllowCreateOnUpdate() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// ValidateUpdate is the default update validation for an end user.
|
||||
func (ipAddressStrategy) ValidateUpdate(ctx context.Context, new, old runtime.Object) field.ErrorList {
|
||||
newIPAddress := new.(*networking.IPAddress)
|
||||
oldIPAddress := old.(*networking.IPAddress)
|
||||
errList := validation.ValidateIPAddress(newIPAddress)
|
||||
errList = append(errList, validation.ValidateIPAddressUpdate(newIPAddress, oldIPAddress)...)
|
||||
return errList
|
||||
}
|
||||
|
||||
// AllowUnconditionalUpdate is the default update policy for IPAddress objects.
|
||||
func (ipAddressStrategy) AllowUnconditionalUpdate() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// WarningsOnCreate returns warnings for the creation of the given object.
|
||||
func (ipAddressStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WarningsOnUpdate returns warnings for the given update.
|
||||
func (ipAddressStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
|
||||
return nil
|
||||
}
|
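strategy_test.go below is added as an empty scaffold. As an illustration of the coverage it could hold, the sketch that follows exercises Strategy.Validate against a canonical and a non-canonical name; it is not part of the diff, it assumes ParentRef is a pointer field on the internal IPAddressSpec, and it relies on ValidateIPAddress enforcing the canonical-name rule described in the API comments.

package ipaddress

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/apis/networking"
)

// TestStrategyValidateSketch is an illustrative test: a canonical IPv4 name should
// validate cleanly, while a name with a leading zero octet should be rejected.
func TestStrategyValidateSketch(t *testing.T) {
	valid := &networking.IPAddress{
		ObjectMeta: metav1.ObjectMeta{Name: "192.168.1.5"},
		Spec: networking.IPAddressSpec{
			ParentRef: &networking.ParentReference{Resource: "services", Namespace: "default", Name: "svc"},
		},
	}
	if errs := Strategy.Validate(context.TODO(), valid); len(errs) != 0 {
		t.Errorf("unexpected validation errors: %v", errs)
	}

	invalid := valid.DeepCopy()
	invalid.Name = "10.01.2.3" // leading zero: not the canonical form the API requires
	if errs := Strategy.Validate(context.TODO(), invalid); len(errs) == 0 {
		t.Error("expected validation errors for a non-canonical name")
	}
}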
pkg/registry/networking/ipaddress/strategy_test.go (new file)
@@ -0,0 +1,17 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipaddress
|
@@ -28,6 +28,7 @@ import (
|
||||
clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage"
|
||||
ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage"
|
||||
ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage"
|
||||
ipaddressstore "k8s.io/kubernetes/pkg/registry/networking/ipaddress/storage"
|
||||
networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage"
|
||||
)
|
||||
|
||||
@@ -99,6 +100,14 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora
|
||||
storage[resource] = clusterCIDRCStorage
|
||||
}
|
||||
|
||||
// ipaddress
|
||||
if resource := "ipaddresses"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||
ipAddressStorage, err := ipaddressstore.NewREST(restOptionsGetter)
|
||||
if err != nil {
|
||||
return storage, err
|
||||
}
|
||||
storage[resource] = ipAddressStorage
|
||||
}
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
|