Merge pull request #109090 from sarveshr7/multicidr-rangeallocator
Enhance NodeIPAM to support multiple ClusterCIDRs
@@ -370,3 +370,62 @@ func ScopedResourceSelectorRequirementsAsSelector(ssr v1.ScopedResourceSelectorR
		selector = selector.Add(*r)
	}
	return selector, nil
}

// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement
// type to a labels.Requirement type.
func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) {
	var op selection.Operator
	switch nsr.Operator {
	case v1.NodeSelectorOpIn:
		op = selection.In
	case v1.NodeSelectorOpNotIn:
		op = selection.NotIn
	case v1.NodeSelectorOpExists:
		op = selection.Exists
	case v1.NodeSelectorOpDoesNotExist:
		op = selection.DoesNotExist
	case v1.NodeSelectorOpGt:
		op = selection.GreaterThan
	case v1.NodeSelectorOpLt:
		op = selection.LessThan
	default:
		return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator)
	}
	return labels.NewRequirement(nsr.Key, op, nsr.Values)
}

// NodeSelectorAsSelector converts the NodeSelector api type into a struct that
// implements labels.Selector.
// Note: This function should be kept in sync with the selector methods in
// pkg/labels/selector.go
func NodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) {
	if ns == nil {
		return labels.Nothing(), nil
	}
	if len(ns.NodeSelectorTerms) == 0 {
		return labels.Everything(), nil
	}
	var requirements []labels.Requirement

	for _, nsTerm := range ns.NodeSelectorTerms {
		for _, expr := range nsTerm.MatchExpressions {
			req, err := nodeSelectorRequirementsAsLabelRequirements(expr)
			if err != nil {
				return nil, err
			}
			requirements = append(requirements, *req)
		}

		for _, field := range nsTerm.MatchFields {
			req, err := nodeSelectorRequirementsAsLabelRequirements(field)
			if err != nil {
				return nil, err
			}
			requirements = append(requirements, *req)
		}
	}

	selector := labels.NewSelector()
	selector = selector.Add(requirements...)
	return selector, nil
}
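For context: NodeSelectorAsSelector flattens every MatchExpressions and MatchFields entry across all terms into a single labels.Selector, so the resulting requirements are ANDed together. A rough usage sketch (the v1helper import path is assumed from this hunk's context; the label key is invented for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
	v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" // assumed location of the helper above
)

func main() {
	ns := &v1.NodeSelector{
		NodeSelectorTerms: []v1.NodeSelectorTerm{{
			MatchExpressions: []v1.NodeSelectorRequirement{{
				Key:      "node-role.kubernetes.io/worker",
				Operator: v1.NodeSelectorOpExists,
			}},
		}},
	}
	selector, err := v1helper.NodeSelectorAsSelector(ns)
	if err != nil {
		panic(err)
	}
	// Matches only nodes whose labels satisfy every flattened requirement.
	fmt.Println(selector.Matches(labels.Set{"node-role.kubernetes.io/worker": ""})) // true
}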
@@ -24,6 +24,7 @@ import (
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/networking"
	"k8s.io/kubernetes/pkg/apis/networking/v1"
	"k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
	"k8s.io/kubernetes/pkg/apis/networking/v1beta1"
)

@@ -36,5 +37,6 @@ func Install(scheme *runtime.Scheme) {
	utilruntime.Must(networking.AddToScheme(scheme))
	utilruntime.Must(v1.AddToScheme(scheme))
	utilruntime.Must(v1beta1.AddToScheme(scheme))
	utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion))
	utilruntime.Must(v1alpha1.AddToScheme(scheme))
	utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion))
}

@@ -52,6 +52,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
		&IngressList{},
		&IngressClass{},
		&IngressClassList{},
		&ClusterCIDR{},
		&ClusterCIDRList{},
	)
	return nil
}
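The Install change replaces the old two-version priority line with one that appends v1alpha1 last, so clients keep preferring v1, then v1beta1. A hedged sketch of observing the resulting priority (the install package path is assumed from this hunk's context):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	networkinginstall "k8s.io/kubernetes/pkg/apis/networking/install" // assumed path of the package patched above
)

func main() {
	scheme := runtime.NewScheme()
	networkinginstall.Install(scheme)
	// Expected order: v1, v1beta1, v1alpha1.
	for _, gv := range scheme.PrioritizedVersionsForGroup("networking.k8s.io") {
		fmt.Println(gv.String())
	}
}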
@@ -583,3 +583,67 @@ type ServiceBackendPort struct {
	// +optional
	Number int32
}

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterCIDR represents a single configuration for per-Node Pod CIDR
// allocations when the MultiCIDRRangeAllocator is enabled (see the config for
// kube-controller-manager). A cluster may have any number of ClusterCIDR
// resources, all of which will be considered when allocating a CIDR for a
// Node. A ClusterCIDR is eligible to be used for a given Node when the node
// selector matches the node in question and has free CIDRs to allocate. In
// case of multiple matching ClusterCIDR resources, the allocator will attempt
// to break ties using internal heuristics, but any ClusterCIDR whose node
// selector matches the Node may be used.
type ClusterCIDR struct {
	metav1.TypeMeta
	metav1.ObjectMeta

	Spec ClusterCIDRSpec
}

// ClusterCIDRSpec defines the desired state of ClusterCIDR.
type ClusterCIDRSpec struct {
	// NodeSelector defines which nodes the config is applicable to.
	// An empty or nil NodeSelector selects all nodes.
	// This field is immutable.
	// +optional
	NodeSelector *api.NodeSelector

	// PerNodeHostBits defines the number of host bits to be configured per node.
	// A subnet mask determines how much of the address is used for network bits
	// and host bits. For example an IPv4 address of 192.168.0.0/24, splits the
	// address into 24 bits for the network portion and 8 bits for the host portion.
	// To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6).
	// Minimum value is 4 (16 IPs).
	// This field is immutable.
	// +required
	PerNodeHostBits int32

	// IPv4 defines an IPv4 IP block in CIDR notation (e.g. "10.0.0.0/8").
	// At least one of IPv4 and IPv6 must be specified.
	// This field is immutable.
	// +optional
	IPv4 string

	// IPv6 defines an IPv6 IP block in CIDR notation (e.g. "fd12:3456:789a:1::/64").
	// At least one of IPv4 and IPv6 must be specified.
	// This field is immutable.
	// +optional
	IPv6 string
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ClusterCIDRList contains a list of ClusterCIDRs.
type ClusterCIDRList struct {
	metav1.TypeMeta

	// +optional
	metav1.ListMeta

	// Items is the list of ClusterCIDRs.
	Items []ClusterCIDR
}
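To make the spec fields concrete: with PerNodeHostBits=8, each matching node receives a /24 out of the IPv4 block and a /120 out of the IPv6 block. A sketch against the published v1alpha1 types (the object name and node label below are invented for illustration):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cc := &networkingv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "workers-cidr"},
		Spec: networkingv1alpha1.ClusterCIDRSpec{
			// 8 host bits per node: a /24 from the IPv4 block, a /120 from IPv6.
			PerNodeHostBits: 8,
			IPv4:            "10.1.0.0/16",
			IPv6:            "fd12:3456:789a:1::/64",
			NodeSelector: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      "node-role.kubernetes.io/worker",
						Operator: v1.NodeSelectorOpExists,
					}},
				}},
			},
		},
	}
	fmt.Println(cc.Name)
}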
25 pkg/apis/networking/v1alpha1/defaults.go Normal file
@@ -0,0 +1,25 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	"k8s.io/apimachinery/pkg/runtime"
)

func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
23 pkg/apis/networking/v1alpha1/doc.go Normal file
@@ -0,0 +1,23 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/networking
// +k8s:conversion-gen-external-types=k8s.io/api/networking/v1alpha1
// +k8s:defaulter-gen=TypeMeta
// +k8s:defaulter-gen-input=k8s.io/api/networking/v1alpha1
// +groupName=networking.k8s.io

package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/networking/v1alpha1"
45 pkg/apis/networking/v1alpha1/register.go Normal file
@@ -0,0 +1,45 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name used in this package.
const GroupName = "networking.k8s.io"

// SchemeGroupVersion is the group version used to register these objects.
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// Resource takes an unqualified resource and returns a Group qualified GroupResource.
func Resource(resource string) schema.GroupResource {
	return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
	localSchemeBuilder = &networkingv1alpha1.SchemeBuilder
	AddToScheme        = localSchemeBuilder.AddToScheme
)

func init() {
	// We only register manually written functions here. The registration of the
	// generated functions takes place in the generated files. The separation
	// makes the code compile even when the generated files are missing.
	localSchemeBuilder.Register(addDefaultingFuncs)
}
147 pkg/apis/networking/v1alpha1/zz_generated.conversion.go generated Normal file
@@ -0,0 +1,147 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by conversion-gen. DO NOT EDIT.

package v1alpha1

import (
	unsafe "unsafe"

	v1 "k8s.io/api/core/v1"
	v1alpha1 "k8s.io/api/networking/v1alpha1"
	conversion "k8s.io/apimachinery/pkg/conversion"
	runtime "k8s.io/apimachinery/pkg/runtime"
	core "k8s.io/kubernetes/pkg/apis/core"
	networking "k8s.io/kubernetes/pkg/apis/networking"
)

func init() {
	localSchemeBuilder.Register(RegisterConversions)
}

// RegisterConversions adds conversion functions to the given scheme.
// Public to allow building arbitrary schemes.
func RegisterConversions(s *runtime.Scheme) error {
	if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDR)(nil), (*networking.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(a.(*v1alpha1.ClusterCIDR), b.(*networking.ClusterCIDR), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDR)(nil), (*v1alpha1.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(a.(*networking.ClusterCIDR), b.(*v1alpha1.ClusterCIDR), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRList)(nil), (*networking.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(a.(*v1alpha1.ClusterCIDRList), b.(*networking.ClusterCIDRList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRList)(nil), (*v1alpha1.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(a.(*networking.ClusterCIDRList), b.(*v1alpha1.ClusterCIDRList), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRSpec)(nil), (*networking.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(a.(*v1alpha1.ClusterCIDRSpec), b.(*networking.ClusterCIDRSpec), scope)
	}); err != nil {
		return err
	}
	if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRSpec)(nil), (*v1alpha1.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
		return Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(a.(*networking.ClusterCIDRSpec), b.(*v1alpha1.ClusterCIDRSpec), scope)
	}); err != nil {
		return err
	}
	return nil
}

func autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	return nil
}

// Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR is an autogenerated conversion function.
func Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error {
	return autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in, out, s)
}

func autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error {
	out.ObjectMeta = in.ObjectMeta
	if err := Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
		return err
	}
	return nil
}

// Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR is an autogenerated conversion function.
func Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error {
	return autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in, out, s)
}

func autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]networking.ClusterCIDR)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList is an autogenerated conversion function.
func Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error {
	return autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in, out, s)
}

func autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error {
	out.ListMeta = in.ListMeta
	out.Items = *(*[]v1alpha1.ClusterCIDR)(unsafe.Pointer(&in.Items))
	return nil
}

// Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList is an autogenerated conversion function.
func Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error {
	return autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in, out, s)
}

func autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error {
	out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector))
	out.PerNodeHostBits = in.PerNodeHostBits
	out.IPv4 = in.IPv4
	out.IPv6 = in.IPv6
	return nil
}

// Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec is an autogenerated conversion function.
func Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error {
	return autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in, out, s)
}

func autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error {
	out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector))
	out.PerNodeHostBits = in.PerNodeHostBits
	out.IPv4 = in.IPv4
	out.IPv6 = in.IPv6
	return nil
}

// Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec is an autogenerated conversion function.
func Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error {
	return autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in, out, s)
}
33 pkg/apis/networking/v1alpha1/zz_generated.defaults.go generated Normal file
@@ -0,0 +1,33 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by defaulter-gen. DO NOT EDIT.

package v1alpha1

import (
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// RegisterDefaults adds defaulter functions to the given scheme.
// Public to allow building arbitrary schemes.
// All generated defaulters are covering - they call all nested defaulters.
func RegisterDefaults(scheme *runtime.Scheme) error {
	return nil
}
@@ -20,6 +20,7 @@ import (
	"fmt"
	"strings"

	v1 "k8s.io/api/core/v1"
	apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
	pathvalidation "k8s.io/apimachinery/pkg/api/validation/path"
	unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation"

@@ -602,3 +603,89 @@ func allowInvalidWildcardHostRule(oldIngress *networking.Ingress) bool {
	}
	return false
}

// ValidateClusterCIDRName validates that the given name can be used as a
// ClusterCIDR name.
var ValidateClusterCIDRName = apimachineryvalidation.NameIsDNSLabel

// ValidateClusterCIDR validates a ClusterCIDR.
func ValidateClusterCIDR(cc *networking.ClusterCIDR) field.ErrorList {
	allErrs := apivalidation.ValidateObjectMeta(&cc.ObjectMeta, false, ValidateClusterCIDRName, field.NewPath("metadata"))
	allErrs = append(allErrs, ValidateClusterCIDRSpec(&cc.Spec, field.NewPath("spec"))...)
	return allErrs
}

// ValidateClusterCIDRSpec validates a ClusterCIDR spec.
func ValidateClusterCIDRSpec(spec *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList {
	var allErrs field.ErrorList
	if spec.NodeSelector != nil {
		allErrs = append(allErrs, apivalidation.ValidateNodeSelector(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
	}

	// Validate that a CIDR is specified for at least one IP family (IPv4/IPv6).
	if spec.IPv4 == "" && spec.IPv6 == "" {
		allErrs = append(allErrs, field.Required(fldPath, "one or both of `ipv4` and `ipv6` must be specified"))
		return allErrs
	}

	// Validate the specified IPv4 CIDR and PerNodeHostBits.
	if spec.IPv4 != "" {
		allErrs = append(allErrs, validateCIDRConfig(spec.IPv4, spec.PerNodeHostBits, 32, v1.IPv4Protocol, fldPath)...)
	}

	// Validate the specified IPv6 CIDR and PerNodeHostBits.
	if spec.IPv6 != "" {
		allErrs = append(allErrs, validateCIDRConfig(spec.IPv6, spec.PerNodeHostBits, 128, v1.IPv6Protocol, fldPath)...)
	}

	return allErrs
}

func validateCIDRConfig(configCIDR string, perNodeHostBits, maxMaskSize int32, ipFamily v1.IPFamily, fldPath *field.Path) field.ErrorList {
	var allErrs field.ErrorList
	minPerNodeHostBits := int32(4)

	ip, ipNet, err := netutils.ParseCIDRSloppy(configCIDR)
	if err != nil {
		allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, fmt.Sprintf("must be a valid CIDR: %s", configCIDR)))
		return allErrs
	}

	if ipFamily == v1.IPv4Protocol && !netutils.IsIPv4(ip) {
		allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv4 CIDR"))
	}
	if ipFamily == v1.IPv6Protocol && !netutils.IsIPv6(ip) {
		allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv6 CIDR"))
	}

	// Validate PerNodeHostBits.
	maskSize, _ := ipNet.Mask.Size()
	maxPerNodeHostBits := maxMaskSize - int32(maskSize)

	if perNodeHostBits < minPerNodeHostBits {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be greater than or equal to %d", minPerNodeHostBits)))
	}
	if perNodeHostBits > maxPerNodeHostBits {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be less than or equal to %d", maxPerNodeHostBits)))
	}
	return allErrs
}

// ValidateClusterCIDRUpdate tests if an update to a ClusterCIDR is valid.
func ValidateClusterCIDRUpdate(update, old *networking.ClusterCIDR) field.ErrorList {
	var allErrs field.ErrorList
	allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
	allErrs = append(allErrs, validateClusterCIDRUpdateSpec(&update.Spec, &old.Spec, field.NewPath("spec"))...)
	return allErrs
}

func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList {
	var allErrs field.ErrorList

	allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.NodeSelector, old.NodeSelector, fldPath.Child("nodeSelector"))...)
	allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.PerNodeHostBits, old.PerNodeHostBits, fldPath.Child("perNodeHostBits"))...)
	allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv4, old.IPv4, fldPath.Child("ipv4"))...)
	allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv6, old.IPv6, fldPath.Child("ipv6"))...)

	return allErrs
}
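The bounds enforced by validateCIDRConfig follow directly from mask arithmetic: for a given family, maxPerNodeHostBits = maxMaskSize - prefix length, and the minimum is fixed at 4. A small standalone sketch of the same arithmetic, using the same netutils helper (the function name is invented for illustration):

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

// perNodeHostBitsBounds mirrors the check in validateCIDRConfig above:
// host bits per node must lie in [4, maxMaskSize - prefixLen].
func perNodeHostBitsBounds(cidr string, maxMaskSize int32) (min, max int32, err error) {
	_, ipNet, err := netutils.ParseCIDRSloppy(cidr)
	if err != nil {
		return 0, 0, err
	}
	maskSize, _ := ipNet.Mask.Size()
	return 4, maxMaskSize - int32(maskSize), nil
}

func main() {
	lo, hi, _ := perNodeHostBitsBounds("10.1.0.0/16", 32)
	fmt.Printf("valid perNodeHostBits for 10.1.0.0/16: [%d, %d]\n", lo, hi) // [4, 16]
}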
@@ -1982,3 +1982,216 @@ func TestValidateIngressStatusUpdate(t *testing.T) {
		}
	}
}

func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector {
	return &api.NodeSelector{
		NodeSelectorTerms: []api.NodeSelectorTerm{
			{
				MatchExpressions: []api.NodeSelectorRequirement{
					{
						Key:      key,
						Operator: op,
						Values:   values,
					},
				},
			},
		},
	}
}

func makeClusterCIDR(perNodeHostBits int32, ipv4, ipv6 string, nodeSelector *api.NodeSelector) *networking.ClusterCIDR {
	return &networking.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{
			Name:            "foo",
			ResourceVersion: "9",
		},
		Spec: networking.ClusterCIDRSpec{
			PerNodeHostBits: perNodeHostBits,
			IPv4:            ipv4,
			IPv6:            ipv6,
			NodeSelector:    nodeSelector,
		},
	}
}

func TestValidateClusterCIDR(t *testing.T) {
	testCases := []struct {
		name      string
		cc        *networking.ClusterCIDR
		expectErr bool
	}{
		{
			name:      "valid SingleStack IPv4 ClusterCIDR",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits",
			cc:        makeClusterCIDR(16, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = minPerNodeHostBits",
			cc:        makeClusterCIDR(4, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid SingleStack IPv6 ClusterCIDR",
			cc:        makeClusterCIDR(8, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits",
			cc:        makeClusterCIDR(64, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = minPerNodeHostBits",
			cc:        makeClusterCIDR(4, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid SingleStack IPv6 ClusterCIDR perNodeHostBits=100",
			cc:        makeClusterCIDR(100, "", "fd00:1:1::/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid DualStack ClusterCIDR",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "valid DualStack ClusterCIDR, no NodeSelector",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", nil),
			expectErr: false,
		},
		// Failure cases.
		{
			name:      "invalid ClusterCIDR, no IPv4 or IPv6 CIDR",
			cc:        makeClusterCIDR(8, "", "", nil),
			expectErr: true,
		},
		{
			name:      "invalid ClusterCIDR, invalid nodeSelector",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("NoUppercaseOrSpecialCharsLike=Equals", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		// IPv4 tests.
		{
			name:      "invalid SingleStack IPv4 ClusterCIDR, invalid spec.IPv4",
			cc:        makeClusterCIDR(8, "test", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits > maxPerNodeHostBits",
			cc:        makeClusterCIDR(100, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits < minPerNodeHostBits",
			cc:        makeClusterCIDR(2, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		// IPv6 tests.
		{
			name:      "invalid SingleStack IPv6 ClusterCIDR, invalid spec.IPv6",
			cc:        makeClusterCIDR(8, "", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid SingleStack IPv6 ClusterCIDR, valid IPv4 CIDR in spec.IPv6",
			cc:        makeClusterCIDR(8, "", "10.2.0.0/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits",
			cc:        makeClusterCIDR(12, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits < minPerNodeHostBits",
			cc:        makeClusterCIDR(3, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		// DualStack tests.
		{
			name:      "invalid DualStack ClusterCIDR, valid spec.IPv4, invalid spec.IPv6",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid DualStack ClusterCIDR, valid spec.IPv6, invalid spec.IPv4",
			cc:        makeClusterCIDR(8, "testv4", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid DualStack ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits",
			cc:        makeClusterCIDR(24, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "invalid DualStack ClusterCIDR, valid IPv6 CIDR in spec.IPv4",
			cc:        makeClusterCIDR(8, "fd00::/120", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
	}

	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := ValidateClusterCIDR(testCase.cc)
			if !testCase.expectErr && err != nil {
				t.Errorf("ValidateClusterCIDR(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err)
			}
			if testCase.expectErr && err == nil {
				t.Errorf("ValidateClusterCIDR(%+v) must return an error for test: %s, but got nil", testCase.cc, testCase.name)
			}
		})
	}
}

func TestValidateClusterConfigUpdate(t *testing.T) {
	oldCCC := makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}))

	testCases := []struct {
		name      string
		cc        *networking.ClusterCIDR
		expectErr bool
	}{
		{
			name:      "Successful update, no changes to ClusterCIDR.Spec",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: false,
		},
		{
			name:      "Failed update, update spec.PerNodeHostBits",
			cc:        makeClusterCIDR(12, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "Failed update, update spec.IPv4",
			cc:        makeClusterCIDR(8, "10.2.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "Failed update, update spec.IPv6",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "fd00:2:/112", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})),
			expectErr: true,
		},
		{
			name:      "Failed update, update spec.NodeSelector",
			cc:        makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar2"})),
			expectErr: true,
		},
	}
	for _, testCase := range testCases {
		t.Run(testCase.name, func(t *testing.T) {
			err := ValidateClusterCIDRUpdate(testCase.cc, oldCCC)
			if !testCase.expectErr && err != nil {
				t.Errorf("ValidateClusterCIDRUpdate(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err)
			}
			if testCase.expectErr && err == nil {
				t.Errorf("ValidateClusterCIDRUpdate(%+v) must return error for test: %s, but got nil", testCase.cc, testCase.name)
			}
		})
	}
}
81 pkg/apis/networking/zz_generated.deepcopy.go generated
@@ -28,6 +28,87 @@ import (
	core "k8s.io/kubernetes/pkg/apis/core"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR.
func (in *ClusterCIDR) DeepCopy() *ClusterCIDR {
	if in == nil {
		return nil
	}
	out := new(ClusterCIDR)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterCIDR) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ClusterCIDR, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList.
func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList {
	if in == nil {
		return nil
	}
	out := new(ClusterCIDRList)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ClusterCIDRList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) {
	*out = *in
	if in.NodeSelector != nil {
		in, out := &in.NodeSelector, &out.NodeSelector
		*out = new(core.NodeSelector)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec.
func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec {
	if in == nil {
		return nil
	}
	out := new(ClusterCIDRSpec)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) {
	*out = *in
@@ -22,16 +22,18 @@ import (
	"net"
	"time"

	"k8s.io/klog/v2"

	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	informers "k8s.io/client-go/informers/core/v1"
	networkinginformers "k8s.io/client-go/informers/networking/v1alpha1"
	clientset "k8s.io/client-go/kubernetes"
	cloudprovider "k8s.io/cloud-provider"
	"k8s.io/klog/v2"
	"k8s.io/kubernetes/pkg/features"
)

// CIDRAllocatorType is the type of the allocator to use.

@@ -41,6 +43,9 @@ const (
	// RangeAllocatorType is the allocator that uses an internal CIDR
	// range allocator to do node CIDR range allocations.
	RangeAllocatorType CIDRAllocatorType = "RangeAllocator"
	// MultiCIDRRangeAllocatorType is the allocator that uses an internal CIDR
	// range allocator, driven by ClusterCIDR objects, to do node CIDR range allocations.
	MultiCIDRRangeAllocatorType CIDRAllocatorType = "MultiCIDRRangeAllocator"
	// CloudAllocatorType is the allocator that uses cloud platform
	// support to do node CIDR range allocations.
	CloudAllocatorType CIDRAllocatorType = "CloudAllocator"

@@ -87,7 +92,7 @@ type CIDRAllocator interface {
	// CIDR if it doesn't currently have one or mark the CIDR as used if
	// the node already has one.
	AllocateOrOccupyCIDR(node *v1.Node) error
	// ReleaseCIDR releases the CIDR of the removed node
	// ReleaseCIDR releases the CIDR of the removed node.
	ReleaseCIDR(node *v1.Node) error
	// Run starts all the working logic of the allocator.
	Run(stopCh <-chan struct{})

@@ -96,18 +101,25 @@ type CIDRAllocator interface {
// CIDRAllocatorParams contains the parameters required for creating a new
// CIDR range allocator.
type CIDRAllocatorParams struct {
	// ClusterCIDRs is list of cluster cidrs
	// ClusterCIDRs is list of cluster cidrs.
	ClusterCIDRs []*net.IPNet
	// ServiceCIDR is primary service cidr for cluster
	// ServiceCIDR is primary service cidr for cluster.
	ServiceCIDR *net.IPNet
	// SecondaryServiceCIDR is secondary service cidr for cluster
	// SecondaryServiceCIDR is secondary service cidr for cluster.
	SecondaryServiceCIDR *net.IPNet
	// NodeCIDRMaskSizes is list of node cidr mask sizes
	// NodeCIDRMaskSizes is list of node cidr mask sizes.
	NodeCIDRMaskSizes []int
}

// CIDRs are reserved, then node resource is patched with them.
// nodeReservedCIDRs holds the reservation info for a node.
type nodeReservedCIDRs struct {
	allocatedCIDRs []*net.IPNet
	nodeName       string
}

// New creates a new CIDR range allocator.
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) {
	nodeList, err := listNodes(kubeClient)
	if err != nil {
		return nil, err

@@ -116,6 +128,12 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo
	switch allocatorType {
	case RangeAllocatorType:
		return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList)
	case MultiCIDRRangeAllocatorType:
		if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) {
			return nil, fmt.Errorf("invalid CIDR allocator type: %v, feature gate %v must be enabled", allocatorType, features.MultiCIDRRangeAllocator)
		}
		return NewMultiCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil)

	case CloudAllocatorType:
		return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer)
	default:

@@ -144,3 +162,12 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) {
	}
	return nodeList, nil
}

// ipnetToStringList converts a slice of net.IPNet into a list of CIDRs in string format.
func ipnetToStringList(inCIDRs []*net.IPNet) []string {
	outCIDRs := make([]string, len(inCIDRs))
	for idx, inCIDR := range inCIDRs {
		outCIDRs[idx] = inCIDR.String()
	}
	return outCIDRs
}
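Wiring-wise, the only change for callers of New is the extra ClusterCIDR informer argument, and selecting MultiCIDRRangeAllocatorType fails fast unless the feature gate is on. A hedged construction sketch (kubeconfig path and factory plumbing are illustrative, not from the diff):

package main

import (
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder path
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)

	alloc, err := ipam.New(
		client,
		nil, // no cloud provider needed for the range allocators
		factory.Core().V1().Nodes(),
		factory.Networking().V1alpha1().ClusterCIDRs(),
		ipam.MultiCIDRRangeAllocatorType, // errors out unless the MultiCIDRRangeAllocator gate is enabled
		ipam.CIDRAllocatorParams{ /* ClusterCIDRs, ServiceCIDR, NodeCIDRMaskSizes */ },
	)
	if err != nil {
		panic(err)
	}
	stopCh := make(chan struct{})
	go alloc.Run(stopCh)
}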
140 pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go Normal file
@@ -0,0 +1,140 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipam

import (
	"math"

	cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset"
)

// A PriorityQueue implementation based on https://pkg.go.dev/container/heap#example-package-PriorityQueue

// A PriorityQueueItem is something we manage in a priority queue.
type PriorityQueueItem struct {
	clusterCIDR *cidrset.ClusterCIDR
	// labelMatchCount is the first determinant of priority.
	labelMatchCount int
	// selectorString is a string representation of the labelSelector associated with the cidrSet.
	selectorString string
	// index is needed by update and is maintained by the heap.Interface methods.
	index int // The index of the item in the heap.
}

// A PriorityQueue implements heap.Interface and holds PriorityQueueItems.
type PriorityQueue []*PriorityQueueItem

func (pq PriorityQueue) Len() int { return len(pq) }

// Less compares the priority queue items, to store in a min heap.
// Less(i,j) == true denotes i has higher priority than j.
func (pq PriorityQueue) Less(i, j int) bool {
	if pq[i].labelMatchCount != pq[j].labelMatchCount {
		// P0: the CidrSet with the higher number of matching labels has the highest priority.
		return pq[i].labelMatchCount > pq[j].labelMatchCount
	}

	// If the count of matching labels is equal, compare the max allocatable pod CIDRs.
	if pq[i].maxAllocatable() != pq[j].maxAllocatable() {
		// P1: the CidrSet with fewer allocatable pod CIDRs has higher priority.
		return pq[i].maxAllocatable() < pq[j].maxAllocatable()
	}

	// If the number of allocatable pod CIDRs is equal, compare the node mask size.
	if pq[i].nodeMaskSize() != pq[j].nodeMaskSize() {
		// P2: the CidrSet whose PerNodeMaskSize covers fewer IPs has higher priority.
		// For example, `27` (32 IPs) is picked before `25` (128 IPs).
		return pq[i].nodeMaskSize() > pq[j].nodeMaskSize()
	}

	// If the per-node mask sizes are equal, compare the CIDR labels.
	if pq[i].selectorString != pq[j].selectorString {
		// P3: the CidrSet whose label has the lower alphanumeric value has higher priority.
		return pq[i].selectorString < pq[j].selectorString
	}

	// P4: the CidrSet with the alphanumerically smaller IP address value has higher priority.
	return pq[i].cidrLabel() < pq[j].cidrLabel()
}

func (pq PriorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

func (pq *PriorityQueue) Push(x interface{}) {
	n := len(*pq)
	if item, ok := x.(*PriorityQueueItem); ok {
		item.index = n
		*pq = append(*pq, item)
	}
}

func (pq *PriorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	old[n-1] = nil // avoid memory leak.
	item.index = -1 // for safety.
	*pq = old[0 : n-1]
	return item
}

// maxAllocatable computes the minimum value of the MaxCIDRs for a ClusterCIDR.
// It compares the MaxCIDRs for each CIDR family and returns the minimum.
// e.g. IPv4 - 10.0.0.0/16 PerNodeMaskSize: 24 MaxCIDRs = 256
//      IPv6 - ff:ff::/120 PerNodeMaskSize: 120 MaxCIDRs = 1
// MaxAllocatable for this ClusterCIDR = 1
func (pqi *PriorityQueueItem) maxAllocatable() int {
	ipv4Allocatable := math.MaxInt
	ipv6Allocatable := math.MaxInt

	if pqi.clusterCIDR.IPv4CIDRSet != nil {
		ipv4Allocatable = pqi.clusterCIDR.IPv4CIDRSet.MaxCIDRs
	}

	if pqi.clusterCIDR.IPv6CIDRSet != nil {
		ipv6Allocatable = pqi.clusterCIDR.IPv6CIDRSet.MaxCIDRs
	}

	if ipv4Allocatable < ipv6Allocatable {
		return ipv4Allocatable
	}

	return ipv6Allocatable
}

// nodeMaskSize returns the IPv4 NodeMaskSize if present, else the IPv6 NodeMaskSize.
// Note the requirement: 32 - IPv4 NodeMaskSize == 128 - IPv6 NodeMaskSize.
// Due to this requirement it does not matter which NodeMaskSize we compare.
func (pqi *PriorityQueueItem) nodeMaskSize() int {
	if pqi.clusterCIDR.IPv4CIDRSet != nil {
		return pqi.clusterCIDR.IPv4CIDRSet.NodeMaskSize
	}

	return pqi.clusterCIDR.IPv6CIDRSet.NodeMaskSize
}

// cidrLabel returns the IPv4 CIDR if present, else the IPv6 CIDR.
func (pqi *PriorityQueueItem) cidrLabel() string {
	if pqi.clusterCIDR.IPv4CIDRSet != nil {
		return pqi.clusterCIDR.IPv4CIDRSet.Label
	}

	return pqi.clusterCIDR.IPv6CIDRSet.Label
}
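For intuition on the P1 tie-breaker: MaxCIDRs grows with the gap between the per-node mask and the cluster mask, so 10.0.0.0/16 carved into /24s yields 2^(24-16) = 256 allocatable node CIDRs, while ff:ff::/120 carved into /120s yields 2^0 = 1, and maxAllocatable takes the per-family minimum (1 here). A hedged arithmetic sketch, assuming MaxCIDRs is derived as 1 << (nodeMaskSize - clusterMaskSize):

package main

import "fmt"

// maxCIDRs mirrors the presumed MultiCIDRSet bookkeeping: the number of
// per-node CIDRs that fit inside the cluster CIDR (an assumption, not the diff's code).
func maxCIDRs(clusterMaskSize, nodeMaskSize int) int {
	return 1 << (nodeMaskSize - clusterMaskSize)
}

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	ipv4 := maxCIDRs(16, 24)   // 10.0.0.0/16 carved into /24s -> 256
	ipv6 := maxCIDRs(120, 120) // ff:ff::/120 carved into /120s -> 1
	// maxAllocatable() returns the per-family minimum, i.e. 1 here.
	fmt.Println(ipv4, ipv6, minInt(ipv4, ipv6))
}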
170 pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go Normal file
@@ -0,0 +1,170 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package ipam

import (
	"container/heap"
	"testing"

	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset"
	utilnet "k8s.io/utils/net"
)

func createTestPriorityQueueItem(name, cidr, selectorString string, labelMatchCount, perNodeHostBits int) *PriorityQueueItem {
	_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
	cidrSet, _ := multicidrset.NewMultiCIDRSet(clusterCIDR, perNodeHostBits)

	return &PriorityQueueItem{
		clusterCIDR: &multicidrset.ClusterCIDR{
			Name:        name,
			IPv4CIDRSet: cidrSet,
		},
		labelMatchCount: labelMatchCount,
		selectorString:  selectorString,
	}
}

func TestPriorityQueue(t *testing.T) {
	pqi1 := createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8)
	pqi2 := createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8)
	pqi3 := createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8)
	pqi4 := createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6)
	pqi5 := createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6)
	pqi6 := createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6)

	for _, testQueue := range []struct {
		name  string
		items []*PriorityQueueItem
		want  *PriorityQueueItem
	}{
		{"Test queue with single item", []*PriorityQueueItem{pqi1}, pqi1},
		{"Test queue with items having different labelMatchCount", []*PriorityQueueItem{pqi1, pqi2}, pqi2},
		{"Test queue with items having same labelMatchCount, different max Allocatable Pod CIDRs", []*PriorityQueueItem{pqi1, pqi2, pqi3}, pqi2},
		{"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, different PerNodeMaskSize", []*PriorityQueueItem{pqi1, pqi2, pqi4}, pqi4},
		{"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5}, pqi4},
		{"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5, pqi6}, pqi4},
	} {
		pq := make(PriorityQueue, 0)
		for _, pqi := range testQueue.items {
			heap.Push(&pq, pqi)
		}

		got := heap.Pop(&pq)

		if got != testQueue.want {
			t.Errorf("Error, wanted: %+v, got: %+v", testQueue.want, got)
		}
	}
}

func TestLess(t *testing.T) {
	for _, testQueue := range []struct {
		name  string
		items []*PriorityQueueItem
		want  bool
	}{
		{
			name: "different labelMatchCount, i higher priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 2, 8),
				createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 1, 8),
			},
			want: true,
		},
		{
			name: "different labelMatchCount, i lower priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8),
				createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8),
			},
			want: false,
		},
		{
			name: "same labelMatchCount, different max allocatable cidrs, i higher priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8),
				createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8),
			},
			want: true,
		},
		{
			name: "same labelMatchCount, different max allocatable cidrs, i lower priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr2", "10.1.0.0/16", "foo=bar,name=test2", 2, 8),
				createTestPriorityQueueItem("cidr3", "172.16.0.0/24", "foo=bar,name=test3", 2, 8),
			},
			want: false,
		},
		{
			name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i higher priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr2", "10.1.0.0/26", "foo=bar,name=test2", 2, 6),
				createTestPriorityQueueItem("cidr4", "10.1.1.0/24", "abc=bar,name=test4", 2, 8),
			},
			want: true,
		},
		{
			name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i lower priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8),
				createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6),
			},
			want: false,
		},
		{
			name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i higher priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6),
				createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6),
			},
			want: true,
		},
		{
			name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i lower priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6),
				createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6),
			},
			want: false,
		},
		{
			name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i higher priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6),
				createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6),
			},
			want: true,
		},
		{
			name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i lower priority than j",
			items: []*PriorityQueueItem{
				createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6),
				createTestPriorityQueueItem("cidr6", "10.0.3.0/26", "abc=bar,name=test4", 2, 6),
			},
			want: false,
		},
	} {
		pq := PriorityQueue(testQueue.items)
		got := pq.Less(0, 1)
		if got != testQueue.want {
			t.Errorf("Error, wanted: %v, got: %v\nTest %q \npq[0]: %+v \npq[1]: %+v ", testQueue.want, got, testQueue.name, pq[0], pq[1])
		}
	}
}
1205 pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go Normal file
File diff suppressed because it is too large
1868 pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go Normal file
File diff suppressed because it is too large
78
pkg/controller/nodeipam/ipam/multicidrset/metrics.go
Normal file
78
pkg/controller/nodeipam/ipam/multicidrset/metrics.go
Normal file
@@ -0,0 +1,78 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package multicidrset

import (
	"sync"

	"k8s.io/component-base/metrics"
	"k8s.io/component-base/metrics/legacyregistry"
)

const nodeIpamSubsystem = "node_ipam_controller"

var (
	cidrSetAllocations = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem:      nodeIpamSubsystem,
			Name:           "multicidrset_cidrs_allocations_total",
			Help:           "Counter measuring total number of CIDR allocations.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"clusterCIDR"},
	)
	cidrSetReleases = metrics.NewCounterVec(
		&metrics.CounterOpts{
			Subsystem:      nodeIpamSubsystem,
			Name:           "multicidrset_cidrs_releases_total",
			Help:           "Counter measuring total number of CIDR releases.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"clusterCIDR"},
	)
	cidrSetUsage = metrics.NewGaugeVec(
		&metrics.GaugeOpts{
			Subsystem:      nodeIpamSubsystem,
			Name:           "multicidrset_usage_cidrs",
			Help:           "Gauge measuring percentage of allocated CIDRs.",
			StabilityLevel: metrics.ALPHA,
		},
		[]string{"clusterCIDR"},
	)
	cidrSetAllocationTriesPerRequest = metrics.NewHistogramVec(
		&metrics.HistogramOpts{
			Subsystem:      nodeIpamSubsystem,
			Name:           "multicidrset_allocation_tries_per_request",
			Help:           "Histogram measuring CIDR allocation tries per request.",
			StabilityLevel: metrics.ALPHA,
			Buckets:        metrics.ExponentialBuckets(1, 5, 5),
		},
		[]string{"clusterCIDR"},
	)
)

var registerMetrics sync.Once

// registerCidrsetMetrics registers the metrics that are to be monitored.
func registerCidrsetMetrics() {
	registerMetrics.Do(func() {
		legacyregistry.MustRegister(cidrSetAllocations)
		legacyregistry.MustRegister(cidrSetReleases)
		legacyregistry.MustRegister(cidrSetUsage)
		legacyregistry.MustRegister(cidrSetAllocationTriesPerRequest)
	})
}
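For reference, metrics.ExponentialBuckets(1, 5, 5) gives the tries histogram upper bounds of 1, 5, 25, 125 and 625. A hedged sketch of how these collectors get fed, mirroring the calls Occupy and UpdateEvaluatedCount make in multi_cidr_set.go below:

// Illustrative only; not an exported API of this package.
func recordAllocation(label string, tries int) {
	cidrSetAllocations.WithLabelValues(label).Inc()
	cidrSetAllocationTriesPerRequest.WithLabelValues(label).Observe(float64(tries))
}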

361 pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set.go (new file)
@@ -0,0 +1,361 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package multicidrset

import (
	"encoding/binary"
	"fmt"
	"math/big"
	"math/bits"
	"net"
	"sync"

	netutils "k8s.io/utils/net"
)

// MultiCIDRSet manages a set of CIDR ranges from which blocks of IPs can
// be allocated.
type MultiCIDRSet struct {
	sync.Mutex
	// ClusterCIDR is the CIDR assigned to the cluster.
	ClusterCIDR *net.IPNet
	// NodeMaskSize is the mask size, in bits, assigned to the nodes.
	// It caches the mask size to avoid the penalty of calling nodeMask.Size().
	NodeMaskSize int
	// MaxCIDRs is the maximum number of CIDRs that can be allocated.
	MaxCIDRs int
	// Label stores the CIDR in a string. It is used to identify the metrics such
	// as number of allocations, total number of CIDR releases, percentage of
	// allocated CIDRs, and tries required for allocating a CIDR for a particular CIDRSet.
	Label string
	// AllocatedCIDRMap stores all the allocated CIDRs from the current CIDRSet.
	// It maps each allocated CIDR string to its allocation status; a candidate
	// is considered free only if it has no entry in this map.
	AllocatedCIDRMap map[string]bool

	// clusterMaskSize is the mask size, in bits, assigned to the cluster.
	// It caches the mask size to avoid the penalty of calling clusterCIDR.Mask.Size().
	clusterMaskSize int
	// nodeMask is the network mask assigned to the nodes.
	nodeMask net.IPMask
	// allocatedCIDRs counts the number of CIDRs allocated.
	allocatedCIDRs int
	// nextCandidate points to the next CIDR that should be free.
	nextCandidate int
}

// ClusterCIDR is an internal representation of the ClusterCIDR API object.
type ClusterCIDR struct {
	// Name of the associated ClusterCIDR API object.
	Name string
	// IPv4CIDRSet is the MultiCIDRSet representation of ClusterCIDR.spec.ipv4
	// of the associated ClusterCIDR API object.
	IPv4CIDRSet *MultiCIDRSet
	// IPv6CIDRSet is the MultiCIDRSet representation of ClusterCIDR.spec.ipv6
	// of the associated ClusterCIDR API object.
	IPv6CIDRSet *MultiCIDRSet
	// AssociatedNodes is used to identify which nodes have CIDRs allocated from this ClusterCIDR.
	// Stores a mapping of node name to association status.
	AssociatedNodes map[string]bool
	// Terminating is used to identify whether ClusterCIDR has been marked for termination.
	Terminating bool
}

const (
	// The subnet mask size cannot be greater than 16 more than the cluster mask size.
	// TODO: https://github.com/kubernetes/kubernetes/issues/44918
	// clusterSubnetMaxDiff is limited to 16 due to the uncompressed bitmap.
	// Due to this limitation the subnet mask for an IPv6 cluster CIDR needs to be >= 48,
	// as the default mask size for IPv6 is 64.
	clusterSubnetMaxDiff = 16
	// halfIPv6Len is half of the IPv6 length.
	halfIPv6Len = net.IPv6len / 2
)

// CIDRRangeNoCIDRsRemainingErr is an error type used to denote there is no more
// space to allocate CIDR ranges from the given CIDR.
type CIDRRangeNoCIDRsRemainingErr struct {
	// CIDR represents the CIDR which is exhausted.
	CIDR string
}

func (err *CIDRRangeNoCIDRsRemainingErr) Error() string {
	return fmt.Sprintf("CIDR allocation failed; there are no remaining CIDRs left to allocate in the range %s", err.CIDR)
}

// CIDRSetSubNetTooBigErr is an error type to denote that the subnet mask size is too
// big compared to the CIDR mask size.
type CIDRSetSubNetTooBigErr struct {
	cidr            string
	subnetMaskSize  int
	clusterMaskSize int
}

func (err *CIDRSetSubNetTooBigErr) Error() string {
	return fmt.Sprintf("Creation of New CIDR Set failed for %s. "+
		"PerNodeMaskSize %d is too big for CIDR Mask %d, Maximum difference allowed "+
		"is %d", err.cidr, err.subnetMaskSize, err.clusterMaskSize, clusterSubnetMaxDiff)
}

// NewMultiCIDRSet creates a new MultiCIDRSet.
func NewMultiCIDRSet(cidrConfig *net.IPNet, perNodeHostBits int) (*MultiCIDRSet, error) {
	clusterMask := cidrConfig.Mask
	clusterMaskSize, bits := clusterMask.Size()

	var subNetMaskSize int
	switch /*v4 or v6*/ {
	case netutils.IsIPv4(cidrConfig.IP):
		subNetMaskSize = 32 - perNodeHostBits
	case netutils.IsIPv6(cidrConfig.IP):
		subNetMaskSize = 128 - perNodeHostBits
	}

	if netutils.IsIPv6(cidrConfig.IP) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) {
		return nil, &CIDRSetSubNetTooBigErr{
			cidr:            cidrConfig.String(),
			subnetMaskSize:  subNetMaskSize,
			clusterMaskSize: clusterMaskSize,
		}
	}

	// Register MultiCIDRSet metrics.
	registerCidrsetMetrics()

	return &MultiCIDRSet{
		ClusterCIDR:      cidrConfig,
		nodeMask:         net.CIDRMask(subNetMaskSize, bits),
		clusterMaskSize:  clusterMaskSize,
		MaxCIDRs:         1 << uint32(subNetMaskSize-clusterMaskSize),
		NodeMaskSize:     subNetMaskSize,
		Label:            cidrConfig.String(),
		AllocatedCIDRMap: make(map[string]bool, 0),
	}, nil
}
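A quick worked example of the sizing math, matching the fixtures the metrics tests below use: for cluster CIDR 10.0.0.0/16 with perNodeHostBits = 8, subNetMaskSize is 32 - 8 = 24, so MaxCIDRs is 1 << (24 - 16) = 256 per-node /24 blocks. A sketch, assuming only the imports above:

// Sketch: exercise NewMultiCIDRSet on an IPv4 /16 with 8 host bits per node.
func exampleSizing() {
	_, clusterCIDR, _ := netutils.ParseCIDRSloppy("10.0.0.0/16")
	set, err := NewMultiCIDRSet(clusterCIDR, 8)
	if err != nil {
		panic(err)
	}
	fmt.Println(set.NodeMaskSize, set.MaxCIDRs) // prints: 24 256
}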

func (s *MultiCIDRSet) indexToCIDRBlock(index int) (*net.IPNet, error) {
	var ip []byte
	switch /*v4 or v6*/ {
	case netutils.IsIPv4(s.ClusterCIDR.IP):
		j := uint32(index) << uint32(32-s.NodeMaskSize)
		ipInt := (binary.BigEndian.Uint32(s.ClusterCIDR.IP)) | j
		ip = make([]byte, net.IPv4len)
		binary.BigEndian.PutUint32(ip, ipInt)
	case netutils.IsIPv6(s.ClusterCIDR.IP):
		// leftClusterIP | rightClusterIP
		// 2001:0DB8:1234:0000:0000:0000:0000:0000
		const v6NBits = 128
		const halfV6NBits = v6NBits / 2
		leftClusterIP := binary.BigEndian.Uint64(s.ClusterCIDR.IP[:halfIPv6Len])
		rightClusterIP := binary.BigEndian.Uint64(s.ClusterCIDR.IP[halfIPv6Len:])

		ip = make([]byte, net.IPv6len)

		if s.NodeMaskSize <= halfV6NBits {
			// We only care about the left side IP.
			leftClusterIP |= uint64(index) << uint(halfV6NBits-s.NodeMaskSize)
		} else {
			if s.clusterMaskSize < halfV6NBits {
				// See how many bits are needed to reach the left side.
				btl := uint(s.NodeMaskSize - halfV6NBits)
				indexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index)))
				if indexMaxBit > btl {
					leftClusterIP |= uint64(index) >> btl
				}
			}
			// The right side is calculated the same way in either case;
			// the subNetMaskSize affects both the left and right sides.
			rightClusterIP |= uint64(index) << uint(v6NBits-s.NodeMaskSize)
		}
		binary.BigEndian.PutUint64(ip[:halfIPv6Len], leftClusterIP)
		binary.BigEndian.PutUint64(ip[halfIPv6Len:], rightClusterIP)
	default:
		return nil, fmt.Errorf("invalid IP: %s", s.ClusterCIDR.IP)
	}
	return &net.IPNet{
		IP:   ip,
		Mask: s.nodeMask,
	}, nil
}
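The IPv4 arm is the easiest to sanity-check by hand: for cluster CIDR 127.123.0.0/16 with NodeMaskSize 24, index 15 is shifted left by 32 - 24 = 8 bits and OR-ed into the base address, giving 127.123.15.0/24. This is the same case TestIndexToCIDRBlock covers below; a standalone sketch of the arithmetic:

// Worked IPv4 example of the shift-and-OR above.
func exampleIndexToBlock() {
	base := binary.BigEndian.Uint32(net.IPv4(127, 123, 0, 0).To4())
	ipInt := base | (uint32(15) << (32 - 24)) // index 15, NodeMaskSize 24
	out := make(net.IP, net.IPv4len)
	binary.BigEndian.PutUint32(out, ipInt)
	fmt.Println(&net.IPNet{IP: out, Mask: net.CIDRMask(24, 32)}) // 127.123.15.0/24
}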

// NextCandidate returns the next candidate and the last evaluated index
// for the current cidrSet. It returns an error once every CIDR in the set
// has been allocated.
func (s *MultiCIDRSet) NextCandidate() (*net.IPNet, int, error) {
	s.Lock()
	defer s.Unlock()

	if s.allocatedCIDRs == s.MaxCIDRs {
		return nil, 0, &CIDRRangeNoCIDRsRemainingErr{
			CIDR: s.Label,
		}
	}

	candidate := s.nextCandidate
	for i := 0; i < s.MaxCIDRs; i++ {
		nextCandidateCIDR, err := s.indexToCIDRBlock(candidate)
		if err != nil {
			return nil, i, err
		}
		// Check that the nextCandidate is not already allocated.
		if _, ok := s.AllocatedCIDRMap[nextCandidateCIDR.String()]; !ok {
			s.nextCandidate = (candidate + 1) % s.MaxCIDRs
			return nextCandidateCIDR, i, nil
		}
		candidate = (candidate + 1) % s.MaxCIDRs
	}

	return nil, s.MaxCIDRs, &CIDRRangeNoCIDRsRemainingErr{
		CIDR: s.Label,
	}
}
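A caller pairs NextCandidate with Occupy and reports the scan length to the tries histogram via UpdateEvaluatedCount. A hedged usage sketch (the test file below wraps the same two calls in its allocateNext helper, minus the metric):

// Sketch: take the next free per-node CIDR, record how far the scan had
// to walk, and mark the block used.
func allocateOne(set *MultiCIDRSet) (*net.IPNet, error) {
	candidate, evaluated, err := set.NextCandidate()
	if err != nil {
		return nil, err
	}
	set.UpdateEvaluatedCount(evaluated)
	if err := set.Occupy(candidate); err != nil {
		return nil, err
	}
	return candidate, nil
}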

// getBeginningAndEndIndices returns the indices for the given CIDR; the returned
// values are inclusive indices [beginning, end].
func (s *MultiCIDRSet) getBeginningAndEndIndices(cidr *net.IPNet) (int, int, error) {
	if cidr == nil {
		return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.ClusterCIDR)
	}
	begin, end := 0, s.MaxCIDRs-1
	cidrMask := cidr.Mask
	maskSize, _ := cidrMask.Size()
	var ipSize int

	if !s.ClusterCIDR.Contains(cidr.IP.Mask(s.ClusterCIDR.Mask)) && !cidr.Contains(s.ClusterCIDR.IP.Mask(cidr.Mask)) {
		return -1, -1, fmt.Errorf("cidr %v is out of the range of cluster cidr %v", cidr, s.ClusterCIDR)
	}

	if s.clusterMaskSize < maskSize {
		var err error
		ipSize = net.IPv4len
		if netutils.IsIPv6(cidr.IP) {
			ipSize = net.IPv6len
		}
		begin, err = s.getIndexForCIDR(&net.IPNet{
			IP:   cidr.IP.Mask(s.nodeMask),
			Mask: s.nodeMask,
		})
		if err != nil {
			return -1, -1, err
		}
		ip := make([]byte, ipSize)
		if netutils.IsIPv4(cidr.IP) {
			ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask))
			binary.BigEndian.PutUint32(ip, ipInt)
		} else {
			// ipIntLeft | ipIntRight
			// 2001:0DB8:1234:0000:0000:0000:0000:0000
			ipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len/2]))
			ipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len/2:]))
			binary.BigEndian.PutUint64(ip[:net.IPv6len/2], ipIntLeft)
			binary.BigEndian.PutUint64(ip[net.IPv6len/2:], ipIntRight)
		}
		end, err = s.getIndexForCIDR(&net.IPNet{
			IP:   net.IP(ip).Mask(s.nodeMask),
			Mask: s.nodeMask,
		})
		if err != nil {
			return -1, -1, err
		}
	}
	return begin, end, nil
}

// Release releases the given CIDR range.
func (s *MultiCIDRSet) Release(cidr *net.IPNet) error {
	begin, end, err := s.getBeginningAndEndIndices(cidr)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()

	for i := begin; i <= end; i++ {
		// Remove from the allocated CIDR map and decrement the counter only if currently
		// marked allocated. Avoids double counting.
		currCIDR, err := s.indexToCIDRBlock(i)
		if err != nil {
			return err
		}
		if _, ok := s.AllocatedCIDRMap[currCIDR.String()]; ok {
			delete(s.AllocatedCIDRMap, currCIDR.String())
			s.allocatedCIDRs--
			cidrSetReleases.WithLabelValues(s.Label).Inc()
		}
	}

	cidrSetUsage.WithLabelValues(s.Label).Set(float64(s.allocatedCIDRs) / float64(s.MaxCIDRs))

	return nil
}

// Occupy marks the given CIDR range as used. Occupy succeeds even if the CIDR
// range was previously used.
func (s *MultiCIDRSet) Occupy(cidr *net.IPNet) (err error) {
	begin, end, err := s.getBeginningAndEndIndices(cidr)
	if err != nil {
		return err
	}
	s.Lock()
	defer s.Unlock()

	for i := begin; i <= end; i++ {
		// Add to the allocated CIDR map and increment the counter only if not already
		// marked allocated. Prevents double counting.
		currCIDR, err := s.indexToCIDRBlock(i)
		if err != nil {
			return err
		}
		if _, ok := s.AllocatedCIDRMap[currCIDR.String()]; !ok {
			s.AllocatedCIDRMap[currCIDR.String()] = true
			cidrSetAllocations.WithLabelValues(s.Label).Inc()
			s.allocatedCIDRs++
		}
	}
	cidrSetUsage.WithLabelValues(s.Label).Set(float64(s.allocatedCIDRs) / float64(s.MaxCIDRs))

	return nil
}
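Occupy and Release are symmetric, and both are idempotent per block: occupying an already-occupied block or releasing a free one leaves the counters untouched, which TestDoubleOccupyRelease below exercises. A small round trip under the /16-with-/24-blocks example from above:

// Sketch: a double Occupy/Release round trip on one /24 block.
func exampleIdempotence(set *MultiCIDRSet) {
	_, block, _ := netutils.ParseCIDRSloppy("10.0.5.0/24")
	_ = set.Occupy(block)  // allocatedCIDRs goes 0 -> 1
	_ = set.Occupy(block)  // still 1: double-occupy is a no-op
	_ = set.Release(block) // back to 0
	_ = set.Release(block) // still 0: double-release is a no-op
}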

func (s *MultiCIDRSet) getIndexForCIDR(cidr *net.IPNet) (int, error) {
	return s.getIndexForIP(cidr.IP)
}

func (s *MultiCIDRSet) getIndexForIP(ip net.IP) (int, error) {
	if ip.To4() != nil {
		cidrIndex := (binary.BigEndian.Uint32(s.ClusterCIDR.IP) ^ binary.BigEndian.Uint32(ip.To4())) >> uint32(32-s.NodeMaskSize)
		if cidrIndex >= uint32(s.MaxCIDRs) {
			return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.NodeMaskSize)
		}
		return int(cidrIndex), nil
	}
	if netutils.IsIPv6(ip) {
		bigIP := big.NewInt(0).SetBytes(s.ClusterCIDR.IP)
		bigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip))
		cidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.NodeMaskSize))
		cidrIndex := cidrIndexBig.Uint64()
		if cidrIndex >= uint64(s.MaxCIDRs) {
			return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.NodeMaskSize)
		}
		return int(cidrIndex), nil
	}

	return 0, fmt.Errorf("invalid IP: %v", ip)
}
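The XOR-and-shift is the inverse of indexToCIDRBlock: the XOR cancels the shared cluster prefix and the shift drops the host bits, leaving just the block index. For cluster CIDR 127.0.0.0/8 with NodeMaskSize 16, the subnet 127.123.0.0/16 XORs to 0x007b0000, which shifted right by 32 - 16 = 16 bits is index 123 (the same arithmetic TestGetBitforCIDR checks below):

// Worked IPv4 example of the XOR-and-shift above.
func exampleIndexForIP() {
	cluster := binary.BigEndian.Uint32(net.IPv4(127, 0, 0, 0).To4())
	subnet := binary.BigEndian.Uint32(net.IPv4(127, 123, 0, 0).To4())
	fmt.Println((cluster ^ subnet) >> (32 - 16)) // prints: 123
}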

// UpdateEvaluatedCount records the number of candidates evaluated for an
// allocation request in the allocation-tries histogram.
func (s *MultiCIDRSet) UpdateEvaluatedCount(evaluated int) {
	cidrSetAllocationTriesPerRequest.WithLabelValues(s.Label).Observe(float64(evaluated))
}

874 pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set_test.go (new file)
@@ -0,0 +1,874 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package multicidrset

import (
	"net"
	"reflect"
	"testing"

	"k8s.io/component-base/metrics/testutil"
	"k8s.io/klog/v2"
	utilnet "k8s.io/utils/net"
)

func allocateNext(s *MultiCIDRSet) (*net.IPNet, error) {
	candidate, _, err := s.NextCandidate()
	if err != nil {
		return nil, err
	}

	err = s.Occupy(candidate)

	return candidate, err
}

func TestCIDRSetFullyAllocated(t *testing.T) {
	cases := []struct {
		clusterCIDRStr  string
		perNodeHostBits int
		expectedCIDR    string
		description     string
	}{
		{
			clusterCIDRStr:  "127.123.234.0/28",
			perNodeHostBits: 4,
			expectedCIDR:    "127.123.234.0/28",
			description:     "Fully allocated CIDR with IPv4",
		},
		{
			clusterCIDRStr:  "beef:1234::/112",
			perNodeHostBits: 16,
			expectedCIDR:    "beef:1234::/112",
			description:     "Fully allocated CIDR with IPv6",
		},
	}
	for _, tc := range cases {
		_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
		a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
		if err != nil {
			t.Fatalf("unexpected error: %v for %v", err, tc.description)
		}
		p, err := allocateNext(a)
		if err != nil {
			t.Fatalf("unexpected error: %v for %v", err, tc.description)
		}
		if p.String() != tc.expectedCIDR {
			t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v",
				p.String(), tc.expectedCIDR, tc.description)
		}

		_, err = allocateNext(a)
		if err == nil {
			t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
		}

		a.Release(p)

		p, err = allocateNext(a)
		if err != nil {
			t.Fatalf("unexpected error: %v for %v", err, tc.description)
		}
		if p.String() != tc.expectedCIDR {
			t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v",
				p.String(), tc.expectedCIDR, tc.description)
		}
		_, err = allocateNext(a)
		if err == nil {
			t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
		}
	}
}

func TestIndexToCIDRBlock(t *testing.T) {
	cases := []struct {
		clusterCIDRStr  string
		perNodeHostBits int
		index           int
		CIDRBlock       string
		description     string
	}{
		{
			clusterCIDRStr:  "127.123.3.0/16",
			perNodeHostBits: 8,
			index:           0,
			CIDRBlock:       "127.123.0.0/24",
			description:     "1st IP address indexed with IPv4",
		},
		{
			clusterCIDRStr:  "127.123.0.0/16",
			perNodeHostBits: 8,
			index:           15,
			CIDRBlock:       "127.123.15.0/24",
			description:     "16th IP address indexed with IPv4",
		},
		{
			clusterCIDRStr:  "192.168.5.219/28",
			perNodeHostBits: 0,
			index:           5,
			CIDRBlock:       "192.168.5.213/32",
			description:     "5th IP address indexed with IPv4",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:3::/48",
			perNodeHostBits: 64,
			index:           0,
			CIDRBlock:       "2001:db8:1234::/64",
			description:     "1st IP address indexed with IPv6 /64",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234::/48",
			perNodeHostBits: 64,
			index:           15,
			CIDRBlock:       "2001:db8:1234:f::/64",
			description:     "16th IP address indexed with IPv6 /64",
		},
		{
			clusterCIDRStr:  "2001:0db8:85a3::8a2e:0370:7334/50",
			perNodeHostBits: 65,
			index:           6425,
			CIDRBlock:       "2001:db8:85a3:3232::/63",
			description:     "6426th IP address indexed with IPv6 /63",
		},
		{
			clusterCIDRStr:  "2001:0db8::/32",
			perNodeHostBits: 80,
			index:           0,
			CIDRBlock:       "2001:db8::/48",
			description:     "1st IP address indexed with IPv6 /48",
		},
		{
			clusterCIDRStr:  "2001:0db8::/32",
			perNodeHostBits: 80,
			index:           15,
			CIDRBlock:       "2001:db8:f::/48",
			description:     "16th IP address indexed with IPv6 /48",
		},
		{
			clusterCIDRStr:  "2001:0db8:85a3::8a2e:0370:7334/32",
			perNodeHostBits: 80,
			index:           6425,
			CIDRBlock:       "2001:db8:1919::/48",
			description:     "6426th IP address indexed with IPv6 /48",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:ff00::/56",
			perNodeHostBits: 56,
			index:           0,
			CIDRBlock:       "2001:db8:1234:ff00::/72",
			description:     "1st IP address indexed with IPv6 /72",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:ff00::/56",
			perNodeHostBits: 56,
			index:           15,
			CIDRBlock:       "2001:db8:1234:ff00:f00::/72",
			description:     "16th IP address indexed with IPv6 /72",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:ff00::0370:7334/56",
			perNodeHostBits: 56,
			index:           6425,
			CIDRBlock:       "2001:db8:1234:ff19:1900::/72",
			description:     "6426th IP address indexed with IPv6 /72",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:0:1234::/80",
			perNodeHostBits: 32,
			index:           0,
			CIDRBlock:       "2001:db8:1234:0:1234::/96",
			description:     "1st IP address indexed with IPv6 /96",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:0:1234::/80",
			perNodeHostBits: 32,
			index:           15,
			CIDRBlock:       "2001:db8:1234:0:1234:f::/96",
			description:     "16th IP address indexed with IPv6 /96",
		},
		{
			clusterCIDRStr:  "2001:0db8:1234:ff00::0370:7334/80",
			perNodeHostBits: 32,
			index:           6425,
			CIDRBlock:       "2001:db8:1234:ff00:0:1919::/96",
			description:     "6426th IP address indexed with IPv6 /96",
		},
	}
	for _, tc := range cases {
		_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
		a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
		if err != nil {
			t.Fatalf("error for %v ", tc.description)
		}
		cidr, err := a.indexToCIDRBlock(tc.index)
		if err != nil {
			t.Fatalf("error for %v ", tc.description)
		}
		if cidr.String() != tc.CIDRBlock {
			t.Fatalf("error for %v index %d %s", tc.description, tc.index, cidr.String())
		}
	}
}

func TestCIDRSet_RandomishAllocation(t *testing.T) {
	cases := []struct {
		clusterCIDRStr string
		description    string
	}{
		{
			clusterCIDRStr: "127.123.234.0/16",
			description:    "RandomishAllocation with IPv4",
		},
		{
			clusterCIDRStr: "beef:1234::/112",
			description:    "RandomishAllocation with IPv6",
		},
	}
	for _, tc := range cases {
		_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
		a, err := NewMultiCIDRSet(clusterCIDR, 8)
		if err != nil {
			t.Fatalf("Error allocating CIDRSet for %v", tc.description)
		}
		// allocate all the CIDRs.
		var cidrs []*net.IPNet

		for i := 0; i < 256; i++ {
			if c, err := allocateNext(a); err == nil {
				cidrs = append(cidrs, c)
			} else {
				t.Fatalf("unexpected error: %v for %v", err, tc.description)
			}
		}

		_, err = allocateNext(a)
		if err == nil {
			t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
		}
		// release all the CIDRs.
		for i := 0; i < len(cidrs); i++ {
			a.Release(cidrs[i])
		}

		// allocate the CIDRs again.
		var rcidrs []*net.IPNet
		for i := 0; i < 256; i++ {
			if c, err := allocateNext(a); err == nil {
				rcidrs = append(rcidrs, c)
			} else {
				t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description)
			}
		}
		_, err = allocateNext(a)
		if err == nil {
			t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
		}

		if !reflect.DeepEqual(cidrs, rcidrs) {
			t.Fatalf("expected re-allocated cidrs to be the same collection for %v", tc.description)
		}
	}
}

func TestCIDRSet_AllocationOccupied(t *testing.T) {
	cases := []struct {
		clusterCIDRStr string
		description    string
	}{
		{
			clusterCIDRStr: "127.123.234.0/16",
			description:    "AllocationOccupied with IPv4",
		},
		{
			clusterCIDRStr: "beef:1234::/112",
			description:    "AllocationOccupied with IPv6",
		},
	}
	for _, tc := range cases {
		_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
		a, err := NewMultiCIDRSet(clusterCIDR, 8)
		if err != nil {
			t.Fatalf("Error allocating CIDRSet for %v", tc.description)
		}
		// allocate all the CIDRs.
		var cidrs []*net.IPNet
		var numCIDRs = 256

		for i := 0; i < numCIDRs; i++ {
			if c, err := allocateNext(a); err == nil {
				cidrs = append(cidrs, c)
			} else {
				t.Fatalf("unexpected error: %v for %v", err, tc.description)
			}
		}

		_, err = allocateNext(a)
		if err == nil {
			t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
		}
		// release all the CIDRs.
		for i := 0; i < len(cidrs); i++ {
			a.Release(cidrs[i])
		}
		// occupy the last 128 CIDRs.
		for i := numCIDRs / 2; i < numCIDRs; i++ {
			a.Occupy(cidrs[i])
		}
		// occupy the first of the last 128 again.
		a.Occupy(cidrs[numCIDRs/2])

		// allocate the first 128 CIDRs again.
		var rcidrs []*net.IPNet
		for i := 0; i < numCIDRs/2; i++ {
			if c, err := allocateNext(a); err == nil {
				rcidrs = append(rcidrs, c)
			} else {
				t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description)
			}
		}
		_, err = allocateNext(a)
		if err == nil {
			t.Fatalf("expected error because of fully-allocated range for %v", tc.description)
		}

		// check Occupy() works properly.
		for i := numCIDRs / 2; i < numCIDRs; i++ {
			rcidrs = append(rcidrs, cidrs[i])
		}
		if !reflect.DeepEqual(cidrs, rcidrs) {
			t.Fatalf("expected re-allocated cidrs to be the same collection for %v", tc.description)
		}
	}
}

func TestDoubleOccupyRelease(t *testing.T) {
	// Run a sequence of operations and check the number of occupied CIDRs
	// after each one.
	clusterCIDRStr := "10.42.0.0/16"
	operations := []struct {
		cidrStr     string
		operation   string
		numOccupied int
	}{
		// Occupy 1 element: +1
		{
			cidrStr:     "10.42.5.0/24",
			operation:   "occupy",
			numOccupied: 1,
		},
		// Occupy 1 more element: +1
		{
			cidrStr:     "10.42.9.0/24",
			operation:   "occupy",
			numOccupied: 2,
		},
		// Occupy 4 elements overlapping with one from the above: +3
		{
			cidrStr:     "10.42.8.0/22",
			operation:   "occupy",
			numOccupied: 5,
		},
		// Occupy an already-occupied element: no change
		{
			cidrStr:     "10.42.9.0/24",
			operation:   "occupy",
			numOccupied: 5,
		},
		// Release an occupied element: -1
		{
			cidrStr:     "10.42.9.0/24",
			operation:   "release",
			numOccupied: 4,
		},
		// Release an unoccupied element: no change
		{
			cidrStr:     "10.42.9.0/24",
			operation:   "release",
			numOccupied: 4,
		},
		// Release 4 elements, only one of which is occupied: -1
		{
			cidrStr:     "10.42.4.0/22",
			operation:   "release",
			numOccupied: 3,
		},
	}
	// After the operations above exactly three /24s remain occupied, so of the
	// 256 possible /24s exactly (1 << 8) - 3 should still be allocatable.
	numAllocatable24s := (1 << 8) - 3

	_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(clusterCIDRStr)
	a, err := NewMultiCIDRSet(clusterCIDR, 8)
	if err != nil {
		t.Fatalf("Error allocating CIDRSet")
	}

	// Execute the operations.
	for _, op := range operations {
		_, cidr, _ := utilnet.ParseCIDRSloppy(op.cidrStr)
		switch op.operation {
		case "occupy":
			a.Occupy(cidr)
		case "release":
			a.Release(cidr)
		default:
			t.Fatalf("test error: unknown operation %v", op.operation)
		}
		if a.allocatedCIDRs != op.numOccupied {
			t.Fatalf("CIDR %v Expected %d occupied CIDRS, got %d", cidr, op.numOccupied, a.allocatedCIDRs)
		}
	}

	// Make sure that we can allocate exactly `numAllocatable24s` elements.
	for i := 0; i < numAllocatable24s; i++ {
		_, err := allocateNext(a)
		if err != nil {
			t.Fatalf("Expected to be able to allocate %d CIDRS, failed after %d", numAllocatable24s, i)
		}
	}

	_, err = allocateNext(a)
	if err == nil {
		t.Fatalf("Expected to be able to allocate exactly %d CIDRS, got one more", numAllocatable24s)
	}
}

func TestGetBitforCIDR(t *testing.T) {
	cases := []struct {
		clusterCIDRStr  string
		perNodeHostBits int
		subNetCIDRStr   string
		expectedBit     int
		expectErr       bool
		description     string
	}{
		{
			clusterCIDRStr:  "127.0.0.0/8",
			perNodeHostBits: 16,
			subNetCIDRStr:   "127.0.0.0/16",
			expectedBit:     0,
			expectErr:       false,
			description:     "Get 0 Bit with IPv4",
		},
		{
			clusterCIDRStr:  "be00::/8",
			perNodeHostBits: 112,
			subNetCIDRStr:   "be00::/16",
			expectedBit:     0,
			expectErr:       false,
			description:     "Get 0 Bit with IPv6",
		},
		{
			clusterCIDRStr:  "127.0.0.0/8",
			perNodeHostBits: 16,
			subNetCIDRStr:   "127.123.0.0/16",
			expectedBit:     123,
			expectErr:       false,
			description:     "Get 123rd Bit with IPv4",
		},
		{
			clusterCIDRStr:  "be00::/8",
			perNodeHostBits: 112,
			subNetCIDRStr:   "beef::/16",
			expectedBit:     0xef,
			expectErr:       false,
			description:     "Get xef Bit with IPv6",
		},
		{
			clusterCIDRStr:  "127.0.0.0/8",
			perNodeHostBits: 16,
			subNetCIDRStr:   "127.168.0.0/16",
			expectedBit:     168,
			expectErr:       false,
			description:     "Get 168th Bit with IPv4",
		},
		{
			clusterCIDRStr:  "be00::/8",
			perNodeHostBits: 112,
			subNetCIDRStr:   "be68::/16",
			expectedBit:     0x68,
			expectErr:       false,
			description:     "Get x68th Bit with IPv6",
		},
		{
			clusterCIDRStr:  "127.0.0.0/8",
			perNodeHostBits: 16,
			subNetCIDRStr:   "127.224.0.0/16",
			expectedBit:     224,
			expectErr:       false,
			description:     "Get 224th Bit with IPv4",
		},
		{
			clusterCIDRStr:  "be00::/8",
			perNodeHostBits: 112,
			subNetCIDRStr:   "be24::/16",
			expectedBit:     0x24,
			expectErr:       false,
			description:     "Get x24th Bit with IPv6",
		},
		{
			clusterCIDRStr:  "192.168.0.0/16",
			perNodeHostBits: 8,
			subNetCIDRStr:   "192.168.12.0/24",
			expectedBit:     12,
			expectErr:       false,
			description:     "Get 12th Bit with IPv4",
		},
		{
			clusterCIDRStr:  "beef::/16",
			perNodeHostBits: 104,
			subNetCIDRStr:   "beef:1200::/24",
			expectedBit:     0x12,
			expectErr:       false,
			description:     "Get x12th Bit with IPv6",
		},
		{
			clusterCIDRStr:  "192.168.0.0/16",
			perNodeHostBits: 8,
			subNetCIDRStr:   "192.168.151.0/24",
			expectedBit:     151,
			expectErr:       false,
			description:     "Get 151st Bit with IPv4",
		},
		{
			clusterCIDRStr:  "beef::/16",
			perNodeHostBits: 104,
			subNetCIDRStr:   "beef:9700::/24",
			expectedBit:     0x97,
			expectErr:       false,
			description:     "Get x97th Bit with IPv6",
		},
		{
			clusterCIDRStr:  "192.168.0.0/16",
			perNodeHostBits: 8,
			subNetCIDRStr:   "127.168.224.0/24",
			expectErr:       true,
			description:     "Get error with IPv4",
		},
		{
			clusterCIDRStr:  "beef::/16",
			perNodeHostBits: 104,
			subNetCIDRStr:   "2001:db00::/24",
			expectErr:       true,
			description:     "Get error with IPv6",
		},
	}

	for _, tc := range cases {
		_, clusterCIDR, err := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
		if err != nil {
			t.Fatalf("unexpected error: %v for %v", err, tc.description)
		}

		cs, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
		if err != nil {
			t.Fatalf("Error allocating CIDRSet for %v", tc.description)
		}
		_, subnetCIDR, err := utilnet.ParseCIDRSloppy(tc.subNetCIDRStr)
		if err != nil {
			t.Fatalf("unexpected error: %v for %v", err, tc.description)
		}

		got, err := cs.getIndexForCIDR(subnetCIDR)
		if err == nil && tc.expectErr {
			klog.Errorf("expected error but got nil for %v", tc.description)
			continue
		}

		if err != nil && !tc.expectErr {
			klog.Errorf("unexpected error: %v for %v", err, tc.description)
			continue
		}

		if got != tc.expectedBit {
			klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description)
		}
	}
}

func TestCIDRSetv6(t *testing.T) {
	cases := []struct {
		clusterCIDRStr  string
		perNodeHostBits int
		expectedCIDR    string
		expectedCIDR2   string
		expectErr       bool
		description     string
	}{
		{
			clusterCIDRStr:  "127.0.0.0/8",
			perNodeHostBits: 0,
			expectErr:       false,
			expectedCIDR:    "127.0.0.0/32",
			expectedCIDR2:   "127.0.0.1/32",
			description:     "Max cluster subnet size with IPv4",
		},
		{
			clusterCIDRStr:  "beef:1234::/32",
			perNodeHostBits: 79,
			expectErr:       true,
			description:     "Max cluster subnet size with IPv6",
		},
		{
			clusterCIDRStr:  "2001:beef:1234:369b::/60",
			perNodeHostBits: 64,
			expectedCIDR:    "2001:beef:1234:3690::/64",
			expectedCIDR2:   "2001:beef:1234:3691::/64",
			expectErr:       false,
			description:     "Allocate a few IPv6",
		},
	}
	for _, tc := range cases {
		t.Run(tc.description, func(t *testing.T) {
			_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr)
			a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits)
			if gotErr := err != nil; gotErr != tc.expectErr {
				t.Fatalf("NewMultiCIDRSet(%v, %v) = %v, %v; gotErr = %t, want %t", clusterCIDR, tc.perNodeHostBits, a, err, gotErr, tc.expectErr)
			}
			if a == nil {
				return
			}
			p, err := allocateNext(a)
			if err == nil && tc.expectErr {
				t.Errorf("allocateNext(a) = nil, want error")
			}
			if err != nil && !tc.expectErr {
				t.Errorf("allocateNext(a) = %+v, want no error", err)
			}
			if !tc.expectErr {
				if p != nil && p.String() != tc.expectedCIDR {
					t.Fatalf("allocateNext(a) got %+v, want %+v", p.String(), tc.expectedCIDR)
				}
			}
			p2, err := allocateNext(a)
			if err == nil && tc.expectErr {
				t.Errorf("allocateNext(a) = nil, want error")
			}
			if err != nil && !tc.expectErr {
				t.Errorf("allocateNext(a) = %+v, want no error", err)
			}
			if !tc.expectErr {
				if p2 != nil && p2.String() != tc.expectedCIDR2 {
					t.Fatalf("allocateNext(a) got %+v, want %+v", p2.String(), tc.expectedCIDR2)
				}
			}
		})
	}
}

func TestMultiCIDRSetMetrics(t *testing.T) {
	cidr := "10.0.0.0/16"
	_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
	// We have 256 free cidrs.
	a, err := NewMultiCIDRSet(clusterCIDR, 8)
	if err != nil {
		t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
	}
	clearMetrics(map[string]string{"clusterCIDR": cidr})

	// Allocate next all.
	for i := 1; i <= 256; i++ {
		_, err := allocateNext(a)
		if err != nil {
			t.Fatalf("unexpected error allocating a new CIDR: %v", err)
		}
		em := testMetrics{
			usage:      float64(i) / float64(256),
			allocs:     float64(i),
			releases:   0,
			allocTries: 0,
		}
		expectMetrics(t, cidr, em)
	}
	// Release all CIDRs.
	a.Release(clusterCIDR)
	em := testMetrics{
		usage:      0,
		allocs:     256,
		releases:   256,
		allocTries: 0,
	}
	expectMetrics(t, cidr, em)

	// Allocate all CIDRs.
	a.Occupy(clusterCIDR)
	em = testMetrics{
		usage:      1,
		allocs:     512,
		releases:   256,
		allocTries: 0,
	}
	expectMetrics(t, cidr, em)
}

func TestMultiCIDRSetMetricsHistogram(t *testing.T) {
	cidr := "10.0.0.0/16"
	_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
	// We have 256 free cidrs.
	a, err := NewMultiCIDRSet(clusterCIDR, 8)
	if err != nil {
		t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
	}
	clearMetrics(map[string]string{"clusterCIDR": cidr})

	// Allocate half of the range.
	// Occupy does not update the nextCandidate.
	_, halfClusterCIDR, _ := utilnet.ParseCIDRSloppy("10.0.0.0/17")
	a.Occupy(halfClusterCIDR)
	em := testMetrics{
		usage:    0.5,
		allocs:   128,
		releases: 0,
	}
	expectMetrics(t, cidr, em)
	// Allocate next should iterate until the next free cidr,
	// which is exactly the same number we allocated previously.
	_, err = allocateNext(a)
	if err != nil {
		t.Fatalf("unexpected error allocating a new CIDR: %v", err)
	}
	em = testMetrics{
		usage:    float64(129) / float64(256),
		allocs:   129,
		releases: 0,
	}
	expectMetrics(t, cidr, em)
}

func TestMultiCIDRSetMetricsDual(t *testing.T) {
	// create IPv4 cidrSet.
	cidrIPv4 := "10.0.0.0/16"
	_, clusterCIDRv4, _ := utilnet.ParseCIDRSloppy(cidrIPv4)
	a, err := NewMultiCIDRSet(clusterCIDRv4, 8)
	if err != nil {
		t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
	}
	clearMetrics(map[string]string{"clusterCIDR": cidrIPv4})
	// create IPv6 cidrSet.
	cidrIPv6 := "2001:db8::/48"
	_, clusterCIDRv6, _ := utilnet.ParseCIDRSloppy(cidrIPv6)
	b, err := NewMultiCIDRSet(clusterCIDRv6, 64)
	if err != nil {
		t.Fatalf("unexpected error creating MultiCIDRSet: %v", err)
	}
	clearMetrics(map[string]string{"clusterCIDR": cidrIPv6})
	// Allocate all.
	a.Occupy(clusterCIDRv4)
	em := testMetrics{
		usage:      1,
		allocs:     256,
		releases:   0,
		allocTries: 0,
	}
	expectMetrics(t, cidrIPv4, em)

	b.Occupy(clusterCIDRv6)
	em = testMetrics{
		usage:      1,
		allocs:     65536,
		releases:   0,
		allocTries: 0,
	}
	expectMetrics(t, cidrIPv6, em)

	// Release all.
	a.Release(clusterCIDRv4)
	em = testMetrics{
		usage:      0,
		allocs:     256,
		releases:   256,
		allocTries: 0,
	}
	expectMetrics(t, cidrIPv4, em)
	b.Release(clusterCIDRv6)
	em = testMetrics{
		usage:      0,
		allocs:     65536,
		releases:   65536,
		allocTries: 0,
	}
	expectMetrics(t, cidrIPv6, em)
}

// Metrics helpers.
func clearMetrics(labels map[string]string) {
	cidrSetAllocations.Delete(labels)
	cidrSetReleases.Delete(labels)
	cidrSetUsage.Delete(labels)
	cidrSetAllocationTriesPerRequest.Delete(labels)
}

type testMetrics struct {
	usage      float64
	allocs     float64
	releases   float64
	allocTries float64
}

func expectMetrics(t *testing.T, label string, em testMetrics) {
	var m testMetrics
	var err error
	m.usage, err = testutil.GetGaugeMetricValue(cidrSetUsage.WithLabelValues(label))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", cidrSetUsage.Name, err)
	}
	m.allocs, err = testutil.GetCounterMetricValue(cidrSetAllocations.WithLabelValues(label))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", cidrSetAllocations.Name, err)
	}
	m.releases, err = testutil.GetCounterMetricValue(cidrSetReleases.WithLabelValues(label))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", cidrSetReleases.Name, err)
	}
	m.allocTries, err = testutil.GetHistogramMetricValue(cidrSetAllocationTriesPerRequest.WithLabelValues(label))
	if err != nil {
		t.Errorf("failed to get %s value, err: %v", cidrSetAllocationTriesPerRequest.Name, err)
	}

	if m != em {
		t.Fatalf("metrics error: expected %v, received %v", em, m)
	}
}

// Benchmarks
func benchmarkAllocateAllIPv6(cidr string, perNodeHostBits int, b *testing.B) {
	_, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr)
	a, _ := NewMultiCIDRSet(clusterCIDR, perNodeHostBits)
	for n := 0; n < b.N; n++ {
		// Allocate the whole range + 1.
		for i := 0; i <= a.MaxCIDRs; i++ {
			allocateNext(a)
		}
		// Release all.
		a.Release(clusterCIDR)
	}
}

func BenchmarkAllocateAll_48_52(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 52, b) }
func BenchmarkAllocateAll_48_56(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 56, b) }

func BenchmarkAllocateAll_48_60(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 60, b) }
func BenchmarkAllocateAll_48_64(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 64, b) }

func BenchmarkAllocateAll_64_68(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 68, b) }

func BenchmarkAllocateAll_64_72(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 72, b) }
func BenchmarkAllocateAll_64_76(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 76, b) }

func BenchmarkAllocateAll_64_80(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 80, b) }
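These follow the standard Go benchmark convention, so they can be run on their own with something like go test -run '^$' -bench BenchmarkAllocateAll ./pkg/controller/nodeipam/ipam/multicidrset/ (the package path is taken from the file headers above).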

@@ -41,13 +41,6 @@ import (
	controllerutil "k8s.io/kubernetes/pkg/controller/util/node"
)

// cidrs are reserved, then node resource is patched with them
// this type holds the reservation info for a node
type nodeReservedCIDRs struct {
	allocatedCIDRs []*net.IPNet
	nodeName       string
}

type rangeAllocator struct {
	client clientset.Interface
	// cluster cidrs as passed in during controller creation
@@ -333,7 +326,7 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
	var err error
	var node *v1.Node
	defer r.removeNodeFromProcessing(data.nodeName)
	cidrsString := cidrsAsString(data.allocatedCIDRs)
	cidrsString := ipnetToStringList(data.allocatedCIDRs)
	node, err = r.nodeLister.Get(data.nodeName)
	if err != nil {
		klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDRs: %v", data.nodeName, err)
@@ -391,12 +384,3 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error {
	}
	return err
}

// converts a slice of cidrs into <c-1>,<c-2>,<c-n>
func cidrsAsString(inCIDRs []*net.IPNet) []string {
	outCIDRs := make([]string, len(inCIDRs))
	for idx, inCIDR := range inCIDRs {
		outCIDRs[idx] = inCIDR.String()
	}
	return outCIDRs
}
@@ -25,40 +25,12 @@ import (
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
	"k8s.io/kubernetes/pkg/controller/testutil"
	netutils "k8s.io/utils/net"
)

const testNodePollInterval = 10 * time.Millisecond

var alwaysReady = func() bool { return true }

func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
	return wait.Poll(nodePollInterval, timeout, func() (bool, error) {
		if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
			return true, nil
		}
		return false, nil
	})
}

// Creates a fakeNodeInformer using the provided fakeNodeHandler.
func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer {
	fakeClient := &fake.Clientset{}
	fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
	fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()

	for _, node := range fakeNodeHandler.Existing {
		fakeNodeInformer.Informer().GetStore().Add(node)
	}

	return fakeNodeInformer
}

type testCase struct {
	description     string
	fakeNodeHandler *testutil.FakeNodeHandler
@@ -305,7 +277,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// Initialize the range allocator.
			fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler)
			fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
			nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
			_, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
			if err == nil && tc.ctrlCreateFail {
@@ -321,7 +293,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) {
func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
	// Non-parallel test (overrides global var)
	oldNodePollInterval := nodePollInterval
	nodePollInterval = testNodePollInterval
	nodePollInterval = test.NodePollInterval
	defer func() {
		nodePollInterval = oldNodePollInterval
	}()
@@ -537,7 +509,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {

	// test function
	testFunc := func(tc testCase) {
		fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler)
		fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler)
		nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{})
		// Initialize the range allocator.
		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList)
@@ -550,7 +522,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
			return
		}
		rangeAllocator.nodesSynced = alwaysReady
		rangeAllocator.nodesSynced = test.AlwaysReady
		rangeAllocator.recorder = testutil.NewFakeRecorder()
		go allocator.Run(wait.NeverStop)

@@ -580,7 +552,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) {
		if updateCount != 1 {
			t.Fatalf("test error: all tests must update exactly one node")
		}
		if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil {
		if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil {
			t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
		}

@@ -639,7 +611,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {

	testFunc := func(tc testCase) {
		// Initialize the range allocator.
		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
		allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
		if err != nil {
			t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err)
		}
@@ -648,7 +620,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) {
			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
			return
		}
		rangeAllocator.nodesSynced = alwaysReady
		rangeAllocator.nodesSynced = test.AlwaysReady
		rangeAllocator.recorder = testutil.NewFakeRecorder()
		go allocator.Run(wait.NeverStop)

@@ -708,7 +680,7 @@ type releaseTestCase struct {
func TestReleaseCIDRSuccess(t *testing.T) {
	// Non-parallel test (overrides global var)
	oldNodePollInterval := nodePollInterval
	nodePollInterval = testNodePollInterval
	nodePollInterval = test.NodePollInterval
	defer func() {
		nodePollInterval = oldNodePollInterval
	}()
@@ -784,13 +756,13 @@ func TestReleaseCIDRSuccess(t *testing.T) {

	testFunc := func(tc releaseTestCase) {
		// Initialize the range allocator.
		allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
		allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil)
		rangeAllocator, ok := allocator.(*rangeAllocator)
		if !ok {
			t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description)
			return
		}
		rangeAllocator.nodesSynced = alwaysReady
		rangeAllocator.nodesSynced = test.AlwaysReady
		rangeAllocator.recorder = testutil.NewFakeRecorder()
		go allocator.Run(wait.NeverStop)

@@ -813,7 +785,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
			if err != nil {
				t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
			}
			if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
			if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
				t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
			}
		} else {
@@ -841,7 +813,7 @@ func TestReleaseCIDRSuccess(t *testing.T) {
		if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil {
			t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err)
		}
		if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
		if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil {
			t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err)
		}

@@ -18,10 +18,21 @@ package test

import (
	"net"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/informers"
	coreinformers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/kubernetes/pkg/controller"
	"k8s.io/kubernetes/pkg/controller/testutil"
	netutils "k8s.io/utils/net"
)

const NodePollInterval = 10 * time.Millisecond

var AlwaysReady = func() bool { return true }

// MustParseCIDR returns the CIDR range parsed from s or panics if the string
// cannot be parsed.
func MustParseCIDR(s string) *net.IPNet {
@@ -31,3 +42,25 @@ func MustParseCIDR(s string) *net.IPNet {
	}
	return ret
}

// FakeNodeInformer creates a fakeNodeInformer using the provided fakeNodeHandler.
func FakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer {
	fakeClient := &fake.Clientset{}
	fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
	fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()

	for _, node := range fakeNodeHandler.Existing {
		fakeNodeInformer.Informer().GetStore().Add(node)
	}

	return fakeNodeInformer
}

func WaitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error {
	return wait.Poll(NodePollInterval, timeout, func() (bool, error) {
		if len(nodeHandler.GetUpdatedNodesCopy()) >= number {
			return true, nil
		}
		return false, nil
	})
}

@@ -20,20 +20,18 @@ import (
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
networkinginformers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
|
||||
coreinformers "k8s.io/client-go/informers/core/v1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
corelisters "k8s.io/client-go/listers/core/v1"
|
||||
cloudprovider "k8s.io/cloud-provider"
|
||||
controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers"
|
||||
"k8s.io/component-base/metrics/prometheus/ratelimiter"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
|
||||
)
|
||||
|
||||
@@ -74,6 +72,7 @@ type Controller struct {
|
||||
// currently, this should be handled as a fatal error.
|
||||
func NewNodeIpamController(
|
||||
nodeInformer coreinformers.NodeInformer,
|
||||
clusterCIDRInformer networkinginformers.ClusterCIDRInformer,
|
||||
cloud cloudprovider.Interface,
|
||||
kubeClient clientset.Interface,
|
||||
clusterCIDRs []*net.IPNet,
|
||||
@@ -136,7 +135,7 @@ func NewNodeIpamController(
|
||||
NodeCIDRMaskSizes: nodeCIDRMaskSizes,
|
||||
}
|
||||
|
||||
ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams)
|
||||
ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, clusterCIDRInformer, ic.allocatorType, allocatorParams)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -48,6 +48,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet,
	fakeClient := &fake.Clientset{}
	fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc())
	fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes()
	fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs()

	for _, node := range fakeNodeHandler.Existing {
		fakeNodeInformer.Informer().GetStore().Add(node)
@@ -55,7 +56,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet,

	fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues())
	return NewNodeIpamController(
		fakeNodeInformer, fakeGCE, clientSet,
		fakeNodeInformer, fakeClusterCIDRInformer, fakeGCE, clientSet,
		clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType,
	)
}
@@ -78,6 +79,9 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) {
		{"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.RangeAllocatorType, false},
		{"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", []int{24, 98}, ipam.RangeAllocatorType, false},

		{"valid_multi_cidr_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.MultiCIDRRangeAllocatorType, false},
		{"valid_multi_cidr_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.MultiCIDRRangeAllocatorType, false},

		{"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false},
		{"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, false},
		{"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromCloudAllocatorType, false},
@@ -45,6 +45,7 @@ import (
	eventsv1beta1 "k8s.io/api/events/v1beta1"
	flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1"
	networkingapiv1 "k8s.io/api/networking/v1"
	networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
	nodev1 "k8s.io/api/node/v1"
	nodev1beta1 "k8s.io/api/node/v1beta1"
	policyapiv1 "k8s.io/api/policy/v1"
@@ -689,6 +690,7 @@ var (
	// alphaAPIGroupVersionsDisabledByDefault holds the alpha APIs we have. They are always disabled by default.
	alphaAPIGroupVersionsDisabledByDefault = []schema.GroupVersion{
		apiserverinternalv1alpha1.SchemeGroupVersion,
		networkingapiv1alpha1.SchemeGroupVersion,
		storageapiv1alpha1.SchemeGroupVersion,
		flowcontrolv1alpha1.SchemeGroupVersion,
	}
@@ -588,6 +588,13 @@ const (
	// Enables the usage of different protocols in the same Service with type=LoadBalancer
	MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService"

	// owner: @sarveshr7
	// kep: http://kep.k8s.io/2593
	// alpha: v1.25
	//
	// Enables the MultiCIDR Range allocator.
	MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator"

	// owner: @rikatz
	// kep: http://kep.k8s.io/2079
	// alpha: v1.21
@@ -1042,6 +1049,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{

	MixedProtocolLBService: {Default: true, PreRelease: featuregate.Beta},

	MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},

	NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27

	NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},
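Note: consumers check this gate through the standard feature-gate API. A minimal sketch (the helper function is hypothetical; `utilfeature` and `features` are the usual import aliases):

	import (
		utilfeature "k8s.io/apiserver/pkg/util/feature"
		"k8s.io/kubernetes/pkg/features"
	)

	// allocatorSupported is an illustrative helper (not part of this PR) showing
	// the standard gate check before constructing the MultiCIDRRangeAllocator.
	func allocatorSupported() bool {
		return utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator)
	}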
143	pkg/generated/openapi/zz_generated.openapi.go (generated)
@@ -687,6 +687,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition {
	"k8s.io/api/networking/v1.NetworkPolicySpec":         schema_k8sio_api_networking_v1_NetworkPolicySpec(ref),
	"k8s.io/api/networking/v1.NetworkPolicyStatus":       schema_k8sio_api_networking_v1_NetworkPolicyStatus(ref),
	"k8s.io/api/networking/v1.ServiceBackendPort":        schema_k8sio_api_networking_v1_ServiceBackendPort(ref),
	"k8s.io/api/networking/v1alpha1.ClusterCIDR":         schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref),
	"k8s.io/api/networking/v1alpha1.ClusterCIDRList":     schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref),
	"k8s.io/api/networking/v1alpha1.ClusterCIDRSpec":     schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref),
	"k8s.io/api/networking/v1beta1.HTTPIngressPath":      schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref),
	"k8s.io/api/networking/v1beta1.HTTPIngressRuleValue": schema_k8sio_api_networking_v1beta1_HTTPIngressRuleValue(ref),
	"k8s.io/api/networking/v1beta1.Ingress":              schema_k8sio_api_networking_v1beta1_Ingress(ref),
@@ -34339,6 +34342,146 @@ func schema_k8sio_api_networking_v1_ServiceBackendPort(ref common.ReferenceCallback) common.OpenAPIDefinition {
	}
}

func schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
						},
					},
					"spec": {
						SchemaProps: spec.SchemaProps{
							Description: "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/api/networking/v1alpha1.ClusterCIDRSpec"),
						},
					},
				},
			},
		},
		Dependencies: []string{
			"k8s.io/api/networking/v1alpha1.ClusterCIDRSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	}
}

func schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ClusterCIDRList contains a list of ClusterCIDR.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"kind": {
						SchemaProps: spec.SchemaProps{
							Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"apiVersion": {
						SchemaProps: spec.SchemaProps{
							Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"metadata": {
						SchemaProps: spec.SchemaProps{
							Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
							Default:     map[string]interface{}{},
							Ref:         ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
						},
					},
					"items": {
						SchemaProps: spec.SchemaProps{
							Description: "Items is the list of ClusterCIDRs.",
							Type:        []string{"array"},
							Items: &spec.SchemaOrArray{
								Schema: &spec.Schema{
									SchemaProps: spec.SchemaProps{
										Default: map[string]interface{}{},
										Ref:     ref("k8s.io/api/networking/v1alpha1.ClusterCIDR"),
									},
								},
							},
						},
					},
				},
				Required: []string{"items"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/networking/v1alpha1.ClusterCIDR", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
	}
}

func schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
			SchemaProps: spec.SchemaProps{
				Description: "ClusterCIDRSpec defines the desired state of ClusterCIDR.",
				Type:        []string{"object"},
				Properties: map[string]spec.Schema{
					"nodeSelector": {
						SchemaProps: spec.SchemaProps{
							Description: "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.",
							Ref:         ref("k8s.io/api/core/v1.NodeSelector"),
						},
					},
					"perNodeHostBits": {
						SchemaProps: spec.SchemaProps{
							Description: "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.",
							Default:     0,
							Type:        []string{"integer"},
							Format:      "int32",
						},
					},
					"ipv4": {
						SchemaProps: spec.SchemaProps{
							Description: "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
					"ipv6": {
						SchemaProps: spec.SchemaProps{
							Description: "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.",
							Default:     "",
							Type:        []string{"string"},
							Format:      "",
						},
					},
				},
				Required: []string{"perNodeHostBits"},
			},
		},
		Dependencies: []string{
			"k8s.io/api/core/v1.NodeSelector"},
	}
}
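Note: to make the PerNodeHostBits arithmetic concrete, a hedged sketch (the object literal is hypothetical; field names follow the v1alpha1 types above):

	// With IPv4 "10.1.0.0/16" and PerNodeHostBits 8, each node gets a /24
	// (32 - 8 host bits): 256 pod IPs per node, with room for 256 such nodes.
	// For IPv6 the same 8 host bits would yield a /120 per node.
	cc := v1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: v1alpha1.ClusterCIDRSpec{
			PerNodeHostBits: 8,
			IPv4:            "10.1.0.0/16",
		},
	}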
func schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
	return common.OpenAPIDefinition{
		Schema: spec.Schema{
@@ -71,6 +71,7 @@ func NewStorageFactoryConfig() *StorageFactoryConfig {
		//
		// TODO (https://github.com/kubernetes/kubernetes/issues/108451): remove the override in 1.25.
		// apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"),
		networking.Resource("clustercidrs").WithVersion("v1alpha1"),
	}

	return &StorageFactoryConfig{
@@ -37,6 +37,7 @@ import (
	discoveryv1beta1 "k8s.io/api/discovery/v1beta1"
	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
	flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2"
	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	policyv1beta1 "k8s.io/api/policy/v1beta1"
	rbacv1beta1 "k8s.io/api/rbac/v1beta1"
	schedulingv1 "k8s.io/api/scheduling/v1"
@@ -591,6 +592,18 @@ func AddHandlers(h printers.PrintHandler) {
		{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
	}
	h.TableHandler(scaleColumnDefinitions, printScale)

	clusterCIDRColumnDefinitions := []metav1.TableColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
		{Name: "PerNodeHostBits", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["perNodeHostBits"]},
		{Name: "IPv4", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv4"]},
		{Name: "IPv6", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv6"]},
		{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
		{Name: "NodeSelector", Type: "string", Priority: 1, Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["nodeSelector"]},
	}

	h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDR)
	h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDRList)
}

// Pass ports=nil for all ports.
@@ -2624,6 +2637,57 @@ func printPriorityLevelConfigurationList(list *flowcontrol.PriorityLevelConfigurationList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	return rows, nil
}

func printClusterCIDR(obj *networking.ClusterCIDR, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: obj},
	}
	ipv4 := "<none>"
	ipv6 := "<none>"

	if obj.Spec.IPv4 != "" {
		ipv4 = obj.Spec.IPv4
	}
	if obj.Spec.IPv6 != "" {
		ipv6 = obj.Spec.IPv6
	}

	row.Cells = append(row.Cells, obj.Name, fmt.Sprint(obj.Spec.PerNodeHostBits), ipv4, ipv6, translateTimestampSince(obj.CreationTimestamp))
	if options.Wide {
		nodeSelector := "<none>"
		if obj.Spec.NodeSelector != nil {
			allTerms := make([]string, 0)
			for _, term := range obj.Spec.NodeSelector.NodeSelectorTerms {
				if len(term.MatchExpressions) > 0 {
					matchExpressions := fmt.Sprintf("MatchExpressions: %v", term.MatchExpressions)
					allTerms = append(allTerms, matchExpressions)
				}

				if len(term.MatchFields) > 0 {
					matchFields := fmt.Sprintf("MatchFields: %v", term.MatchFields)
					allTerms = append(allTerms, matchFields)
				}
			}
			nodeSelector = strings.Join(allTerms, ",")
		}

		row.Cells = append(row.Cells, nodeSelector)
	}

	return []metav1.TableRow{row}, nil
}

func printClusterCIDRList(list *networking.ClusterCIDRList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	rows := make([]metav1.TableRow, 0, len(list.Items))
	for i := range list.Items {
		r, err := printClusterCIDR(&list.Items[i], options)
		if err != nil {
			return nil, err
		}
		rows = append(rows, r...)
	}
	return rows, nil
}

func printScale(obj *autoscaling.Scale, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{
		Object: runtime.RawExtension{Object: obj},
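Note: a hedged illustration of the wide path (hypothetical object; internal `networking` types as in the functions above). The NodeSelector column has Priority 1, so it only appears in wide output, where it contributes one extra cell:

	obj := &networking.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec:       networking.ClusterCIDRSpec{PerNodeHostBits: 8, IPv4: "10.1.0.0/16"},
	}
	rows, err := printClusterCIDR(obj, printers.GenerateOptions{Wide: true})
	if err == nil {
		// Zero CreationTimestamp renders as "<unknown>", nil selector as "<none>".
		fmt.Println(rows[0].Cells) // [example 8 10.1.0.0/16 <none> <unknown> <none>]
	}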
@@ -6184,3 +6184,277 @@ func TestTableRowDeepCopyShouldNotPanic(t *testing.T) {
		})
	}
}

func TestPrintClusterCIDR(t *testing.T) {
	ipv4CIDR := "10.1.0.0/16"
	perNodeHostBits := int32(8)
	ipv6CIDR := "fd00:1:1::/64"

	tests := []struct {
		ccc      networking.ClusterCIDR
		options  printers.GenerateOptions
		expected []metav1.TableRow
	}{
		{
			// Test name, IPv4 only with no node selector.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test1"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
				},
			},
			options: printers.GenerateOptions{},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
			expected: []metav1.TableRow{{Cells: []interface{}{"test1", "8", ipv4CIDR, "<none>", "<unknown>"}}},
		},
		{
			// Test name, IPv4 only with node selector, not wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test2"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
					// Does NOT get printed.
					NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			options: printers.GenerateOptions{},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
			expected: []metav1.TableRow{{Cells: []interface{}{"test2", "8", ipv4CIDR, "<none>", "<unknown>"}}},
		},
		{
			// Test name, IPv4 only with no node selector, wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test3"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
				},
			},
			options: printers.GenerateOptions{Wide: true},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
			expected: []metav1.TableRow{{Cells: []interface{}{"test3", "8", ipv4CIDR, "<none>", "<unknown>", "<none>"}}},
		},
		{
			// Test name, IPv4 only with node selector, wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test4"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
					NodeSelector:    makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			options: printers.GenerateOptions{Wide: true},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
			expected: []metav1.TableRow{{Cells: []interface{}{"test4", "8", ipv4CIDR, "<none>", "<unknown>", "MatchExpressions: [{foo In [bar]}]"}}},
		},
		{
			// Test name, IPv6 only with no node selector.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test5"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv6:            ipv6CIDR,
				},
			},
			options: printers.GenerateOptions{},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
			expected: []metav1.TableRow{{Cells: []interface{}{"test5", "8", "<none>", ipv6CIDR, "<unknown>"}}},
		},
		{
			// Test name, IPv6 only with node selector, not wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test6"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv6:            ipv6CIDR,
					// Does NOT get printed.
					NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			options: printers.GenerateOptions{},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
			expected: []metav1.TableRow{{Cells: []interface{}{"test6", "8", "<none>", ipv6CIDR, "<unknown>"}}},
		},
		{
			// Test name, IPv6 only with no node selector, wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test7"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv6:            ipv6CIDR,
				},
			},
			options: printers.GenerateOptions{Wide: true},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
			expected: []metav1.TableRow{{Cells: []interface{}{"test7", "8", "<none>", ipv6CIDR, "<unknown>", "<none>"}}},
		},
		{
			// Test name, IPv6 only with node selector, wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test8"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv6:            ipv6CIDR,
					NodeSelector:    makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			options: printers.GenerateOptions{Wide: true},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
			expected: []metav1.TableRow{{Cells: []interface{}{"test8", "8", "<none>", ipv6CIDR, "<unknown>", "MatchExpressions: [{foo In [bar]}]"}}},
		},
		{
			// Test name, dual-stack with no node selector.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test9"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
					IPv6:            ipv6CIDR,
				},
			},
			options: printers.GenerateOptions{},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
			expected: []metav1.TableRow{{Cells: []interface{}{"test9", "8", ipv4CIDR, ipv6CIDR, "<unknown>"}}},
		},
		{
			// Test name, dual-stack with node selector, not wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test10"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
					IPv6:            ipv6CIDR,
					// Does NOT get printed.
					NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			options: printers.GenerateOptions{},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
			expected: []metav1.TableRow{{Cells: []interface{}{"test10", "8", ipv4CIDR, ipv6CIDR, "<unknown>"}}},
		},
		{
			// Test name, dual-stack with no node selector, wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test11"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
					IPv6:            ipv6CIDR,
				},
			},
			options: printers.GenerateOptions{Wide: true},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
			expected: []metav1.TableRow{{Cells: []interface{}{"test11", "8", ipv4CIDR, ipv6CIDR, "<unknown>", "<none>"}}},
		},
		{
			// Test name, dual-stack with node selector, wide.
			ccc: networking.ClusterCIDR{
				ObjectMeta: metav1.ObjectMeta{Name: "test12"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: perNodeHostBits,
					IPv4:            ipv4CIDR,
					IPv6:            ipv6CIDR,
					NodeSelector:    makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			options: printers.GenerateOptions{Wide: true},
			// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
			expected: []metav1.TableRow{{Cells: []interface{}{"test12", "8", ipv4CIDR, ipv6CIDR, "<unknown>", "MatchExpressions: [{foo In [bar]}]"}}},
		},
	}

	for i, test := range tests {
		rows, err := printClusterCIDR(&test.ccc, test.options)
		if err != nil {
			t.Fatal(err)
		}
		for i := range rows {
			rows[i].Object.Object = nil
		}
		if !reflect.DeepEqual(test.expected, rows) {
			t.Errorf("%d mismatch: %s", i, diff.ObjectReflectDiff(test.expected, rows))
		}
	}
}

func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector {
	return &api.NodeSelector{
		NodeSelectorTerms: []api.NodeSelectorTerm{
			{
				MatchExpressions: []api.NodeSelectorRequirement{
					{
						Key:      key,
						Operator: op,
						Values:   values,
					},
				},
			},
		},
	}
}

func TestPrintClusterCIDRList(t *testing.T) {

	cccList := networking.ClusterCIDRList{
		Items: []networking.ClusterCIDR{
			{
				ObjectMeta: metav1.ObjectMeta{Name: "ccc1"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: int32(8),
					IPv4:            "10.1.0.0/16",
					IPv6:            "fd00:1:1::/64",
					NodeSelector:    makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
			{
				ObjectMeta: metav1.ObjectMeta{Name: "ccc2"},
				Spec: networking.ClusterCIDRSpec{
					PerNodeHostBits: int32(8),
					IPv4:            "10.2.0.0/16",
					IPv6:            "fd00:2:1::/64",
					NodeSelector:    makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}),
				},
			},
		},
	}

	tests := []struct {
		options  printers.GenerateOptions
		expected []metav1.TableRow
	}{
		{
			// Test dual-stack list with node selector, not wide.
			options: printers.GenerateOptions{Wide: false},
			expected: []metav1.TableRow{
				// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
				{Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "<unknown>"}},
				{Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "<unknown>"}},
			},
		},
		{
			// Test dual-stack list with node selector, wide.
			options: printers.GenerateOptions{Wide: true},
			expected: []metav1.TableRow{
				// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector.
				{Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "<unknown>", "MatchExpressions: [{foo In [bar]}]"}},
				{Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "<unknown>", "MatchExpressions: [{foo In [bar]}]"}},
			},
		},
	}

	for _, test := range tests {
		rows, err := printClusterCIDRList(&cccList, test.options)
		if err != nil {
			t.Fatalf("Error printing ClusterCIDR list: %#v", err)
		}
		for i := range rows {
			rows[i].Object.Object = nil
		}
		if !reflect.DeepEqual(test.expected, rows) {
			t.Errorf("mismatch: %s", diff.ObjectReflectDiff(test.expected, rows))
		}
	}
}
17	pkg/registry/networking/clustercidr/doc.go (new file)
@@ -0,0 +1,17 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clustercidr // import "k8s.io/kubernetes/pkg/registry/networking/clustercidr"
63	pkg/registry/networking/clustercidr/storage/storage.go (new file)
@@ -0,0 +1,63 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
	"k8s.io/apiserver/pkg/registry/rest"
	networkingapi "k8s.io/kubernetes/pkg/apis/networking"
	"k8s.io/kubernetes/pkg/printers"
	printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
	printerstorage "k8s.io/kubernetes/pkg/printers/storage"
	"k8s.io/kubernetes/pkg/registry/networking/clustercidr"
)

// REST implements a RESTStorage for ClusterCIDRs against etcd.
type REST struct {
	*genericregistry.Store
}

// NewREST returns a RESTStorage object that will work against ClusterCIDRs.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) {
	store := &genericregistry.Store{
		NewFunc:                  func() runtime.Object { return &networkingapi.ClusterCIDR{} },
		NewListFunc:              func() runtime.Object { return &networkingapi.ClusterCIDRList{} },
		DefaultQualifiedResource: networkingapi.Resource("clustercidrs"),

		CreateStrategy: clustercidr.Strategy,
		UpdateStrategy: clustercidr.Strategy,
		DeleteStrategy: clustercidr.Strategy,

		TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
	}
	options := &generic.StoreOptions{RESTOptions: optsGetter}
	if err := store.CompleteWithOptions(options); err != nil {
		return nil, err
	}

	return &REST{store}, nil
}

// Implement ShortNamesProvider.
var _ rest.ShortNamesProvider = &REST{}

// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource.
func (r *REST) ShortNames() []string {
	return []string{"cc"}
}
196	pkg/registry/networking/clustercidr/storage/storage_test.go (new file)
@@ -0,0 +1,196 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apiserver/pkg/registry/generic"
	genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing"
	etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/networking"
	_ "k8s.io/kubernetes/pkg/apis/networking/install"
	"k8s.io/kubernetes/pkg/registry/registrytest"
)

func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) {
	etcdStorage, server := registrytest.NewEtcdStorageForResource(t, networking.Resource("clustercidrs"))
	restOptions := generic.RESTOptions{
		StorageConfig:           etcdStorage,
		Decorator:               generic.UndecoratedStorage,
		DeleteCollectionWorkers: 1,
		ResourcePrefix:          "clustercidrs",
	}
	clusterCIDRStorage, err := NewREST(restOptions)
	if err != nil {
		t.Fatalf("unexpected error from REST storage: %v", err)
	}
	return clusterCIDRStorage, server
}

var (
	namespace = metav1.NamespaceNone
	name      = "foo-clustercidr"
)

func newClusterCIDR() *networking.ClusterCIDR {
	return &networking.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: namespace,
		},
		Spec: networking.ClusterCIDRSpec{
			PerNodeHostBits: int32(8),
			IPv4:            "10.1.0.0/16",
			IPv6:            "fd00:1:1::/64",
			NodeSelector: &api.NodeSelector{
				NodeSelectorTerms: []api.NodeSelectorTerm{
					{
						MatchExpressions: []api.NodeSelectorRequirement{
							{
								Key:      "foo",
								Operator: api.NodeSelectorOpIn,
								Values:   []string{"bar"},
							},
						},
					},
				},
			},
		},
	}
}

func validClusterCIDR() *networking.ClusterCIDR {
	return newClusterCIDR()
}

func TestCreate(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()

	test := genericregistrytest.New(t, storage.Store)
	test = test.ClusterScope()
	validCC := validClusterCIDR()
	noCIDRCC := validClusterCIDR()
	noCIDRCC.Spec.IPv4 = ""
	noCIDRCC.Spec.IPv6 = ""
	invalidCCPerNodeHostBits := validClusterCIDR()
	invalidCCPerNodeHostBits.Spec.PerNodeHostBits = 100
	invalidCCCIDR := validClusterCIDR()
	invalidCCCIDR.Spec.IPv6 = "10.1.0.0/16"

	test.TestCreate(
		// valid
		validCC,
		// invalid
		noCIDRCC,
		invalidCCPerNodeHostBits,
		invalidCCCIDR,
	)
}

func TestUpdate(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test = test.ClusterScope()
	test.TestUpdate(
		// valid
		validClusterCIDR(),
		// updateFunc
		func(obj runtime.Object) runtime.Object {
			object := obj.(*networking.ClusterCIDR)
			object.Finalizers = []string{"test.k8s.io/test-finalizer"}
			return object
		},
		// invalid updateFunc: ObjectMeta is not to be tampered with.
		func(obj runtime.Object) runtime.Object {
			object := obj.(*networking.ClusterCIDR)
			object.Name = ""
			return object
		},
	)
}

func TestDelete(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test = test.ClusterScope()
	test.TestDelete(validClusterCIDR())
}

func TestGet(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test = test.ClusterScope()
	test.TestGet(validClusterCIDR())
}

func TestList(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test = test.ClusterScope()
	test.TestList(validClusterCIDR())
}

func TestWatch(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	test := genericregistrytest.New(t, storage.Store)
	test = test.ClusterScope()
	test.TestWatch(
		validClusterCIDR(),
		// matching labels
		[]labels.Set{},
		// not matching labels
		[]labels.Set{
			{"a": "c"},
			{"foo": "bar"},
		},
		// matching fields
		[]fields.Set{
			{"metadata.name": name},
		},
		// not matching fields
		[]fields.Set{
			{"metadata.name": "bar"},
			{"name": name},
		},
	)
}

func TestShortNames(t *testing.T) {
	storage, server := newStorage(t)
	defer server.Terminate(t)
	defer storage.Store.DestroyFunc()
	expected := []string{"cc"}
	registrytest.AssertShortNames(t, storage, expected)
}
82	pkg/registry/networking/clustercidr/strategy.go (new file)
@@ -0,0 +1,82 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clustercidr

import (
	"context"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/validation/field"
	"k8s.io/apiserver/pkg/storage/names"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/networking"
	"k8s.io/kubernetes/pkg/apis/networking/validation"
)

// clusterCIDRStrategy implements verification logic for ClusterCIDRs.
type clusterCIDRStrategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}

// Strategy is the default logic that applies when creating and updating clusterCIDR objects.
var Strategy = clusterCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}

// NamespaceScoped returns false because ClusterCIDRs are cluster-scoped and do not live within a namespace.
func (clusterCIDRStrategy) NamespaceScoped() bool {
	return false
}

func (clusterCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {}

func (clusterCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {}

// Validate validates a new ClusterCIDR.
func (clusterCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
	clusterCIDR := obj.(*networking.ClusterCIDR)
	return validation.ValidateClusterCIDR(clusterCIDR)
}

// WarningsOnCreate returns warnings for the creation of the given object.
func (clusterCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
	return nil
}

// Canonicalize normalizes the object after validation.
func (clusterCIDRStrategy) Canonicalize(obj runtime.Object) {}

// AllowCreateOnUpdate is false for ClusterCIDR; this means POST is needed to create one.
func (clusterCIDRStrategy) AllowCreateOnUpdate() bool {
	return false
}

// ValidateUpdate is the default update validation for an end user.
func (clusterCIDRStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	validationErrorList := validation.ValidateClusterCIDR(obj.(*networking.ClusterCIDR))
	updateErrorList := validation.ValidateClusterCIDRUpdate(obj.(*networking.ClusterCIDR), old.(*networking.ClusterCIDR))
	return append(validationErrorList, updateErrorList...)
}

// WarningsOnUpdate returns warnings for the given update.
func (clusterCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}

// AllowUnconditionalUpdate is the default update policy for ClusterCIDR objects.
func (clusterCIDRStrategy) AllowUnconditionalUpdate() bool {
	return true
}
86	pkg/registry/networking/clustercidr/strategy_test.go (new file)
@@ -0,0 +1,86 @@
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package clustercidr

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	genericapirequest "k8s.io/apiserver/pkg/endpoints/request"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/networking"
)

func newClusterCIDR() networking.ClusterCIDR {
	return networking.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{
			Name: "foo",
		},
		Spec: networking.ClusterCIDRSpec{
			PerNodeHostBits: int32(8),
			IPv4:            "10.1.0.0/16",
			IPv6:            "fd00:1:1::/64",
			NodeSelector: &api.NodeSelector{
				NodeSelectorTerms: []api.NodeSelectorTerm{
					{
						MatchExpressions: []api.NodeSelectorRequirement{
							{
								Key:      "foo",
								Operator: api.NodeSelectorOpIn,
								Values:   []string{"bar"},
							},
						},
					},
				},
			},
		},
	}
}

func TestClusterCIDRStrategy(t *testing.T) {
	ctx := genericapirequest.NewDefaultContext()
	apiRequest := genericapirequest.RequestInfo{APIGroup: "networking.k8s.io",
		APIVersion: "v1alpha1",
		Resource:   "clustercidrs",
	}
	ctx = genericapirequest.WithRequestInfo(ctx, &apiRequest)
	if Strategy.NamespaceScoped() {
		t.Errorf("ClusterCIDRs must be cluster scoped")
	}
	if Strategy.AllowCreateOnUpdate() {
		t.Errorf("ClusterCIDRs should not allow create on update")
	}

	ccc := newClusterCIDR()
	Strategy.PrepareForCreate(ctx, &ccc)

	errs := Strategy.Validate(ctx, &ccc)
	if len(errs) != 0 {
		t.Errorf("Unexpected error validating %v", errs)
	}
	invalidCCC := newClusterCIDR()
	invalidCCC.ResourceVersion = "4"
	invalidCCC.Spec = networking.ClusterCIDRSpec{}
	Strategy.PrepareForUpdate(ctx, &invalidCCC, &ccc)
	errs = Strategy.ValidateUpdate(ctx, &invalidCCC, &ccc)
	if len(errs) == 0 {
		t.Errorf("Expected a validation error")
	}
	if invalidCCC.ResourceVersion != "4" {
		t.Errorf("Incoming resource version on update should not be mutated")
	}
}
@@ -18,12 +18,14 @@ package rest

import (
	networkingapiv1 "k8s.io/api/networking/v1"
	networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
	"k8s.io/apiserver/pkg/registry/generic"
	"k8s.io/apiserver/pkg/registry/rest"
	genericapiserver "k8s.io/apiserver/pkg/server"
	serverstorage "k8s.io/apiserver/pkg/server/storage"
	"k8s.io/kubernetes/pkg/api/legacyscheme"
	"k8s.io/kubernetes/pkg/apis/networking"
	clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage"
	ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage"
	ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage"
	networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage"
@@ -36,6 +38,12 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (genericapiserver.APIGroupInfo, error) {
	// If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go` with specific priorities.
	// TODO refactor the plumbing to provide the information in the APIGroupInfo

	if storageMap, err := p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
		return genericapiserver.APIGroupInfo{}, err
	} else if len(storageMap) > 0 {
		apiGroupInfo.VersionedResourcesStorageMap[networkingapiv1alpha1.SchemeGroupVersion.Version] = storageMap
	}

	if storageMap, err := p.v1Storage(apiResourceConfigSource, restOptionsGetter); err != nil {
		return genericapiserver.APIGroupInfo{}, err
	} else if len(storageMap) > 0 {
@@ -80,6 +88,20 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) {
	return storage, nil
}

func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) {
	storage := map[string]rest.Storage{}
	// clustercidrs
	if resource := "clustercidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) {
		clusterCIDRStorage, err := clustercidrstore.NewREST(restOptionsGetter)
		if err != nil {
			return storage, err
		}
		storage[resource] = clusterCIDRStorage
	}

	return storage, nil
}

func (p RESTStorageProvider) GroupName() string {
	return networking.GroupName
}
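Note: to close the loop, a hedged sketch of exercising the new endpoint from client-go once the v1alpha1 group and the feature gate are enabled. Client construction is elided; the typed accessor `NetworkingV1alpha1().ClusterCIDRs()` is the generated client-go surface for this group, and the object below is hypothetical.

	// Assumes a configured clientset in `client` and a context in `ctx`.
	cc := &networkingv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "sample-cidr"},
		Spec: networkingv1alpha1.ClusterCIDRSpec{
			PerNodeHostBits: 8,
			IPv4:            "10.1.0.0/16",
			// NodeSelector nil => applies to all nodes.
		},
	}
	created, err := client.NetworkingV1alpha1().ClusterCIDRs().Create(ctx, cc, metav1.CreateOptions{})
	if err != nil {
		klog.Fatalf("create failed: %v", err)
	}
	klog.Infof("created ClusterCIDR %s", created.Name)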