Merge pull request #116516 from aojea/servicecidr
KEP-1880 Multiple ServiceCIDR ( and IPAddress allocation)
This commit is contained in:
@@ -179,6 +179,7 @@ API rule violation: list_type_missing,k8s.io/api/networking/v1,NetworkPolicyIngr
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1,NetworkPolicySpec,Egress
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1,NetworkPolicySpec,Ingress
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1,NetworkPolicySpec,PolicyTypes
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1alpha1,ServiceCIDRSpec,CIDRs
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1beta1,HTTPIngressRuleValue,Paths
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1beta1,IngressLoadBalancerStatus,Ingress
|
||||
API rule violation: list_type_missing,k8s.io/api/networking/v1beta1,IngressSpec,Rules
|
||||
@@ -355,6 +356,7 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool
|
||||
API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser
|
||||
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS
|
||||
API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS
|
||||
API rule violation: names_match,k8s.io/api/networking/v1alpha1,ServiceCIDRSpec,CIDRs
|
||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref
|
||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema
|
||||
API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource
|
||||
|
||||
@@ -875,6 +875,41 @@
|
||||
"update",
|
||||
"watch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"resource": "servicecidrs",
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": ""
|
||||
},
|
||||
"scope": "Cluster",
|
||||
"singularResource": "servicecidr",
|
||||
"subresources": [
|
||||
{
|
||||
"responseKind": {
|
||||
"group": "",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": ""
|
||||
},
|
||||
"subresource": "status",
|
||||
"verbs": [
|
||||
"get",
|
||||
"patch",
|
||||
"update"
|
||||
]
|
||||
}
|
||||
],
|
||||
"verbs": [
|
||||
"create",
|
||||
"delete",
|
||||
"deletecollection",
|
||||
"get",
|
||||
"list",
|
||||
"patch",
|
||||
"update",
|
||||
"watch"
|
||||
]
|
||||
}
|
||||
],
|
||||
"version": "v1alpha1"
|
||||
|
||||
@@ -22,6 +22,34 @@
|
||||
"update",
|
||||
"watch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"kind": "ServiceCIDR",
|
||||
"name": "servicecidrs",
|
||||
"namespaced": false,
|
||||
"singularName": "servicecidr",
|
||||
"storageVersionHash": "3cDF5hqTkLY=",
|
||||
"verbs": [
|
||||
"create",
|
||||
"delete",
|
||||
"deletecollection",
|
||||
"get",
|
||||
"list",
|
||||
"patch",
|
||||
"update",
|
||||
"watch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"kind": "ServiceCIDR",
|
||||
"name": "servicecidrs/status",
|
||||
"namespaced": false,
|
||||
"singularName": "",
|
||||
"verbs": [
|
||||
"get",
|
||||
"patch",
|
||||
"update"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
922
api/openapi-spec/swagger.json
generated
922
api/openapi-spec/swagger.json
generated
@@ -13842,6 +13842,106 @@
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"io.k8s.api.networking.v1alpha1.ServiceCIDR": {
|
||||
"description": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
"type": "string"
|
||||
},
|
||||
"kind": {
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta",
|
||||
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
|
||||
},
|
||||
"spec": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRSpec",
|
||||
"description": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
|
||||
},
|
||||
"status": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRStatus",
|
||||
"description": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
|
||||
}
|
||||
},
|
||||
"type": "object",
|
||||
"x-kubernetes-group-version-kind": [
|
||||
{
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
]
|
||||
},
|
||||
"io.k8s.api.networking.v1alpha1.ServiceCIDRList": {
|
||||
"description": "ServiceCIDRList contains a list of ServiceCIDR objects.",
|
||||
"properties": {
|
||||
"apiVersion": {
|
||||
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
"type": "string"
|
||||
},
|
||||
"items": {
|
||||
"description": "items is the list of ServiceCIDRs.",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
},
|
||||
"type": "array"
|
||||
},
|
||||
"kind": {
|
||||
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
"type": "string"
|
||||
},
|
||||
"metadata": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta",
|
||||
"description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"items"
|
||||
],
|
||||
"type": "object",
|
||||
"x-kubernetes-group-version-kind": [
|
||||
{
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDRList",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
]
|
||||
},
|
||||
"io.k8s.api.networking.v1alpha1.ServiceCIDRSpec": {
|
||||
"description": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
|
||||
"properties": {
|
||||
"cidrs": {
|
||||
"description": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"type": "array"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"io.k8s.api.networking.v1alpha1.ServiceCIDRStatus": {
|
||||
"description": "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
|
||||
"properties": {
|
||||
"conditions": {
|
||||
"description": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
|
||||
"items": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Condition"
|
||||
},
|
||||
"type": "array",
|
||||
"x-kubernetes-list-map-keys": [
|
||||
"type"
|
||||
],
|
||||
"x-kubernetes-list-type": "map",
|
||||
"x-kubernetes-patch-merge-key": "type",
|
||||
"x-kubernetes-patch-strategy": "merge"
|
||||
}
|
||||
},
|
||||
"type": "object"
|
||||
},
|
||||
"io.k8s.api.node.v1.Overhead": {
|
||||
"description": "Overhead structure represents the resource overhead associated with running a pod.",
|
||||
"properties": {
|
||||
@@ -63794,6 +63894,672 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/apis/networking.k8s.io/v1alpha1/servicecidrs": {
|
||||
"delete": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "delete collection of ServiceCIDR",
|
||||
"operationId": "deleteNetworkingV1alpha1CollectionServiceCIDR",
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/body-2Y1dVQaQ"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/continue-QfD61s0i"
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldSelector-xIcQKXFG"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/gracePeriodSeconds--K5HaBOS"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/labelSelector-5Zw57w4C"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/limit-1NfNmdNH"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/orphanDependents-uRB25kX5"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/propagationPolicy-6jk3prlO"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersion-5WAnf1kx"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersionMatch-t8XhRHeC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/sendInitialEvents-rLXlEK_k"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/timeoutSeconds-yvYezaOC"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "deletecollection",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"get": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "list or watch objects of kind ServiceCIDR",
|
||||
"operationId": "listNetworkingV1alpha1ServiceCIDR",
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/continue-QfD61s0i"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldSelector-xIcQKXFG"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/labelSelector-5Zw57w4C"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/limit-1NfNmdNH"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersion-5WAnf1kx"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersionMatch-t8XhRHeC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/sendInitialEvents-rLXlEK_k"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/timeoutSeconds-yvYezaOC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/watch-XNNPZGbK"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDRList"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "list",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/pretty-tJGM1-ng"
|
||||
}
|
||||
],
|
||||
"post": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "create a ServiceCIDR",
|
||||
"operationId": "createNetworkingV1alpha1ServiceCIDR",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "body",
|
||||
"name": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldManager-Qy4HdaTW"
|
||||
},
|
||||
{
|
||||
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||
"in": "query",
|
||||
"name": "fieldValidation",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "post",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
}
|
||||
},
|
||||
"/apis/networking.k8s.io/v1alpha1/servicecidrs/{name}": {
|
||||
"delete": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "delete a ServiceCIDR",
|
||||
"operationId": "deleteNetworkingV1alpha1ServiceCIDR",
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/body-2Y1dVQaQ"
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/gracePeriodSeconds--K5HaBOS"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/orphanDependents-uRB25kX5"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/propagationPolicy-6jk3prlO"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||
}
|
||||
},
|
||||
"202": {
|
||||
"description": "Accepted",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "delete",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"get": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "read the specified ServiceCIDR",
|
||||
"operationId": "readNetworkingV1alpha1ServiceCIDR",
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "get",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"description": "name of the ServiceCIDR",
|
||||
"in": "path",
|
||||
"name": "name",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/pretty-tJGM1-ng"
|
||||
}
|
||||
],
|
||||
"patch": {
|
||||
"consumes": [
|
||||
"application/json-patch+json",
|
||||
"application/merge-patch+json",
|
||||
"application/strategic-merge-patch+json",
|
||||
"application/apply-patch+yaml"
|
||||
],
|
||||
"description": "partially update the specified ServiceCIDR",
|
||||
"operationId": "patchNetworkingV1alpha1ServiceCIDR",
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/body-78PwaGsr"
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldManager-7c6nTn1T"
|
||||
},
|
||||
{
|
||||
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||
"in": "query",
|
||||
"name": "fieldValidation",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/force-tOGGb0Yi"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "patch",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "replace the specified ServiceCIDR",
|
||||
"operationId": "replaceNetworkingV1alpha1ServiceCIDR",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "body",
|
||||
"name": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldManager-Qy4HdaTW"
|
||||
},
|
||||
{
|
||||
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||
"in": "query",
|
||||
"name": "fieldValidation",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "put",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
}
|
||||
},
|
||||
"/apis/networking.k8s.io/v1alpha1/servicecidrs/{name}/status": {
|
||||
"get": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "read status of the specified ServiceCIDR",
|
||||
"operationId": "readNetworkingV1alpha1ServiceCIDRStatus",
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "get",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"description": "name of the ServiceCIDR",
|
||||
"in": "path",
|
||||
"name": "name",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/pretty-tJGM1-ng"
|
||||
}
|
||||
],
|
||||
"patch": {
|
||||
"consumes": [
|
||||
"application/json-patch+json",
|
||||
"application/merge-patch+json",
|
||||
"application/strategic-merge-patch+json",
|
||||
"application/apply-patch+yaml"
|
||||
],
|
||||
"description": "partially update status of the specified ServiceCIDR",
|
||||
"operationId": "patchNetworkingV1alpha1ServiceCIDRStatus",
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/body-78PwaGsr"
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldManager-7c6nTn1T"
|
||||
},
|
||||
{
|
||||
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||
"in": "query",
|
||||
"name": "fieldValidation",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/force-tOGGb0Yi"
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "patch",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"put": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "replace status of the specified ServiceCIDR",
|
||||
"operationId": "replaceNetworkingV1alpha1ServiceCIDRStatus",
|
||||
"parameters": [
|
||||
{
|
||||
"in": "body",
|
||||
"name": "body",
|
||||
"required": true,
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
{
|
||||
"description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
|
||||
"in": "query",
|
||||
"name": "dryRun",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldManager-Qy4HdaTW"
|
||||
},
|
||||
{
|
||||
"description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
|
||||
"in": "query",
|
||||
"name": "fieldValidation",
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
}
|
||||
],
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"201": {
|
||||
"description": "Created",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ServiceCIDR"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "put",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
}
|
||||
},
|
||||
"/apis/networking.k8s.io/v1alpha1/watch/ipaddresses": {
|
||||
"get": {
|
||||
"consumes": [
|
||||
@@ -63950,6 +64716,162 @@
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/networking.k8s.io/v1alpha1/watch/servicecidrs": {
|
||||
"get": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "watch individual changes to a list of ServiceCIDR. deprecated: use the 'watch' parameter with a list operation instead.",
|
||||
"operationId": "watchNetworkingV1alpha1ServiceCIDRList",
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "watchlist",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/continue-QfD61s0i"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldSelector-xIcQKXFG"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/labelSelector-5Zw57w4C"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/limit-1NfNmdNH"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/pretty-tJGM1-ng"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersion-5WAnf1kx"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersionMatch-t8XhRHeC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/sendInitialEvents-rLXlEK_k"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/timeoutSeconds-yvYezaOC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/watch-XNNPZGbK"
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/networking.k8s.io/v1alpha1/watch/servicecidrs/{name}": {
|
||||
"get": {
|
||||
"consumes": [
|
||||
"*/*"
|
||||
],
|
||||
"description": "watch changes to an object of kind ServiceCIDR. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
|
||||
"operationId": "watchNetworkingV1alpha1ServiceCIDR",
|
||||
"produces": [
|
||||
"application/json",
|
||||
"application/yaml",
|
||||
"application/vnd.kubernetes.protobuf",
|
||||
"application/json;stream=watch",
|
||||
"application/vnd.kubernetes.protobuf;stream=watch"
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "OK",
|
||||
"schema": {
|
||||
"$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"description": "Unauthorized"
|
||||
}
|
||||
},
|
||||
"schemes": [
|
||||
"https"
|
||||
],
|
||||
"tags": [
|
||||
"networking_v1alpha1"
|
||||
],
|
||||
"x-kubernetes-action": "watch",
|
||||
"x-kubernetes-group-version-kind": {
|
||||
"group": "networking.k8s.io",
|
||||
"kind": "ServiceCIDR",
|
||||
"version": "v1alpha1"
|
||||
}
|
||||
},
|
||||
"parameters": [
|
||||
{
|
||||
"$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/continue-QfD61s0i"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/fieldSelector-xIcQKXFG"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/labelSelector-5Zw57w4C"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/limit-1NfNmdNH"
|
||||
},
|
||||
{
|
||||
"description": "name of the ServiceCIDR",
|
||||
"in": "path",
|
||||
"name": "name",
|
||||
"required": true,
|
||||
"type": "string",
|
||||
"uniqueItems": true
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/pretty-tJGM1-ng"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersion-5WAnf1kx"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/resourceVersionMatch-t8XhRHeC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/sendInitialEvents-rLXlEK_k"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/timeoutSeconds-yvYezaOC"
|
||||
},
|
||||
{
|
||||
"$ref": "#/parameters/watch-XNNPZGbK"
|
||||
}
|
||||
]
|
||||
},
|
||||
"/apis/node.k8s.io/": {
|
||||
"get": {
|
||||
"consumes": [
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -37,9 +37,6 @@ func validateClusterIPFlags(options Extra) []error {
|
||||
var errs []error
|
||||
// maxCIDRBits is used to define the maximum CIDR size for the cluster ip(s)
|
||||
maxCIDRBits := 20
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
maxCIDRBits = 64
|
||||
}
|
||||
|
||||
// validate that primary has been processed by user provided values or it has been defaulted
|
||||
if options.PrimaryServiceClusterIPRange.IP == nil {
|
||||
@@ -51,10 +48,12 @@ func validateClusterIPFlags(options Extra) []error {
|
||||
errs = append(errs, errors.New("--service-cluster-ip-range must not contain more than two entries"))
|
||||
}
|
||||
|
||||
// Complete() expected to have set Primary* and Secondary*
|
||||
// primary CIDR validation
|
||||
if err := validateMaxCIDRRange(options.PrimaryServiceClusterIPRange, maxCIDRBits, "--service-cluster-ip-range"); err != nil {
|
||||
errs = append(errs, err)
|
||||
// Complete() expected to have set Primary* and Secondary
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
// primary CIDR validation
|
||||
if err := validateMaxCIDRRange(options.PrimaryServiceClusterIPRange, maxCIDRBits, "--service-cluster-ip-range"); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
secondaryServiceClusterIPRangeUsed := (options.SecondaryServiceClusterIPRange.IP != nil)
|
||||
@@ -72,9 +71,10 @@ func validateClusterIPFlags(options Extra) []error {
|
||||
if !dualstack {
|
||||
errs = append(errs, errors.New("--service-cluster-ip-range[0] and --service-cluster-ip-range[1] must be of different IP family"))
|
||||
}
|
||||
|
||||
if err := validateMaxCIDRRange(options.SecondaryServiceClusterIPRange, maxCIDRBits, "--service-cluster-ip-range[1]"); err != nil {
|
||||
errs = append(errs, err)
|
||||
if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
if err := validateMaxCIDRRange(options.SecondaryServiceClusterIPRange, maxCIDRBits, "--service-cluster-ip-range[1]"); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -103,8 +103,8 @@ func TestClusterServiceIPRange(t *testing.T) {
|
||||
gate: true,
|
||||
},
|
||||
{
|
||||
name: "service cidr IPv6 is too big despuite gate enbled",
|
||||
expectErrors: true,
|
||||
name: "service cidr IPv6 is too big and gate enbled",
|
||||
expectErrors: false,
|
||||
options: makeOptionsWithCIDRs("2001:db8::/12", ""),
|
||||
gate: true,
|
||||
},
|
||||
@@ -113,6 +113,12 @@ func TestClusterServiceIPRange(t *testing.T) {
|
||||
expectErrors: true,
|
||||
options: makeOptionsWithCIDRs("10.0.0.0/16", "3000::/64"),
|
||||
},
|
||||
{
|
||||
name: "dual-stack secondary cidr too big gate enabled",
|
||||
expectErrors: false,
|
||||
options: makeOptionsWithCIDRs("10.0.0.0/16", "3000::/48"),
|
||||
gate: true,
|
||||
},
|
||||
{
|
||||
name: "more than two entries",
|
||||
expectErrors: true,
|
||||
|
||||
@@ -557,6 +557,7 @@ func NewControllerDescriptors() map[string]*ControllerDescriptor {
|
||||
register(newLegacyServiceAccountTokenCleanerControllerDescriptor())
|
||||
register(newValidatingAdmissionPolicyStatusControllerDescriptor())
|
||||
register(newTaintEvictionControllerDescriptor())
|
||||
register(newServiceCIDRsControllerDescriptor())
|
||||
|
||||
for _, alias := range aliases.UnsortedList() {
|
||||
if _, ok := controllers[alias]; ok {
|
||||
|
||||
@@ -93,6 +93,7 @@ func TestControllerNamesDeclaration(t *testing.T) {
|
||||
names.ResourceClaimController,
|
||||
names.LegacyServiceAccountTokenCleanerController,
|
||||
names.ValidatingAdmissionPolicyStatusController,
|
||||
names.ServiceCIDRController,
|
||||
)
|
||||
|
||||
for _, name := range KnownControllers() {
|
||||
|
||||
49
cmd/kube-controller-manager/app/networking.go
Normal file
49
cmd/kube-controller-manager/app/networking.go
Normal file
@@ -0,0 +1,49 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package app implements a server that runs a set of active
|
||||
// components. This includes replication controllers, service endpoints and
|
||||
// nodes.
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/component-base/featuregate"
|
||||
"k8s.io/controller-manager/controller"
|
||||
"k8s.io/kubernetes/cmd/kube-controller-manager/names"
|
||||
"k8s.io/kubernetes/pkg/controller/servicecidrs"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
)
|
||||
|
||||
func newServiceCIDRsControllerDescriptor() *ControllerDescriptor {
|
||||
return &ControllerDescriptor{
|
||||
name: names.ServiceCIDRController,
|
||||
initFunc: startServiceCIDRsController,
|
||||
requiredFeatureGates: []featuregate.Feature{
|
||||
features.MultiCIDRServiceAllocator,
|
||||
}}
|
||||
}
|
||||
func startServiceCIDRsController(ctx context.Context, controllerContext ControllerContext, controllerName string) (controller.Interface, bool, error) {
|
||||
go servicecidrs.NewController(
|
||||
controllerContext.InformerFactory.Networking().V1alpha1().ServiceCIDRs(),
|
||||
controllerContext.InformerFactory.Networking().V1alpha1().IPAddresses(),
|
||||
controllerContext.ClientBuilder.ClientOrDie("service-cidrs-controller"),
|
||||
).Run(ctx, 5)
|
||||
// TODO use component config
|
||||
return nil, true, nil
|
||||
|
||||
}
|
||||
@@ -82,4 +82,5 @@ const (
|
||||
ResourceClaimController = "resourceclaim-controller"
|
||||
LegacyServiceAccountTokenCleanerController = "legacy-serviceaccount-token-cleaner-controller"
|
||||
ValidatingAdmissionPolicyStatusController = "validatingadmissionpolicy-status-controller"
|
||||
ServiceCIDRController = "service-cidr-controller"
|
||||
)
|
||||
|
||||
@@ -17,6 +17,7 @@ limitations under the License.
|
||||
package fuzzer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/netip"
|
||||
|
||||
fuzz "github.com/google/gofuzz"
|
||||
@@ -84,6 +85,18 @@ var Funcs = func(codecs runtimeserializer.CodecFactory) []interface{} {
|
||||
ip := generateRandomIP(is6, c)
|
||||
obj.Name = ip
|
||||
},
|
||||
func(obj *networking.ServiceCIDR, c fuzz.Continue) {
	c.FuzzNoCustom(obj) // fuzz self without calling this function again

	// Pick a primary IP family at random, then sometimes append a
	// second CIDR of the opposite family to exercise dual-stack specs.
	// Each c.Rand.Intn(2) draw matches the original's boolean-slice
	// indexing: 0 -> false, 1 -> true.
	is6 := c.Rand.Intn(2) == 1
	obj.Spec.CIDRs = []string{generateRandomCIDR(is6, c)}
	if c.Rand.Intn(2) == 1 {
		obj.Spec.CIDRs = append(obj.Spec.CIDRs, generateRandomCIDR(!is6, c))
	}
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -101,10 +114,23 @@ func generateRandomIP(is6 bool, c fuzz.Continue) string {
|
||||
if ok {
|
||||
return ip.String()
|
||||
}
|
||||
// this should not happen but is better to
|
||||
// return a good IP address than nothing
|
||||
if is6 {
|
||||
return "2001:db8::1"
|
||||
}
|
||||
return "192.168.1.1"
|
||||
// this should not happen
|
||||
panic(fmt.Sprintf("invalid IP %v", bytes))
|
||||
}
|
||||
|
||||
func generateRandomCIDR(is6 bool, c fuzz.Continue) string {
|
||||
ip, err := netip.ParseAddr(generateRandomIP(is6, c))
|
||||
if err != nil {
|
||||
// generateRandomIP already panics if returns a not valid ip
|
||||
panic(err)
|
||||
}
|
||||
|
||||
n := 32
|
||||
if is6 {
|
||||
n = 128
|
||||
}
|
||||
|
||||
bits := c.Rand.Intn(n)
|
||||
prefix := netip.PrefixFrom(ip, bits)
|
||||
return prefix.Masked().String()
|
||||
}
|
||||
|
||||
@@ -54,6 +54,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
&IngressClassList{},
|
||||
&IPAddress{},
|
||||
&IPAddressList{},
|
||||
&ServiceCIDR{},
|
||||
&ServiceCIDRList{},
|
||||
)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -642,3 +642,53 @@ type IPAddressList struct {
|
||||
// Items is the list of IPAddress
|
||||
Items []IPAddress
|
||||
}
|
||||
|
||||
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
// This range is used to allocate ClusterIPs to Service objects.
type ServiceCIDR struct {
	metav1.TypeMeta
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ObjectMeta
	// spec is the desired state of the ServiceCIDR.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Spec ServiceCIDRSpec
	// status represents the current state of the ServiceCIDR.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
	// +optional
	Status ServiceCIDRStatus
}

// ServiceCIDRSpec describes the CIDRs from which ClusterIPs are allocated.
type ServiceCIDRSpec struct {
	// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
	// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
	// This field is immutable.
	// +optional
	CIDRs []string
}

// ServiceCIDRStatus describes the current state of the ServiceCIDR.
type ServiceCIDRStatus struct {
	// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
	Conditions []metav1.Condition
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:prerelease-lifecycle-gen:introduced=1.27

// ServiceCIDRList contains a list of ServiceCIDR objects.
type ServiceCIDRList struct {
	metav1.TypeMeta
	// Standard object's metadata.
	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
	// +optional
	metav1.ListMeta
	// items is the list of ServiceCIDRs.
	Items []ServiceCIDR
}
|
||||
|
||||
135
pkg/apis/networking/v1alpha1/zz_generated.conversion.go
generated
135
pkg/apis/networking/v1alpha1/zz_generated.conversion.go
generated
@@ -25,6 +25,7 @@ import (
|
||||
unsafe "unsafe"
|
||||
|
||||
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
conversion "k8s.io/apimachinery/pkg/conversion"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
networking "k8s.io/kubernetes/pkg/apis/networking"
|
||||
@@ -77,6 +78,46 @@ func RegisterConversions(s *runtime.Scheme) error {
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.ServiceCIDR)(nil), (*networking.ServiceCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_ServiceCIDR_To_networking_ServiceCIDR(a.(*v1alpha1.ServiceCIDR), b.(*networking.ServiceCIDR), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDR)(nil), (*v1alpha1.ServiceCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_networking_ServiceCIDR_To_v1alpha1_ServiceCIDR(a.(*networking.ServiceCIDR), b.(*v1alpha1.ServiceCIDR), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.ServiceCIDRList)(nil), (*networking.ServiceCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_ServiceCIDRList_To_networking_ServiceCIDRList(a.(*v1alpha1.ServiceCIDRList), b.(*networking.ServiceCIDRList), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRList)(nil), (*v1alpha1.ServiceCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_networking_ServiceCIDRList_To_v1alpha1_ServiceCIDRList(a.(*networking.ServiceCIDRList), b.(*v1alpha1.ServiceCIDRList), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.ServiceCIDRSpec)(nil), (*networking.ServiceCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(a.(*v1alpha1.ServiceCIDRSpec), b.(*networking.ServiceCIDRSpec), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRSpec)(nil), (*v1alpha1.ServiceCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_networking_ServiceCIDRSpec_To_v1alpha1_ServiceCIDRSpec(a.(*networking.ServiceCIDRSpec), b.(*v1alpha1.ServiceCIDRSpec), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*v1alpha1.ServiceCIDRStatus)(nil), (*networking.ServiceCIDRStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_v1alpha1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(a.(*v1alpha1.ServiceCIDRStatus), b.(*networking.ServiceCIDRStatus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.AddGeneratedConversionFunc((*networking.ServiceCIDRStatus)(nil), (*v1alpha1.ServiceCIDRStatus)(nil), func(a, b interface{}, scope conversion.Scope) error {
|
||||
return Convert_networking_ServiceCIDRStatus_To_v1alpha1_ServiceCIDRStatus(a.(*networking.ServiceCIDRStatus), b.(*v1alpha1.ServiceCIDRStatus), scope)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -173,3 +214,97 @@ func autoConvert_networking_ParentReference_To_v1alpha1_ParentReference(in *netw
|
||||
func Convert_networking_ParentReference_To_v1alpha1_ParentReference(in *networking.ParentReference, out *v1alpha1.ParentReference, s conversion.Scope) error {
|
||||
return autoConvert_networking_ParentReference_To_v1alpha1_ParentReference(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_ServiceCIDR_To_networking_ServiceCIDR(in *v1alpha1.ServiceCIDR, out *networking.ServiceCIDR, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_v1alpha1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_v1alpha1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_ServiceCIDR_To_networking_ServiceCIDR is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_ServiceCIDR_To_networking_ServiceCIDR(in *v1alpha1.ServiceCIDR, out *networking.ServiceCIDR, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_ServiceCIDR_To_networking_ServiceCIDR(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_networking_ServiceCIDR_To_v1alpha1_ServiceCIDR(in *networking.ServiceCIDR, out *v1alpha1.ServiceCIDR, s conversion.Scope) error {
|
||||
out.ObjectMeta = in.ObjectMeta
|
||||
if err := Convert_networking_ServiceCIDRSpec_To_v1alpha1_ServiceCIDRSpec(&in.Spec, &out.Spec, s); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := Convert_networking_ServiceCIDRStatus_To_v1alpha1_ServiceCIDRStatus(&in.Status, &out.Status, s); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_networking_ServiceCIDR_To_v1alpha1_ServiceCIDR is an autogenerated conversion function.
|
||||
func Convert_networking_ServiceCIDR_To_v1alpha1_ServiceCIDR(in *networking.ServiceCIDR, out *v1alpha1.ServiceCIDR, s conversion.Scope) error {
|
||||
return autoConvert_networking_ServiceCIDR_To_v1alpha1_ServiceCIDR(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_ServiceCIDRList_To_networking_ServiceCIDRList(in *v1alpha1.ServiceCIDRList, out *networking.ServiceCIDRList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]networking.ServiceCIDR)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_ServiceCIDRList_To_networking_ServiceCIDRList is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_ServiceCIDRList_To_networking_ServiceCIDRList(in *v1alpha1.ServiceCIDRList, out *networking.ServiceCIDRList, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_ServiceCIDRList_To_networking_ServiceCIDRList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_networking_ServiceCIDRList_To_v1alpha1_ServiceCIDRList(in *networking.ServiceCIDRList, out *v1alpha1.ServiceCIDRList, s conversion.Scope) error {
|
||||
out.ListMeta = in.ListMeta
|
||||
out.Items = *(*[]v1alpha1.ServiceCIDR)(unsafe.Pointer(&in.Items))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_networking_ServiceCIDRList_To_v1alpha1_ServiceCIDRList is an autogenerated conversion function.
|
||||
func Convert_networking_ServiceCIDRList_To_v1alpha1_ServiceCIDRList(in *networking.ServiceCIDRList, out *v1alpha1.ServiceCIDRList, s conversion.Scope) error {
|
||||
return autoConvert_networking_ServiceCIDRList_To_v1alpha1_ServiceCIDRList(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in *v1alpha1.ServiceCIDRSpec, out *networking.ServiceCIDRSpec, s conversion.Scope) error {
|
||||
out.CIDRs = *(*[]string)(unsafe.Pointer(&in.CIDRs))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in *v1alpha1.ServiceCIDRSpec, out *networking.ServiceCIDRSpec, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_ServiceCIDRSpec_To_networking_ServiceCIDRSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_networking_ServiceCIDRSpec_To_v1alpha1_ServiceCIDRSpec(in *networking.ServiceCIDRSpec, out *v1alpha1.ServiceCIDRSpec, s conversion.Scope) error {
|
||||
out.CIDRs = *(*[]string)(unsafe.Pointer(&in.CIDRs))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_networking_ServiceCIDRSpec_To_v1alpha1_ServiceCIDRSpec is an autogenerated conversion function.
|
||||
func Convert_networking_ServiceCIDRSpec_To_v1alpha1_ServiceCIDRSpec(in *networking.ServiceCIDRSpec, out *v1alpha1.ServiceCIDRSpec, s conversion.Scope) error {
|
||||
return autoConvert_networking_ServiceCIDRSpec_To_v1alpha1_ServiceCIDRSpec(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_v1alpha1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in *v1alpha1.ServiceCIDRStatus, out *networking.ServiceCIDRStatus, s conversion.Scope) error {
|
||||
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_v1alpha1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus is an autogenerated conversion function.
|
||||
func Convert_v1alpha1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in *v1alpha1.ServiceCIDRStatus, out *networking.ServiceCIDRStatus, s conversion.Scope) error {
|
||||
return autoConvert_v1alpha1_ServiceCIDRStatus_To_networking_ServiceCIDRStatus(in, out, s)
|
||||
}
|
||||
|
||||
func autoConvert_networking_ServiceCIDRStatus_To_v1alpha1_ServiceCIDRStatus(in *networking.ServiceCIDRStatus, out *v1alpha1.ServiceCIDRStatus, s conversion.Scope) error {
|
||||
out.Conditions = *(*[]v1.Condition)(unsafe.Pointer(&in.Conditions))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert_networking_ServiceCIDRStatus_To_v1alpha1_ServiceCIDRStatus is an autogenerated conversion function.
|
||||
func Convert_networking_ServiceCIDRStatus_To_v1alpha1_ServiceCIDRStatus(in *networking.ServiceCIDRStatus, out *v1alpha1.ServiceCIDRStatus, s conversion.Scope) error {
|
||||
return autoConvert_networking_ServiceCIDRStatus_To_v1alpha1_ServiceCIDRStatus(in, out, s)
|
||||
}
|
||||
|
||||
@@ -656,7 +656,7 @@ func ValidateIPAddressName(name string, prefix bool) []string {
|
||||
if err != nil {
|
||||
errs = append(errs, err.Error())
|
||||
} else if ip.String() != name {
|
||||
errs = append(errs, "not a valid ip in canonical format")
|
||||
errs = append(errs, "must be a canonical format IP address")
|
||||
|
||||
}
|
||||
return errs
|
||||
@@ -721,3 +721,65 @@ func ValidateIPAddressUpdate(update, old *networking.IPAddress) field.ErrorList
|
||||
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.Spec.ParentRef, old.Spec.ParentRef, field.NewPath("spec").Child("parentRef"))...)
|
||||
return allErrs
|
||||
}
|
||||
|
||||
var ValidateServiceCIDRName = apimachineryvalidation.NameIsDNSSubdomain
|
||||
|
||||
func ValidateServiceCIDR(cidrConfig *networking.ServiceCIDR) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMeta(&cidrConfig.ObjectMeta, false, ValidateServiceCIDRName, field.NewPath("metadata"))
|
||||
fieldPath := field.NewPath("spec", "cidrs")
|
||||
|
||||
if len(cidrConfig.Spec.CIDRs) == 0 {
|
||||
allErrs = append(allErrs, field.Required(fieldPath, "at least one CIDR required"))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
if len(cidrConfig.Spec.CIDRs) > 2 {
|
||||
allErrs = append(allErrs, field.Invalid(fieldPath, cidrConfig.Spec, "may only hold up to 2 values"))
|
||||
return allErrs
|
||||
}
|
||||
// validate cidrs are dual stack, one of each IP family
|
||||
if len(cidrConfig.Spec.CIDRs) == 2 {
|
||||
isDual, err := netutils.IsDualStackCIDRStrings(cidrConfig.Spec.CIDRs)
|
||||
if err != nil || !isDual {
|
||||
allErrs = append(allErrs, field.Invalid(fieldPath, cidrConfig.Spec, "may specify no more than one IP for each IP family, i.e 192.168.0.0/24 and 2001:db8::/64"))
|
||||
return allErrs
|
||||
}
|
||||
}
|
||||
|
||||
for i, cidr := range cidrConfig.Spec.CIDRs {
|
||||
allErrs = append(allErrs, validateCIDR(cidr, fieldPath.Index(i))...)
|
||||
}
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
func validateCIDR(cidr string, fldPath *field.Path) field.ErrorList {
|
||||
allErrs := field.ErrorList{}
|
||||
prefix, err := netip.ParsePrefix(cidr)
|
||||
if err != nil {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, cidr, err.Error()))
|
||||
} else {
|
||||
if prefix.Addr() != prefix.Masked().Addr() {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, cidr, "wrong CIDR format, IP doesn't match network IP address"))
|
||||
}
|
||||
if prefix.String() != cidr {
|
||||
allErrs = append(allErrs, field.Invalid(fldPath, cidr, "CIDR not in RFC 5952 canonical format"))
|
||||
}
|
||||
}
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateServiceCIDRUpdate tests if an update to a ServiceCIDR is valid.
|
||||
func ValidateServiceCIDRUpdate(update, old *networking.ServiceCIDR) field.ErrorList {
|
||||
var allErrs field.ErrorList
|
||||
allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...)
|
||||
allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.Spec.CIDRs, old.Spec.CIDRs, field.NewPath("spec").Child("cidrs"))...)
|
||||
|
||||
return allErrs
|
||||
}
|
||||
|
||||
// ValidateServiceCIDRStatusUpdate tests if if an update to a ServiceCIDR Status is valid.
|
||||
func ValidateServiceCIDRStatusUpdate(update, old *networking.ServiceCIDR) field.ErrorList {
|
||||
allErrs := apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))
|
||||
return allErrs
|
||||
}
|
||||
|
||||
@@ -2046,3 +2046,205 @@ func TestValidateIPAddressUpdate(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// TestValidateServiceCIDR exercises ValidateServiceCIDR with valid and
// invalid spec.cidrs combinations, asserting only the number of errors
// returned (not their content).
func TestValidateServiceCIDR(t *testing.T) {

	testCases := map[string]struct {
		expectedErrors int
		ipRange        *networking.ServiceCIDR
	}{
		// no CIDRs at all: one Required error
		"empty-iprange": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
			},
		},
		// more than two CIDRs: rejected before per-entry validation
		"three-ipranges": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.0.0/24", "fd00::/64", "10.0.0.0/16"},
				},
			},
		},
		"good-iprange-ipv4": {
			expectedErrors: 0,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.0.0/24"},
				},
			},
		},
		"good-iprange-ipv6": {
			expectedErrors: 0,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"fd00:1234::/64"},
				},
			},
		},
		// dual-stack pair, one CIDR per family
		"good-iprange-ipv4-ipv6": {
			expectedErrors: 0,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.0.0/24", "fd00:1234::/64"},
				},
			},
		},
		// not parseable as a prefix at all
		"not-iprange-ipv4": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"asdasdasd"},
				},
			},
		},
		// a bare IP without a prefix length is not a CIDR
		"iponly-iprange-ipv4": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.0.1"},
				},
			},
		},
		// host bits set: .1 is not the network address of /24
		"badip-iprange-ipv4": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.0.1/24"},
				},
			},
		},
		// host bits set: ::2 is not the network address of /64
		"badip-iprange-ipv6": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"fd00:1234::2/64"},
				},
			},
		},
		// two errors: host bits set AND uppercase hex is not RFC 5952 canonical
		"badip-iprange-caps-ipv6": {
			expectedErrors: 2,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"FD00:1234::2/64"},
				},
			},
		},
		// uppercase IPv6 fails canonical-form check only
		"good-iprange-ipv4-bad-ipv6": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.0.0/24", "FD00:1234::/64"},
				},
			},
		},
		// zero-padded IPv4 octet ("007") is rejected by the parser
		"good-iprange-ipv6-bad-ipv4": {
			expectedErrors: 1,
			ipRange: &networking.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "test-name",
				},
				Spec: networking.ServiceCIDRSpec{
					CIDRs: []string{"192.168.007.0/24", "fd00:1234::/64"},
				},
			},
		},
	}

	for name, testCase := range testCases {
		t.Run(name, func(t *testing.T) {
			errs := ValidateServiceCIDR(testCase.ipRange)
			if len(errs) != testCase.expectedErrors {
				t.Errorf("Expected %d errors, got %d errors: %v", testCase.expectedErrors, len(errs), errs)
			}
		})
	}
}
|
||||
|
||||
func TestValidateServiceCIDRUpdate(t *testing.T) {
|
||||
oldServiceCIDR := &networking.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "mysvc",
|
||||
ResourceVersion: "1",
|
||||
},
|
||||
Spec: networking.ServiceCIDRSpec{
|
||||
CIDRs: []string{"192.168.0.0/24", "fd00:1234::/64"},
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
svc func(svc *networking.ServiceCIDR) *networking.ServiceCIDR
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
name: "Successful update, no changes",
|
||||
svc: func(svc *networking.ServiceCIDR) *networking.ServiceCIDR {
|
||||
out := svc.DeepCopy()
|
||||
return out
|
||||
},
|
||||
expectErr: false,
|
||||
},
|
||||
|
||||
{
|
||||
name: "Failed update, update spec.CIDRs single stack",
|
||||
svc: func(svc *networking.ServiceCIDR) *networking.ServiceCIDR {
|
||||
out := svc.DeepCopy()
|
||||
out.Spec.CIDRs = []string{"10.0.0.0/16"}
|
||||
return out
|
||||
}, expectErr: true,
|
||||
},
|
||||
{
|
||||
name: "Failed update, update spec.CIDRs dual stack",
|
||||
svc: func(svc *networking.ServiceCIDR) *networking.ServiceCIDR {
|
||||
out := svc.DeepCopy()
|
||||
out.Spec.CIDRs = []string{"10.0.0.0/24", "fd00:1234::/64"}
|
||||
return out
|
||||
}, expectErr: true,
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
err := ValidateServiceCIDRUpdate(testCase.svc(oldServiceCIDR), oldServiceCIDR)
|
||||
if !testCase.expectErr && err != nil {
|
||||
t.Errorf("ValidateServiceCIDRUpdate must be successful for test '%s', got %v", testCase.name, err)
|
||||
}
|
||||
if testCase.expectErr && err == nil {
|
||||
t.Errorf("ValidateServiceCIDRUpdate must return error for test: %s, but got nil", testCase.name)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
105
pkg/apis/networking/zz_generated.deepcopy.go
generated
105
pkg/apis/networking/zz_generated.deepcopy.go
generated
@@ -823,3 +823,108 @@ func (in *ServiceBackendPort) DeepCopy() *ServiceBackendPort {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// NOTE: generated code (zz_generated.deepcopy.go) — regenerate rather than hand-edit.
func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	// ObjectMeta, Spec and Status contain reference types, so delegate to
	// their own deep-copy helpers instead of relying on the shallow *out = *in.
	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
	in.Spec.DeepCopyInto(&out.Spec)
	in.Status.DeepCopyInto(&out.Status)
	return
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
// A nil receiver yields a nil copy.
func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
	if in == nil {
		return nil
	}
	out := new(ServiceCIDR)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Return an untyped nil so callers comparing the interface against nil
	// are not fooled by a typed-nil pointer.
	return nil
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	in.ListMeta.DeepCopyInto(&out.ListMeta)
	// Allocate a fresh Items slice and deep-copy each element; a nil slice
	// stays nil so round-tripping preserves nil-vs-empty.
	if in.Items != nil {
		in, out := &in.Items, &out.Items
		*out = make([]ServiceCIDR, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
// A nil receiver yields a nil copy.
func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
	if in == nil {
		return nil
	}
	out := new(ServiceCIDRList)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	// Return an untyped nil so the runtime.Object interface compares equal to nil.
	return nil
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
	*out = *in
	// CIDRs is a []string: allocate a new backing array so the copy does not
	// alias the receiver's slice; nil stays nil.
	if in.CIDRs != nil {
		in, out := &in.CIDRs, &out.CIDRs
		*out = make([]string, len(*in))
		copy(*out, *in)
	}
	return
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
// A nil receiver yields a nil copy.
func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
	if in == nil {
		return nil
	}
	out := new(ServiceCIDRSpec)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
	*out = *in
	// Conditions elements (metav1.Condition) contain reference types, so each
	// element is deep-copied into the freshly allocated slice.
	if in.Conditions != nil {
		in, out := &in.Conditions, &out.Conditions
		*out = make([]v1.Condition, len(*in))
		for i := range *in {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	return
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
// A nil receiver yields a nil copy.
func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
	if in == nil {
		return nil
	}
	out := new(ServiceCIDRStatus)
	in.DeepCopyInto(out)
	return out
}
|
||||
|
||||
542
pkg/controller/servicecidrs/servicecidrs_controller.go
Normal file
542
pkg/controller/servicecidrs/servicecidrs_controller.go
Normal file
@@ -0,0 +1,542 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidrs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
networkingapiv1alpha1apply "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||
networkinginformers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
networkinglisters "k8s.io/client-go/listers/networking/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
"k8s.io/kubernetes/pkg/util/iptree"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
const (
	// maxRetries is the max number of times a service object will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the
	// sequence of delays between successive queuings of a service.
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	maxRetries     = 15
	controllerName = "service-cidr-controller"

	// ServiceCIDRProtectionFinalizer blocks deletion of a ServiceCIDR until the
	// controller has verified no IPAddress would be orphaned.
	ServiceCIDRProtectionFinalizer = "networking.k8s.io/service-cidr-finalizer"

	// deletionGracePeriod is the time to wait to remove the finalizer from a ServiceCIDR to ensure the
	// deletion information has been propagated to the apiserver allocators to avoid allocating any IP address
	// before we completely delete the ServiceCIDR.
	deletionGracePeriod = 10 * time.Second
)
|
||||
|
||||
// NewController returns a new *Controller.
|
||||
func NewController(
|
||||
serviceCIDRInformer networkinginformers.ServiceCIDRInformer,
|
||||
ipAddressInformer networkinginformers.IPAddressInformer,
|
||||
client clientset.Interface,
|
||||
) *Controller {
|
||||
broadcaster := record.NewBroadcaster()
|
||||
recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})
|
||||
c := &Controller{
|
||||
client: client,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"),
|
||||
tree: iptree.New[sets.Set[string]](),
|
||||
workerLoopPeriod: time.Second,
|
||||
}
|
||||
|
||||
_, _ = serviceCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.addServiceCIDR,
|
||||
UpdateFunc: c.updateServiceCIDR,
|
||||
DeleteFunc: c.deleteServiceCIDR,
|
||||
})
|
||||
c.serviceCIDRLister = serviceCIDRInformer.Lister()
|
||||
c.serviceCIDRsSynced = serviceCIDRInformer.Informer().HasSynced
|
||||
|
||||
_, _ = ipAddressInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.addIPAddress,
|
||||
DeleteFunc: c.deleteIPAddress,
|
||||
})
|
||||
|
||||
c.ipAddressLister = ipAddressInformer.Lister()
|
||||
c.ipAddressSynced = ipAddressInformer.Informer().HasSynced
|
||||
|
||||
c.eventBroadcaster = broadcaster
|
||||
c.eventRecorder = recorder
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Controller manages selector-based service ipAddress.
type Controller struct {
	client           clientset.Interface
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	serviceCIDRLister  networkinglisters.ServiceCIDRLister
	serviceCIDRsSynced cache.InformerSynced

	ipAddressLister networkinglisters.IPAddressLister
	ipAddressSynced cache.InformerSynced

	// queue holds ServiceCIDR names (cluster-scoped keys) pending reconciliation.
	queue workqueue.RateLimitingInterface

	// workerLoopPeriod is the time between worker runs. The workers process the queue of service and ipRange changes.
	workerLoopPeriod time.Duration

	// tree maps each CIDR prefix to the set of ServiceCIDR names declaring it
	// (rebuilt from the lister cache by syncCIDRs); guarded by muTree.
	muTree sync.Mutex
	tree   *iptree.Tree[sets.Set[string]]
}
|
||||
|
||||
// Run will not return until stopCh is closed.
func (c *Controller) Run(ctx context.Context, workers int) {
	defer utilruntime.HandleCrash()
	// Shutting the queue down makes processNext return false, stopping workers.
	defer c.queue.ShutDown()

	c.eventBroadcaster.StartStructuredLogging(0)
	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
	defer c.eventBroadcaster.Shutdown()

	logger := klog.FromContext(ctx)

	logger.Info("Starting", "controller", controllerName)
	defer logger.Info("Shutting down", "controller", controllerName)

	// Do not start workers until both informer caches have synced.
	if !cache.WaitForNamedCacheSync(controllerName, ctx.Done(), c.serviceCIDRsSynced, c.ipAddressSynced) {
		return
	}

	for i := 0; i < workers; i++ {
		go wait.UntilWithContext(ctx, c.worker, c.workerLoopPeriod)
	}
	<-ctx.Done()
}
|
||||
|
||||
func (c *Controller) addServiceCIDR(obj interface{}) {
|
||||
cidr, ok := obj.(*networkingapiv1alpha1.ServiceCIDR)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
c.queue.Add(cidr.Name)
|
||||
for _, key := range c.overlappingServiceCIDRs(cidr) {
|
||||
c.queue.Add(key)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) updateServiceCIDR(oldObj, obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
c.queue.Add(key)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteServiceCIDR
|
||||
func (c *Controller) deleteServiceCIDR(obj interface{}) {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
c.queue.Add(key)
|
||||
}
|
||||
}
|
||||
|
||||
// addIPAddress may block a ServiceCIDR deletion
|
||||
func (c *Controller) addIPAddress(obj interface{}) {
|
||||
ip, ok := obj.(*networkingapiv1alpha1.IPAddress)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
for _, cidr := range c.containingServiceCIDRs(ip) {
|
||||
c.queue.Add(cidr)
|
||||
}
|
||||
}
|
||||
|
||||
// deleteIPAddress may unblock a ServiceCIDR deletion
|
||||
func (c *Controller) deleteIPAddress(obj interface{}) {
|
||||
ip, ok := obj.(*networkingapiv1alpha1.IPAddress)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
ip, ok = tombstone.Obj.(*networkingapiv1alpha1.IPAddress)
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
for _, cidr := range c.containingServiceCIDRs(ip) {
|
||||
c.queue.Add(cidr)
|
||||
}
|
||||
}
|
||||
|
||||
// overlappingServiceCIDRs, given a ServiceCIDR return the ServiceCIDRs that contain or are contained,
|
||||
// this is required because adding or removing a CIDR will require to recompute the
|
||||
// state of each ServiceCIDR to check if can be unblocked on deletion.
|
||||
func (c *Controller) overlappingServiceCIDRs(serviceCIDR *networkingapiv1alpha1.ServiceCIDR) []string {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
|
||||
serviceCIDRs := sets.New[string]()
|
||||
for _, cidr := range serviceCIDR.Spec.CIDRs {
|
||||
if prefix, err := netip.ParsePrefix(cidr); err == nil { // if is empty err will not be nil
|
||||
c.tree.WalkPath(prefix, func(k netip.Prefix, v sets.Set[string]) bool {
|
||||
serviceCIDRs.Insert(v.UnsortedList()...)
|
||||
return false
|
||||
})
|
||||
c.tree.WalkPrefix(prefix, func(k netip.Prefix, v sets.Set[string]) bool {
|
||||
serviceCIDRs.Insert(v.UnsortedList()...)
|
||||
return false
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return serviceCIDRs.UnsortedList()
|
||||
}
|
||||
|
||||
// containingServiceCIDRs, given an IPAddress return the ServiceCIDRs that contains the IP,
|
||||
// as it may block or be blocking the deletion of the ServiceCIDRs that contain it.
|
||||
func (c *Controller) containingServiceCIDRs(ip *networkingapiv1alpha1.IPAddress) []string {
|
||||
// only process IPs managed by the kube-apiserver
|
||||
managedBy, ok := ip.Labels[networkingapiv1alpha1.LabelManagedBy]
|
||||
if !ok || managedBy != ipallocator.ControllerName {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
address, err := netip.ParseAddr(ip.Name)
|
||||
if err != nil {
|
||||
// This should not happen, the IPAddress object validates
|
||||
// the name is a valid IPAddress
|
||||
return []string{}
|
||||
}
|
||||
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
serviceCIDRs := []string{}
|
||||
// walk the tree to get all the ServiceCIDRs that contain this IP address
|
||||
prefixes := c.tree.GetHostIPPrefixMatches(address)
|
||||
for _, v := range prefixes {
|
||||
serviceCIDRs = append(serviceCIDRs, v.UnsortedList()...)
|
||||
}
|
||||
|
||||
return serviceCIDRs
|
||||
}
|
||||
|
||||
func (c *Controller) worker(ctx context.Context) {
|
||||
for c.processNext(ctx) {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Controller) processNext(ctx context.Context) bool {
|
||||
eKey, quit := c.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer c.queue.Done(eKey)
|
||||
|
||||
key := eKey.(string)
|
||||
err := c.sync(ctx, key)
|
||||
if err == nil {
|
||||
c.queue.Forget(key)
|
||||
return true
|
||||
}
|
||||
logger := klog.FromContext(ctx)
|
||||
if c.queue.NumRequeues(key) < maxRetries {
|
||||
logger.V(2).Info("Error syncing ServiceCIDR, retrying", "ServiceCIDR", key, "err", err)
|
||||
c.queue.AddRateLimited(key)
|
||||
} else {
|
||||
logger.Info("Dropping ServiceCIDR out of the queue", "ServiceCIDR", key, "err", err)
|
||||
c.queue.Forget(key)
|
||||
utilruntime.HandleError(err)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// syncCIDRs rebuilds the radix tree based from the informers cache
|
||||
func (c *Controller) syncCIDRs() error {
|
||||
serviceCIDRList, err := c.serviceCIDRLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// track the names of the different ServiceCIDRs, there
|
||||
// can be multiple ServiceCIDRs sharing the same prefixes
|
||||
// and this is important to determine if a ServiceCIDR can
|
||||
// be deleted.
|
||||
tree := iptree.New[sets.Set[string]]()
|
||||
for _, serviceCIDR := range serviceCIDRList {
|
||||
for _, cidr := range serviceCIDR.Spec.CIDRs {
|
||||
if prefix, err := netip.ParsePrefix(cidr); err == nil { // if is empty err will not be nil
|
||||
// if the prefix already exist append the new ServiceCIDR name
|
||||
v, ok := tree.GetPrefix(prefix)
|
||||
if !ok {
|
||||
v = sets.Set[string]{}
|
||||
}
|
||||
v.Insert(serviceCIDR.Name)
|
||||
tree.InsertPrefix(prefix, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
c.tree = tree
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Controller) sync(ctx context.Context, key string) error {
|
||||
logger := klog.FromContext(ctx)
|
||||
startTime := time.Now()
|
||||
defer func() {
|
||||
logger.V(4).Info("Finished syncing ServiceCIDR)", "ServiceCIDR", key, "elapsed", time.Since(startTime))
|
||||
}()
|
||||
|
||||
// TODO(aojea) verify if this present a performance problem
|
||||
// restore the radix tree from the current state
|
||||
err := c.syncCIDRs()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.V(4).Info("syncing ServiceCIDR", "ServiceCIDR", key)
|
||||
cidr, err := c.serviceCIDRLister.Get(key)
|
||||
if err != nil {
|
||||
if apierrors.IsNotFound(err) {
|
||||
logger.V(4).Info("ServiceCIDR no longer exist", "ServiceCIDR", key)
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Deleting ....
|
||||
if !cidr.GetDeletionTimestamp().IsZero() {
|
||||
// check if the existing ServiceCIDR can be deleted before removing the finalizer
|
||||
ok, err := c.canDeleteCIDR(ctx, cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
// update the status to indicate why the ServiceCIDR can not be deleted,
|
||||
// it will be reevaludated by an event on any ServiceCIDR or IPAddress related object
|
||||
// that may remove this condition.
|
||||
svcApplyStatus := networkingapiv1alpha1apply.ServiceCIDRStatus().WithConditions(
|
||||
metav1apply.Condition().
|
||||
WithType(networkingapiv1alpha1.ServiceCIDRConditionReady).
|
||||
WithStatus(metav1.ConditionFalse).
|
||||
WithReason(networkingapiv1alpha1.ServiceCIDRReasonTerminating).
|
||||
WithMessage("There are still IPAddresses referencing the ServiceCIDR, please remove them or create a new ServiceCIDR").
|
||||
WithLastTransitionTime(metav1.Now()))
|
||||
svcApply := networkingapiv1alpha1apply.ServiceCIDR(cidr.Name).WithStatus(svcApplyStatus)
|
||||
_, err = c.client.NetworkingV1alpha1().ServiceCIDRs().ApplyStatus(ctx, svcApply, metav1.ApplyOptions{FieldManager: controllerName, Force: true})
|
||||
return err
|
||||
}
|
||||
// If there are no IPAddress depending on this ServiceCIDR is safe to remove it,
|
||||
// however, there can be a race when the allocators still consider the ServiceCIDR
|
||||
// ready and allocate a new IPAddress from them, to avoid that, we wait during a
|
||||
// a grace period to be sure the deletion change has been propagated to the allocators
|
||||
// and no new IPAddress is going to be allocated.
|
||||
timeUntilDeleted := deletionGracePeriod - time.Since(cidr.GetDeletionTimestamp().Time)
|
||||
if timeUntilDeleted > 0 {
|
||||
c.queue.AddAfter(key, timeUntilDeleted)
|
||||
return nil
|
||||
}
|
||||
return c.removeServiceCIDRFinalizerIfNeeded(ctx, cidr)
|
||||
}
|
||||
|
||||
// Created or Updated, the ServiceCIDR must have a finalizer.
|
||||
err = c.addServiceCIDRFinalizerIfNeeded(ctx, cidr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set Ready condition to True.
|
||||
svcApplyStatus := networkingapiv1alpha1apply.ServiceCIDRStatus().WithConditions(
|
||||
metav1apply.Condition().
|
||||
WithType(networkingapiv1alpha1.ServiceCIDRConditionReady).
|
||||
WithStatus(metav1.ConditionTrue).
|
||||
WithMessage("Kubernetes Service CIDR is ready").
|
||||
WithLastTransitionTime(metav1.Now()))
|
||||
svcApply := networkingapiv1alpha1apply.ServiceCIDR(cidr.Name).WithStatus(svcApplyStatus)
|
||||
if _, err := c.client.NetworkingV1alpha1().ServiceCIDRs().ApplyStatus(ctx, svcApply, metav1.ApplyOptions{FieldManager: controllerName, Force: true}); err != nil {
|
||||
logger.Info("error updating default ServiceCIDR status", "error", err)
|
||||
c.eventRecorder.Eventf(cidr, v1.EventTypeWarning, "KubernetesServiceCIDRError", "The ServiceCIDR Status can not be set to Ready=True")
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// canDeleteCIDR checks that the ServiceCIDR can be safely deleted and not leave orphan IPAddresses
|
||||
func (c *Controller) canDeleteCIDR(ctx context.Context, serviceCIDR *networkingapiv1alpha1.ServiceCIDR) (bool, error) {
|
||||
// TODO(aojea) Revisit the lock usage and if we need to keep it only for the tree operations
|
||||
// to avoid holding it during the whole operation.
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
logger := klog.FromContext(ctx)
|
||||
// Check if there is a subnet that already contains the ServiceCIDR that is going to be deleted.
|
||||
hasParent := true
|
||||
for _, cidr := range serviceCIDR.Spec.CIDRs {
|
||||
// Walk the tree to find if there is a larger subnet that contains the existing one,
|
||||
// or there is another ServiceCIDR with the same subnet.
|
||||
if prefix, err := netip.ParsePrefix(cidr); err == nil {
|
||||
serviceCIDRs := sets.New[string]()
|
||||
c.tree.WalkPath(prefix, func(k netip.Prefix, v sets.Set[string]) bool {
|
||||
serviceCIDRs.Insert(v.UnsortedList()...)
|
||||
return false
|
||||
})
|
||||
if serviceCIDRs.Len() == 1 && serviceCIDRs.Has(serviceCIDR.Name) {
|
||||
hasParent = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// All the existing IP addresses will be contained on the parent ServiceCIDRs,
|
||||
// it is safe to delete, remove the finalizer.
|
||||
if hasParent {
|
||||
logger.V(2).Info("Removing finalizer for ServiceCIDR", "ServiceCIDR", serviceCIDR.String())
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// TODO: optimize this
|
||||
// Since current ServiceCIDR does not have another ServiceCIDR containing it,
|
||||
// verify there are no existing IPAddresses referencing it that will be orphan.
|
||||
for _, cidr := range serviceCIDR.Spec.CIDRs {
|
||||
// get all the IPv4 addresses
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingapiv1alpha1.LabelIPAddressFamily: string(convertToV1IPFamily(netutils.IPFamilyOfCIDRString(cidr))),
|
||||
networkingapiv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
}).AsSelectorPreValidated()
|
||||
ips, err := c.ipAddressLister.List(ipLabelSelector)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
for _, ip := range ips {
|
||||
// if the longest prefix match is the ServiceCIDR to be deleted
|
||||
// and is the only existing one, at least one IPAddress will be
|
||||
// orphan, block the ServiceCIDR deletion.
|
||||
address, err := netip.ParseAddr(ip.Name)
|
||||
if err != nil {
|
||||
// the IPAddress object validates that the name is a valid IPAddress
|
||||
logger.Info("[SHOULD NOT HAPPEN] unexpected error parsing IPAddress", "IPAddress", ip.Name, "error", err)
|
||||
continue
|
||||
}
|
||||
// walk the tree to find all ServiceCIDRs containing this IP
|
||||
prefixes := c.tree.GetHostIPPrefixMatches(address)
|
||||
if len(prefixes) != 1 {
|
||||
continue
|
||||
}
|
||||
for _, v := range prefixes {
|
||||
if v.Len() == 1 && v.Has(serviceCIDR.Name) {
|
||||
return false, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// There are no IPAddresses that depend on the existing ServiceCIDR, so
|
||||
// it is safe to delete, remove finalizer.
|
||||
logger.Info("ServiceCIDR no longer have orphan IPs", "ServiceCDIR", serviceCIDR.String())
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (c *Controller) addServiceCIDRFinalizerIfNeeded(ctx context.Context, cidr *networkingapiv1alpha1.ServiceCIDR) error {
|
||||
for _, f := range cidr.GetFinalizers() {
|
||||
if f == ServiceCIDRProtectionFinalizer {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
patch := map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"finalizers": []string{ServiceCIDRProtectionFinalizer},
|
||||
},
|
||||
}
|
||||
patchBytes, err := json.Marshal(patch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.client.NetworkingV1alpha1().ServiceCIDRs().Patch(ctx, cidr.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
klog.FromContext(ctx).V(4).Info("Added protection finalizer to ServiceCIDR", "ServiceCIDR", cidr.Name)
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (c *Controller) removeServiceCIDRFinalizerIfNeeded(ctx context.Context, cidr *networkingapiv1alpha1.ServiceCIDR) error {
|
||||
found := false
|
||||
for _, f := range cidr.GetFinalizers() {
|
||||
if f == ServiceCIDRProtectionFinalizer {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil
|
||||
}
|
||||
patch := map[string]interface{}{
|
||||
"metadata": map[string]interface{}{
|
||||
"$deleteFromPrimitiveList/finalizers": []string{ServiceCIDRProtectionFinalizer},
|
||||
},
|
||||
}
|
||||
patchBytes, err := json.Marshal(patch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.client.NetworkingV1alpha1().ServiceCIDRs().Patch(ctx, cidr.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return err
|
||||
}
|
||||
klog.FromContext(ctx).V(4).Info("Removed protection finalizer from ServiceCIDRs", "ServiceCIDR", cidr.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Convert netutils.IPFamily to v1.IPFamily
|
||||
// TODO: consolidate helpers
|
||||
// copied from pkg/proxy/util/utils.go
|
||||
func convertToV1IPFamily(ipFamily netutils.IPFamily) v1.IPFamily {
|
||||
switch ipFamily {
|
||||
case netutils.IPv4:
|
||||
return v1.IPv4Protocol
|
||||
case netutils.IPv6:
|
||||
return v1.IPv6Protocol
|
||||
}
|
||||
|
||||
return v1.IPFamilyUnknown
|
||||
}
|
||||
597
pkg/controller/servicecidrs/servicecidrs_controller_test.go
Normal file
597
pkg/controller/servicecidrs/servicecidrs_controller_test.go
Normal file
@@ -0,0 +1,597 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidrs
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/kubernetes/pkg/controller"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
netutils "k8s.io/utils/net"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
// testController bundles the Controller under test with direct handles to the
// informer stores so tests can seed objects without running the informers.
type testController struct {
	*Controller
	// servicecidrsStore is the ServiceCIDR informer's backing store.
	servicecidrsStore cache.Store
	// ipaddressesStore is the IPAddress informer's backing store.
	ipaddressesStore cache.Store
}
|
||||
|
||||
func newController(t *testing.T, cidrs []*networkingapiv1alpha1.ServiceCIDR, ips []*networkingapiv1alpha1.IPAddress) (*fake.Clientset, *testController) {
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc())
|
||||
|
||||
serviceCIDRInformer := informerFactory.Networking().V1alpha1().ServiceCIDRs()
|
||||
cidrStore := serviceCIDRInformer.Informer().GetStore()
|
||||
for _, obj := range cidrs {
|
||||
err := cidrStore.Add(obj)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
ipAddressInformer := informerFactory.Networking().V1alpha1().IPAddresses()
|
||||
ipStore := ipAddressInformer.Informer().GetStore()
|
||||
for _, obj := range ips {
|
||||
err := ipStore.Add(obj)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
controller := NewController(
|
||||
serviceCIDRInformer,
|
||||
ipAddressInformer,
|
||||
client)
|
||||
|
||||
var alwaysReady = func() bool { return true }
|
||||
controller.serviceCIDRsSynced = alwaysReady
|
||||
controller.ipAddressSynced = alwaysReady
|
||||
|
||||
return client, &testController{
|
||||
controller,
|
||||
cidrStore,
|
||||
ipStore,
|
||||
}
|
||||
}
|
||||
|
||||
// TestControllerSync exercises Controller.sync against seeded ServiceCIDR and
// IPAddress objects and asserts on the client actions (verb, resource,
// subresource) the sync produces: adding the protection finalizer, updating
// status while IPAddresses still reference the CIDR, and removing the
// finalizer once the CIDR is deletable.
func TestControllerSync(t *testing.T) {
	now := time.Now()

	// ServiceCIDR that is just being deleted
	deletingServiceCIDR := makeServiceCIDR("deleting-cidr", "192.168.0.0/24", "2001:db2::/64")
	deletingServiceCIDR.Finalizers = []string{ServiceCIDRProtectionFinalizer}
	deletingServiceCIDR.DeletionTimestamp = ptr.To[metav1.Time](metav1.Now())

	// ServiceCIDR that has been deleted for longer than the deletionGracePeriod
	deletedServiceCIDR := makeServiceCIDR("deleted-cidr", "192.168.0.0/24", "2001:db2::/64")
	deletedServiceCIDR.Finalizers = []string{ServiceCIDRProtectionFinalizer}
	deletedServiceCIDR.DeletionTimestamp = ptr.To[metav1.Time](metav1.NewTime(now.Add(-deletionGracePeriod - 1*time.Second)))

	testCases := []struct {
		name       string
		cidrs      []*networkingapiv1alpha1.ServiceCIDR
		ips        []*networkingapiv1alpha1.IPAddress
		cidrSynced string
		actions    [][]string // verb and resource and subresource
	}{
		{
			name: "no existing service CIDRs",
		},
		{
			name: "default service CIDR must have finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			cidrSynced: defaultservicecidr.DefaultServiceCIDRName,
			actions:    [][]string{{"patch", "servicecidrs", ""}, {"patch", "servicecidrs", "status"}},
		},
		{
			name: "service CIDR must have finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR("no-finalizer", "192.168.0.0/24", "2001:db2::/64"),
			},
			cidrSynced: "no-finalizer",
			actions:    [][]string{{"patch", "servicecidrs", ""}, {"patch", "servicecidrs", "status"}},
		},
		{
			name: "service CIDR being deleted must remove the finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", ""}},
		},
		{
			name: "service CIDR being deleted but within the grace period must be requeued not remove the finalizer", // TODO: assert is actually requeued
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletingServiceCIDR,
			},
			cidrSynced: deletingServiceCIDR.Name,
			actions:    [][]string{},
		},
		{
			name: "service CIDR being deleted with IPv4 addresses should update the status",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", "status"}},
		},
		{
			name: "service CIDR being deleted and overlapping same range and IPv4 addresses should remove the finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", ""}},
		},
		{
			name: "service CIDR being deleted and overlapping and IPv4 addresses should remove the finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
				makeServiceCIDR("overlapping", "192.168.0.0/16", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", ""}},
		},
		{
			name: "service CIDR being deleted and not overlapping and IPv4 addresses should update the status",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
				makeServiceCIDR("overlapping", "192.168.255.0/26", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", "status"}},
		},
		{
			name: "service CIDR being deleted with IPv6 addresses should update the status",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("2001:db2::1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", "status"}},
		},
		{
			name: "service CIDR being deleted and overlapping same range and IPv6 addresses should remove the finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("2001:db2::1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", ""}},
		},
		{
			name: "service CIDR being deleted and overlapping and IPv6 addresses should remove the finalizer",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
				makeServiceCIDR("overlapping", "192.168.0.0/16", "2001:db2::/48"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("2001:db2::1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", ""}},
		},
		{
			name: "service CIDR being deleted and not overlapping and IPv6 addresses should update the status",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				deletedServiceCIDR,
				makeServiceCIDR("overlapping", "192.168.255.0/26", "2001:db2:a:b::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("2001:db2::1"),
			},
			cidrSynced: deletedServiceCIDR.Name,
			actions:    [][]string{{"patch", "servicecidrs", "status"}},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			client, controller := newController(t, tc.cidrs, tc.ips)
			// server side apply does not play well with fake client go
			// so we skip the errors and only assert on the actions
			// https://github.com/kubernetes/kubernetes/issues/99953
			_ = controller.sync(context.Background(), tc.cidrSynced)
			expectAction(t, client.Actions(), tc.actions)

		})
	}
}
|
||||
|
||||
func makeServiceCIDR(name, primary, secondary string) *networkingapiv1alpha1.ServiceCIDR {
|
||||
serviceCIDR := &networkingapiv1alpha1.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingapiv1alpha1.ServiceCIDRSpec{},
|
||||
}
|
||||
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, primary)
|
||||
if secondary != "" {
|
||||
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, secondary)
|
||||
}
|
||||
return serviceCIDR
|
||||
}
|
||||
|
||||
func makeIPAddress(name string) *networkingapiv1alpha1.IPAddress {
|
||||
family := string(v1.IPv4Protocol)
|
||||
if netutils.IsIPv6String(name) {
|
||||
family = string(v1.IPv6Protocol)
|
||||
}
|
||||
return &networkingapiv1alpha1.IPAddress{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
Labels: map[string]string{
|
||||
networkingapiv1alpha1.LabelIPAddressFamily: family,
|
||||
networkingapiv1alpha1.LabelManagedBy: ipallocator.ControllerName,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func expectAction(t *testing.T, actions []k8stesting.Action, expected [][]string) {
|
||||
t.Helper()
|
||||
if len(actions) != len(expected) {
|
||||
t.Fatalf("Expected at least %d actions, got %d \ndiff: %v", len(expected), len(actions), cmp.Diff(expected, actions))
|
||||
}
|
||||
|
||||
for i, action := range actions {
|
||||
verb := expected[i][0]
|
||||
if action.GetVerb() != verb {
|
||||
t.Errorf("Expected action %d verb to be %s, got %s", i, verb, action.GetVerb())
|
||||
}
|
||||
resource := expected[i][1]
|
||||
if action.GetResource().Resource != resource {
|
||||
t.Errorf("Expected action %d resource to be %s, got %s", i, resource, action.GetResource().Resource)
|
||||
}
|
||||
subresource := expected[i][2]
|
||||
if action.GetSubresource() != subresource {
|
||||
t.Errorf("Expected action %d subresource to be %s, got %s", i, subresource, action.GetSubresource())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestController_canDeleteCIDR verifies canDeleteCIDR: a ServiceCIDR is
// deletable when no IPAddress depends on it exclusively — either no IPAddress
// falls inside its ranges, or every such IPAddress is also covered by another
// ServiceCIDR. Subnet/broadcast IPv4 addresses are not allocatable and so do
// not block deletion.
func TestController_canDeleteCIDR(t *testing.T) {
	tests := []struct {
		name       string
		cidrs      []*networkingapiv1alpha1.ServiceCIDR
		ips        []*networkingapiv1alpha1.IPAddress
		cidrSynced *networkingapiv1alpha1.ServiceCIDR
		want       bool
	}{
		{
			name:       "empty",
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			name: "CIDR and no IPs",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			name: "CIDR with IPs",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.24"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       false,
		},
		{
			name: "CIDR without IPs",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.1.24"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			name: "CIDR with IPv4 address referencing the subnet address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.0"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			name: "CIDR with IPv4 address referencing the broadcast address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.255"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			// IPv6 has no broadcast address, so the all-ones host is allocatable
			// and blocks deletion.
			name: "CIDR with IPv6 address referencing the broadcast address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("2001:0db2::ffff:ffff:ffff:ffff"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       false,
		},
		{
			name: "CIDR with same range overlapping and IPs",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.23"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			name: "CIDR with smaller range overlapping and IPs",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/26", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.23"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
		{
			// 192.168.0.23 is outside 192.168.0.0/28, so only the CIDR being
			// deleted covers it: deletion must be blocked.
			name: "CIDR with smaller range overlapping but IPs orphan",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/28", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.23"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       false,
		},
		{
			name: "CIDR with larger range overlapping and IPs",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/16", "2001:db2::/64"),
			},
			ips: []*networkingapiv1alpha1.IPAddress{
				makeIPAddress("192.168.0.23"),
			},
			cidrSynced: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want:       true,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			_, controller := newController(t, tc.cidrs, tc.ips)
			// Populate the controller's internal trees from the seeded stores.
			err := controller.syncCIDRs()
			if err != nil {
				t.Fatal(err)
			}

			got, err := controller.canDeleteCIDR(context.Background(), tc.cidrSynced)
			if err != nil {
				t.Fatal(err)
			}
			if got != tc.want {
				t.Errorf("Controller.canDeleteCIDR() = %v, want %v", got, tc.want)
			}
		})
	}
}
|
||||
|
||||
// TestController_ipToCidrs verifies containingServiceCIDRs: it must return the
// names of all ServiceCIDRs whose ranges can actually allocate the given IP,
// excluding IPv4 subnet/broadcast addresses and the IPv6 subnet address.
func TestController_ipToCidrs(t *testing.T) {
	tests := []struct {
		name  string
		cidrs []*networkingapiv1alpha1.ServiceCIDR
		ip    *networkingapiv1alpha1.IPAddress
		want  []string
	}{
		{
			name: "empty",
			ip:   makeIPAddress("192.168.0.23"),
			want: []string{},
		}, {
			name: "one CIDR",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("192.168.0.23"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName},
		}, {
			name: "two equal CIDR",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("192.168.0.23"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"},
		}, {
			name: "three CIDR - two same and one larger",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("192.168.0.23"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping", "overlapping2"},
		}, {
			// the subnet address is never allocatable, so no CIDR matches
			name: "three CIDR - two same and one larger - IPv4 subnet address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("192.168.0.0"),
			want: []string{},
		}, {
			// .63 is only a broadcast address for the /26 range, so only the
			// two /24 CIDRs can contain it
			name: "three CIDR - two same and one larger - IPv4 broadcast address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("192.168.0.63"), // broadcast for 192.168.0.0/26
			want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"},
		}, {
			name: "three CIDR - two same and one larger - IPv6 subnet address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("2001:db2::"),
			want: []string{},
		}, {
			// IPv6 has no broadcast address, so the all-ones host of the /64
			// ranges is allocatable; it falls outside the /96 range
			name: "three CIDR - two same and one larger - IPv6 broadcast address",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			ip:   makeIPAddress("2001:0db2::ffff:ffff:ffff:ffff"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"},
		}}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, controller := newController(t, tt.cidrs, nil)
			// Populate the controller's internal trees from the seeded store.
			err := controller.syncCIDRs()
			if err != nil {
				t.Fatal(err)
			}
			// order is not guaranteed, compare as sorted sets
			if got := controller.containingServiceCIDRs(tt.ip); !cmp.Equal(got, tt.want, cmpopts.SortSlices(func(a, b string) bool { return a < b })) {
				t.Errorf("Controller.ipToCidrs() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
||||
|
||||
// TestController_cidrToCidrs verifies overlappingServiceCIDRs: it must return
// the names of every known ServiceCIDR whose ranges overlap the given one
// (including the CIDR itself when present in the store).
func TestController_cidrToCidrs(t *testing.T) {
	tests := []struct {
		name  string
		cidrs []*networkingapiv1alpha1.ServiceCIDR
		cidr  *networkingapiv1alpha1.ServiceCIDR
		want  []string
	}{
		{
			name: "empty",
			cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want: []string{},
		}, {
			name: "one CIDR",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName},
		}, {
			name: "two equal CIDR",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping"},
		}, {
			name: "three CIDR - two same and one larger",
			cidrs: []*networkingapiv1alpha1.ServiceCIDR{
				makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping", "192.168.0.0/24", "2001:db2::/64"),
				makeServiceCIDR("overlapping2", "192.168.0.0/26", "2001:db2::/96"),
				makeServiceCIDR("unrelated", "10.0.0.0/24", ""),
				makeServiceCIDR("unrelated2", "10.0.0.0/16", ""),
			},
			cidr: makeServiceCIDR(defaultservicecidr.DefaultServiceCIDRName, "192.168.0.0/24", "2001:db2::/64"),
			want: []string{defaultservicecidr.DefaultServiceCIDRName, "overlapping", "overlapping2"},
		}}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			_, controller := newController(t, tt.cidrs, nil)
			// Populate the controller's internal trees from the seeded store.
			err := controller.syncCIDRs()
			if err != nil {
				t.Fatal(err)
			}
			// order is not guaranteed, compare as sorted sets
			if got := controller.overlappingServiceCIDRs(tt.cidr); !cmp.Equal(got, tt.want, cmpopts.SortSlices(func(a, b string) bool { return a < b })) {
				t.Errorf("Controller.cidrToCidrs() = %v, want %v", got, tt.want)
			}
		})
	}
}
|
||||
@@ -0,0 +1,210 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultservicecidr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
metav1apply "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
networkingapiv1alpha1apply "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||
networkingv1alpha1informers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
v1core "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
networkingv1alpha1listers "k8s.io/client-go/listers/networking/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
const (
	// controllerName identifies this controller in events and as the
	// server-side-apply field manager.
	controllerName = "kubernetes-service-cidr-controller"
	// DefaultServiceCIDRName is the well-known name of the ServiceCIDR object
	// that mirrors the `--service-cluster-ip-range` flag.
	DefaultServiceCIDRName = "kubernetes"
)
|
||||
|
||||
// NewController returns a new *Controller that generates the default ServiceCIDR
// from the `--service-cluster-ip-range` flag and recreates it if necessary,
// but doesn't update it if it is different.
// It follows the same logic as the kubernetes.default Service.
func NewController(
	primaryRange net.IPNet,
	secondaryRange net.IPNet,
	client clientset.Interface,
) *Controller {
	broadcaster := record.NewBroadcaster()
	recorder := broadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: controllerName})

	c := &Controller{
		client:   client,
		interval: 10 * time.Second, // same as DefaultEndpointReconcilerInterval
	}

	// obtain configuration from flags; the secondary range is optional and a
	// nil IP means no second IP family was configured
	c.cidrs = append(c.cidrs, primaryRange.String())
	if secondaryRange.IP != nil {
		c.cidrs = append(c.cidrs, secondaryRange.String())
	}
	// instead of using the shared informers from the controlplane instance, we construct our own informer
	// because we need such a small subset of the information available, only the kubernetes.default ServiceCIDR
	c.serviceCIDRInformer = networkingv1alpha1informers.NewFilteredServiceCIDRInformer(client, 12*time.Hour,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(options *metav1.ListOptions) {
			options.FieldSelector = fields.OneTermEqualSelector("metadata.name", DefaultServiceCIDRName).String()
		})

	c.serviceCIDRLister = networkingv1alpha1listers.NewServiceCIDRLister(c.serviceCIDRInformer.GetIndexer())
	c.serviceCIDRsSynced = c.serviceCIDRInformer.HasSynced

	c.eventBroadcaster = broadcaster
	c.eventRecorder = recorder

	// closed by setReady once the default ServiceCIDR exists; Start blocks on it
	c.readyCh = make(chan struct{})

	return c
}
|
||||
|
||||
// Controller ensures the default "kubernetes" ServiceCIDR object exists,
// creating it from the configured flag ranges when missing and keeping its
// Ready condition in sync.
type Controller struct {
	cidrs []string // order matters, first cidr defines the default IP family

	client           clientset.Interface
	eventBroadcaster record.EventBroadcaster
	eventRecorder    record.EventRecorder

	// single-object informer/lister filtered to the default ServiceCIDR name
	serviceCIDRInformer cache.SharedIndexInformer
	serviceCIDRLister   networkingv1alpha1listers.ServiceCIDRLister
	serviceCIDRsSynced  cache.InformerSynced

	readyCh chan struct{} // channel to block until the default ServiceCIDR exists

	// interval between reconciliation attempts
	interval time.Duration
}
|
||||
|
||||
// Start will not return until the default ServiceCIDR exists or stopCh is closed.
// It runs the filtered informer and a periodic sync loop; both keep running in
// the background after Start returns, until stopCh is closed.
func (c *Controller) Start(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()

	c.eventBroadcaster.StartStructuredLogging(0)
	c.eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: c.client.CoreV1().Events("")})
	defer c.eventBroadcaster.Shutdown()

	klog.Infof("Starting %s", controllerName)
	defer klog.Infof("Shutting down %s", controllerName)

	go c.serviceCIDRInformer.Run(stopCh)
	if !cache.WaitForNamedCacheSync(controllerName, stopCh, c.serviceCIDRsSynced) {
		return
	}

	// reconcile periodically; sync closes readyCh once the object exists
	go wait.Until(c.sync, c.interval, stopCh)

	// block until the default ServiceCIDR is ready or we are told to stop
	select {
	case <-stopCh:
	case <-c.readyCh:
	}
}
|
||||
|
||||
// sync ensures the default ServiceCIDR object exists, creating it from the
// configured flag ranges when missing, and reconciles its Ready condition.
// Errors are logged and retried on the next periodic invocation.
func (c *Controller) sync() {
	// check if the default ServiceCIDR already exist
	serviceCIDR, err := c.serviceCIDRLister.Get(DefaultServiceCIDRName)
	// if exists
	if err == nil {
		c.setReady()
		c.syncStatus(serviceCIDR)
		return
	}

	// unknown error
	if !apierrors.IsNotFound(err) {
		klog.Infof("error trying to obtain the default ServiceCIDR: %v", err)
		return
	}

	// default ServiceCIDR does not exist
	klog.Infof("Creating default ServiceCIDR with CIDRs: %v", c.cidrs)
	serviceCIDR = &networkingapiv1alpha1.ServiceCIDR{
		ObjectMeta: metav1.ObjectMeta{
			Name: DefaultServiceCIDRName,
		},
		Spec: networkingapiv1alpha1.ServiceCIDRSpec{
			CIDRs: c.cidrs,
		},
	}
	serviceCIDR, err = c.client.NetworkingV1alpha1().ServiceCIDRs().Create(context.Background(), serviceCIDR, metav1.CreateOptions{})
	if err != nil && !apierrors.IsAlreadyExists(err) {
		klog.Infof("error creating default ServiceCIDR: %v", err)
		c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, "KubernetesDefaultServiceCIDRError", "The default ServiceCIDR can not be created")
		return
	}

	// NOTE(review): on an AlreadyExists error the object returned by Create is
	// presumably empty; syncStatus then matches nothing and the real object is
	// picked up from the lister on the next interval — confirm intended.
	c.setReady()
	c.syncStatus(serviceCIDR)
}
|
||||
|
||||
// setReady unblocks Start by closing readyCh. It is idempotent: the select
// guard avoids closing an already-closed channel.
func (c *Controller) setReady() {
	select {
	case <-c.readyCh:
		// already closed, nothing to do
	default:
		close(c.readyCh)
	}
}
|
||||
|
||||
// syncStatus sets the Ready=True condition on the default ServiceCIDR via
// server-side apply when the condition is absent and the object's CIDRs match
// this controller's configured CIDRs. Failures are logged and emitted as
// warning events; they are retried on the next periodic sync.
func (c *Controller) syncStatus(serviceCIDR *networkingapiv1alpha1.ServiceCIDR) {
	// don't sync the status of the ServiceCIDR if it is being deleted,
	// deletion must be handled by the controller-manager
	if !serviceCIDR.GetDeletionTimestamp().IsZero() {
		return
	}

	// This controller will set the Ready condition to true if the Ready condition
	// does not exist and the CIDR values match this controller CIDR values.
	for _, condition := range serviceCIDR.Status.Conditions {
		if condition.Type == networkingapiv1alpha1.ServiceCIDRConditionReady {
			if condition.Status == metav1.ConditionTrue {
				// already Ready, nothing to do
				return
			}
			// Ready exists but is not True: surface it and leave it alone
			klog.Infof("default ServiceCIDR condition Ready is not True: %v", condition.Status)
			c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, condition.Reason, condition.Message)
			return
		}
	}
	// set status to ready if the ServiceCIDR matches this configuration
	// (order-sensitive comparison: the first CIDR defines the default family)
	if reflect.DeepEqual(c.cidrs, serviceCIDR.Spec.CIDRs) {
		klog.Infof("Setting default ServiceCIDR condition Ready to True")
		svcApplyStatus := networkingapiv1alpha1apply.ServiceCIDRStatus().WithConditions(
			metav1apply.Condition().
				WithType(networkingapiv1alpha1.ServiceCIDRConditionReady).
				WithStatus(metav1.ConditionTrue).
				WithMessage("Kubernetes default Service CIDR is ready").
				WithLastTransitionTime(metav1.Now()))
		svcApply := networkingapiv1alpha1apply.ServiceCIDR(DefaultServiceCIDRName).WithStatus(svcApplyStatus)
		if _, errApply := c.client.NetworkingV1alpha1().ServiceCIDRs().ApplyStatus(context.Background(), svcApply, metav1.ApplyOptions{FieldManager: controllerName, Force: true}); errApply != nil {
			klog.Infof("error updating default ServiceCIDR status: %v", errApply)
			c.eventRecorder.Eventf(serviceCIDR, v1.EventTypeWarning, "KubernetesDefaultServiceCIDRError", "The default ServiceCIDR Status can not be set to Ready=True")
		}
	}
}
|
||||
@@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaultservicecidr
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
const (
	// default dual-stack ranges used to configure the controller under test
	defaultIPv4CIDR = "10.16.0.0/16"
	defaultIPv6CIDR = "2001:db8::/64"
)
|
||||
|
||||
func newController(t *testing.T, objects []*networkingapiv1alpha1.ServiceCIDR) (*fake.Clientset, *Controller) {
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(client, 0)
|
||||
serviceCIDRInformer := informerFactory.Networking().V1alpha1().ServiceCIDRs()
|
||||
|
||||
store := serviceCIDRInformer.Informer().GetStore()
|
||||
for _, obj := range objects {
|
||||
err := store.Add(obj)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
}
|
||||
c := &Controller{
|
||||
client: client,
|
||||
interval: time.Second,
|
||||
cidrs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
|
||||
eventRecorder: record.NewFakeRecorder(100),
|
||||
serviceCIDRLister: serviceCIDRInformer.Lister(),
|
||||
serviceCIDRsSynced: func() bool { return true },
|
||||
readyCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
return client, c
|
||||
}
|
||||
|
||||
func TestControllerSync(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
cidrs []*networkingapiv1alpha1.ServiceCIDR
|
||||
actions [][]string // verb and resource
|
||||
}{
|
||||
{
|
||||
name: "no existing service CIDRs",
|
||||
actions: [][]string{{"create", "servicecidrs"}, {"patch", "servicecidrs"}},
|
||||
},
|
||||
{
|
||||
name: "existing default service CIDR update Ready condition",
|
||||
cidrs: []*networkingapiv1alpha1.ServiceCIDR{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceCIDRName,
|
||||
},
|
||||
Spec: networkingapiv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
|
||||
},
|
||||
},
|
||||
},
|
||||
actions: [][]string{{"patch", "servicecidrs"}},
|
||||
},
|
||||
{
|
||||
name: "existing default service CIDR not matching cidrs",
|
||||
cidrs: []*networkingapiv1alpha1.ServiceCIDR{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceCIDRName,
|
||||
},
|
||||
Spec: networkingapiv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{"fd00::/112"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing default service CIDR not ready",
|
||||
cidrs: []*networkingapiv1alpha1.ServiceCIDR{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceCIDRName,
|
||||
},
|
||||
Spec: networkingapiv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
|
||||
},
|
||||
Status: networkingapiv1alpha1.ServiceCIDRStatus{
|
||||
Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: string(networkingapiv1alpha1.ServiceCIDRConditionReady),
|
||||
Status: metav1.ConditionFalse,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing default service CIDR being deleted",
|
||||
cidrs: []*networkingapiv1alpha1.ServiceCIDR{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: DefaultServiceCIDRName,
|
||||
DeletionTimestamp: ptr.To(metav1.Now()),
|
||||
},
|
||||
Spec: networkingapiv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "existing service CIDRs but not default",
|
||||
cidrs: []*networkingapiv1alpha1.ServiceCIDR{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "non-default-cidr",
|
||||
},
|
||||
Spec: networkingapiv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{defaultIPv4CIDR, defaultIPv6CIDR},
|
||||
},
|
||||
},
|
||||
},
|
||||
actions: [][]string{{"create", "servicecidrs"}, {"patch", "servicecidrs"}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
client, controller := newController(t, tc.cidrs)
|
||||
controller.sync()
|
||||
expectAction(t, client.Actions(), tc.actions)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func expectAction(t *testing.T, actions []k8stesting.Action, expected [][]string) {
|
||||
t.Helper()
|
||||
if len(actions) != len(expected) {
|
||||
t.Fatalf("Expected at least %d actions, got %d \ndiff: %v", len(expected), len(actions), cmp.Diff(expected, actions))
|
||||
}
|
||||
|
||||
for i, action := range actions {
|
||||
verb := expected[i][0]
|
||||
if action.GetVerb() != verb {
|
||||
t.Errorf("Expected action %d verb to be %s, got %s", i, verb, action.GetVerb())
|
||||
}
|
||||
resource := expected[i][1]
|
||||
if action.GetResource().Resource != resource {
|
||||
t.Errorf("Expected action %d resource to be %s, got %s", i, resource, action.GetResource().Resource)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -82,6 +82,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/controlplane/apiserver/options"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/apiserverleasegc"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/clusterauthenticationtrust"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/kubernetesservice"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/legacytokentracking"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/systemnamespaces"
|
||||
@@ -511,6 +512,20 @@ func (c completedConfig) New(delegationTarget genericapiserver.DelegationTarget)
|
||||
return nil
|
||||
})
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
m.GenericAPIServer.AddPostStartHookOrDie("start-kubernetes-service-cidr-controller", func(hookContext genericapiserver.PostStartHookContext) error {
|
||||
controller := defaultservicecidr.NewController(
|
||||
c.ExtraConfig.ServiceIPRange,
|
||||
c.ExtraConfig.SecondaryServiceIPRange,
|
||||
clientset,
|
||||
)
|
||||
// The default serviceCIDR must exist before the apiserver is healthy
|
||||
// otherwise the allocators for Services will not work.
|
||||
controller.Start(hookContext.StopCh)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.UnknownVersionInteroperabilityProxy) {
|
||||
peeraddress := getPeerAddress(c.ExtraConfig.PeerAdvertiseAddress, c.GenericConfig.PublicAddress, publicServicePort)
|
||||
peerEndpointCtrl := peerreconcilers.New(
|
||||
|
||||
172
pkg/generated/openapi/zz_generated.openapi.go
generated
172
pkg/generated/openapi/zz_generated.openapi.go
generated
@@ -767,6 +767,10 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
|
||||
"k8s.io/api/networking/v1alpha1.IPAddressList": schema_k8sio_api_networking_v1alpha1_IPAddressList(ref),
|
||||
"k8s.io/api/networking/v1alpha1.IPAddressSpec": schema_k8sio_api_networking_v1alpha1_IPAddressSpec(ref),
|
||||
"k8s.io/api/networking/v1alpha1.ParentReference": schema_k8sio_api_networking_v1alpha1_ParentReference(ref),
|
||||
"k8s.io/api/networking/v1alpha1.ServiceCIDR": schema_k8sio_api_networking_v1alpha1_ServiceCIDR(ref),
|
||||
"k8s.io/api/networking/v1alpha1.ServiceCIDRList": schema_k8sio_api_networking_v1alpha1_ServiceCIDRList(ref),
|
||||
"k8s.io/api/networking/v1alpha1.ServiceCIDRSpec": schema_k8sio_api_networking_v1alpha1_ServiceCIDRSpec(ref),
|
||||
"k8s.io/api/networking/v1alpha1.ServiceCIDRStatus": schema_k8sio_api_networking_v1alpha1_ServiceCIDRStatus(ref),
|
||||
"k8s.io/api/networking/v1beta1.HTTPIngressPath": schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref),
|
||||
"k8s.io/api/networking/v1beta1.HTTPIngressRuleValue": schema_k8sio_api_networking_v1beta1_HTTPIngressRuleValue(ref),
|
||||
"k8s.io/api/networking/v1beta1.Ingress": schema_k8sio_api_networking_v1beta1_Ingress(ref),
|
||||
@@ -38366,6 +38370,174 @@ func schema_k8sio_api_networking_v1alpha1_ParentReference(ref common.ReferenceCa
|
||||
}
|
||||
}
|
||||
|
||||
func schema_k8sio_api_networking_v1alpha1_ServiceCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"),
|
||||
},
|
||||
},
|
||||
"spec": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/api/networking/v1alpha1.ServiceCIDRSpec"),
|
||||
},
|
||||
},
|
||||
"status": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/api/networking/v1alpha1.ServiceCIDRStatus"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/api/networking/v1alpha1.ServiceCIDRSpec", "k8s.io/api/networking/v1alpha1.ServiceCIDRStatus", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_k8sio_api_networking_v1alpha1_ServiceCIDRList(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "ServiceCIDRList contains a list of ServiceCIDR objects.",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"kind": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"apiVersion": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
"metadata": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"),
|
||||
},
|
||||
},
|
||||
"items": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "items is the list of ServiceCIDRs.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/api/networking/v1alpha1.ServiceCIDR"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Required: []string{"items"},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/api/networking/v1alpha1.ServiceCIDR", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_k8sio_api_networking_v1alpha1_ServiceCIDRSpec(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"cidrs": {
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: "",
|
||||
Type: []string{"string"},
|
||||
Format: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_k8sio_api_networking_v1alpha1_ServiceCIDRStatus(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
|
||||
Type: []string{"object"},
|
||||
Properties: map[string]spec.Schema{
|
||||
"conditions": {
|
||||
VendorExtensible: spec.VendorExtensible{
|
||||
Extensions: spec.Extensions{
|
||||
"x-kubernetes-list-map-keys": []interface{}{
|
||||
"type",
|
||||
},
|
||||
"x-kubernetes-list-type": "map",
|
||||
"x-kubernetes-patch-merge-key": "type",
|
||||
"x-kubernetes-patch-strategy": "merge",
|
||||
},
|
||||
},
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Description: "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
|
||||
Type: []string{"array"},
|
||||
Items: &spec.SchemaOrArray{
|
||||
Schema: &spec.Schema{
|
||||
SchemaProps: spec.SchemaProps{
|
||||
Default: map[string]interface{}{},
|
||||
Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.Condition"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Dependencies: []string{
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1.Condition"},
|
||||
}
|
||||
}
|
||||
|
||||
func schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref common.ReferenceCallback) common.OpenAPIDefinition {
|
||||
return common.OpenAPIDefinition{
|
||||
Schema: spec.Schema{
|
||||
|
||||
@@ -73,6 +73,7 @@ func NewStorageFactoryConfig() *StorageFactoryConfig {
|
||||
admissionregistration.Resource("validatingadmissionpolicies").WithVersion("v1beta1"),
|
||||
admissionregistration.Resource("validatingadmissionpolicybindings").WithVersion("v1beta1"),
|
||||
networking.Resource("ipaddresses").WithVersion("v1alpha1"),
|
||||
networking.Resource("servicecidrs").WithVersion("v1alpha1"),
|
||||
certificates.Resource("clustertrustbundles").WithVersion("v1alpha1"),
|
||||
storage.Resource("volumeattributesclasses").WithVersion("v1alpha1"),
|
||||
}
|
||||
|
||||
@@ -654,13 +654,22 @@ func AddHandlers(h printers.PrintHandler) {
|
||||
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContext)
|
||||
_ = h.TableHandler(podSchedulingCtxColumnDefinitions, printPodSchedulingContextList)
|
||||
|
||||
serviceCIDRColumnDefinitions := []metav1.TableColumnDefinition{
|
||||
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
|
||||
{Name: "CIDRs", Type: "string", Description: networkingv1alpha1.ServiceCIDRSpec{}.SwaggerDoc()["cidrs"]},
|
||||
{Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]},
|
||||
}
|
||||
|
||||
_ = h.TableHandler(serviceCIDRColumnDefinitions, printServiceCIDR)
|
||||
_ = h.TableHandler(serviceCIDRColumnDefinitions, printServiceCIDRList)
|
||||
|
||||
ipAddressColumnDefinitions := []metav1.TableColumnDefinition{
|
||||
{Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]},
|
||||
{Name: "ParentRef", Type: "string", Description: networkingv1alpha1.IPAddressSpec{}.SwaggerDoc()["parentRef"]},
|
||||
}
|
||||
|
||||
h.TableHandler(ipAddressColumnDefinitions, printIPAddress)
|
||||
h.TableHandler(ipAddressColumnDefinitions, printIPAddressList)
|
||||
_ = h.TableHandler(ipAddressColumnDefinitions, printIPAddress)
|
||||
_ = h.TableHandler(ipAddressColumnDefinitions, printIPAddressList)
|
||||
}
|
||||
|
||||
// Pass ports=nil for all ports.
|
||||
@@ -2838,6 +2847,28 @@ func printPriorityLevelConfigurationList(list *flowcontrol.PriorityLevelConfigur
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
func printServiceCIDR(obj *networking.ServiceCIDR, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: obj},
|
||||
}
|
||||
|
||||
cidrs := strings.Join(obj.Spec.CIDRs, ",")
|
||||
row.Cells = append(row.Cells, obj.Name, cidrs, translateTimestampSince(obj.CreationTimestamp))
|
||||
return []metav1.TableRow{row}, nil
|
||||
}
|
||||
|
||||
func printServiceCIDRList(list *networking.ServiceCIDRList, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
||||
rows := make([]metav1.TableRow, 0, len(list.Items))
|
||||
for i := range list.Items {
|
||||
r, err := printServiceCIDR(&list.Items[i], options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rows = append(rows, r...)
|
||||
}
|
||||
return rows, nil
|
||||
}
|
||||
|
||||
func printIPAddress(obj *networking.IPAddress, options printers.GenerateOptions) ([]metav1.TableRow, error) {
|
||||
row := metav1.TableRow{
|
||||
Object: runtime.RawExtension{Object: obj},
|
||||
|
||||
@@ -6574,3 +6574,120 @@ func TestPrintIPAddressList(t *testing.T) {
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestPrintServiceCIDR(t *testing.T) {
|
||||
ipv4CIDR := "10.1.0.0/16"
|
||||
ipv6CIDR := "fd00:1:1::/64"
|
||||
|
||||
tests := []struct {
|
||||
ccc networking.ServiceCIDR
|
||||
options printers.GenerateOptions
|
||||
expected []metav1.TableRow
|
||||
}{
|
||||
{
|
||||
// Test name, IPv4 only.
|
||||
ccc: networking.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test1"},
|
||||
Spec: networking.ServiceCIDRSpec{
|
||||
CIDRs: []string{ipv4CIDR},
|
||||
},
|
||||
},
|
||||
options: printers.GenerateOptions{},
|
||||
// Columns: Name, IPv4, IPv6, Age.
|
||||
expected: []metav1.TableRow{{Cells: []interface{}{"test1", ipv4CIDR, "<unknown>"}}},
|
||||
},
|
||||
{
|
||||
// Test name, IPv6 only.
|
||||
ccc: networking.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test5"},
|
||||
Spec: networking.ServiceCIDRSpec{
|
||||
CIDRs: []string{ipv6CIDR},
|
||||
},
|
||||
},
|
||||
options: printers.GenerateOptions{},
|
||||
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age
|
||||
expected: []metav1.TableRow{{Cells: []interface{}{"test5", ipv6CIDR, "<unknown>"}}},
|
||||
},
|
||||
{
|
||||
// Test name, DualStack.
|
||||
ccc: networking.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "test9"},
|
||||
Spec: networking.ServiceCIDRSpec{
|
||||
CIDRs: []string{ipv4CIDR, ipv6CIDR},
|
||||
},
|
||||
},
|
||||
options: printers.GenerateOptions{},
|
||||
// Columns: Name, PerNodeHostBits, IPv4, IPv6, Age.
|
||||
expected: []metav1.TableRow{{Cells: []interface{}{"test9", ipv4CIDR + "," + ipv6CIDR, "<unknown>"}}},
|
||||
},
|
||||
}
|
||||
|
||||
for i, test := range tests {
|
||||
rows, err := printServiceCIDR(&test.ccc, test.options)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for i := range rows {
|
||||
rows[i].Object.Object = nil
|
||||
}
|
||||
if !reflect.DeepEqual(test.expected, rows) {
|
||||
t.Errorf("%d mismatch: %s", i, cmp.Diff(test.expected, rows))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrintServiceCIDRList(t *testing.T) {
|
||||
cccList := networking.ServiceCIDRList{
|
||||
Items: []networking.ServiceCIDR{
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "ccc1"},
|
||||
Spec: networking.ServiceCIDRSpec{
|
||||
CIDRs: []string{"10.1.0.0/16", "fd00:1:1::/64"},
|
||||
},
|
||||
},
|
||||
{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: "ccc2"},
|
||||
Spec: networking.ServiceCIDRSpec{
|
||||
CIDRs: []string{"10.2.0.0/16", "fd00:2:1::/64"},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
options printers.GenerateOptions
|
||||
expected []metav1.TableRow
|
||||
}{
|
||||
{
|
||||
// Test name, DualStack with node selector, wide.
|
||||
options: printers.GenerateOptions{Wide: false},
|
||||
expected: []metav1.TableRow{
|
||||
// Columns: Name, IPv4, IPv6, Age.
|
||||
{Cells: []interface{}{"ccc1", "10.1.0.0/16,fd00:1:1::/64", "<unknown>"}},
|
||||
{Cells: []interface{}{"ccc2", "10.2.0.0/16,fd00:2:1::/64", "<unknown>"}},
|
||||
},
|
||||
},
|
||||
{
|
||||
// Test name, DualStack with node selector, wide.
|
||||
options: printers.GenerateOptions{Wide: true},
|
||||
expected: []metav1.TableRow{
|
||||
// Columns: Name, CIDRs, Age.
|
||||
{Cells: []interface{}{"ccc1", "10.1.0.0/16,fd00:1:1::/64", "<unknown>"}},
|
||||
{Cells: []interface{}{"ccc2", "10.2.0.0/16,fd00:2:1::/64", "<unknown>"}},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
rows, err := printServiceCIDRList(&cccList, test.options)
|
||||
if err != nil {
|
||||
t.Fatalf("Error printing service list: %#v", err)
|
||||
}
|
||||
for i := range rows {
|
||||
rows[i].Object.Object = nil
|
||||
}
|
||||
if !reflect.DeepEqual(test.expected, rows) {
|
||||
t.Errorf("mismatch: %s", cmp.Diff(test.expected, rows))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -61,6 +61,7 @@ import (
|
||||
serviceaccountstore "k8s.io/kubernetes/pkg/registry/core/serviceaccount/storage"
|
||||
kubeschedulerconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
|
||||
"k8s.io/kubernetes/pkg/util/async"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
// Config provides information needed to build RESTStorage for core.
|
||||
@@ -137,9 +138,8 @@ func New(c Config) (*legacyProvider, error) {
|
||||
p.startServiceClusterIPRepair = serviceipallocatorcontroller.NewRepairIPAddress(
|
||||
c.Services.IPRepairInterval,
|
||||
client,
|
||||
&c.Services.ClusterIPRange,
|
||||
&c.Services.SecondaryClusterIPRange,
|
||||
c.Informers.Core().V1().Services(),
|
||||
c.Informers.Networking().V1alpha1().ServiceCIDRs(),
|
||||
c.Informers.Networking().V1alpha1().IPAddresses(),
|
||||
).RunUntil
|
||||
}
|
||||
@@ -351,7 +351,16 @@ func (c *Config) newServiceIPAllocators() (registries rangeRegistries, primaryCl
|
||||
if err != nil {
|
||||
return rangeRegistries{}, nil, nil, nil, err
|
||||
}
|
||||
primaryClusterIPAllocator, err = ipallocator.NewIPAllocator(&serviceClusterIPRange, networkingv1alphaClient, c.Informers.Networking().V1alpha1().IPAddresses())
|
||||
// TODO(aojea) Revisit the initialization of the allocators
|
||||
// since right now it depends on the service-cidr flags and
|
||||
// sets the default IPFamily that may not be coherent with the
|
||||
// existing default ServiceCIDR
|
||||
primaryClusterIPAllocator, err = ipallocator.NewMetaAllocator(
|
||||
networkingv1alphaClient,
|
||||
c.Informers.Networking().V1alpha1().ServiceCIDRs(),
|
||||
c.Informers.Networking().V1alpha1().IPAddresses(),
|
||||
netutils.IsIPv6CIDR(&serviceClusterIPRange),
|
||||
)
|
||||
if err != nil {
|
||||
return rangeRegistries{}, nil, nil, nil, fmt.Errorf("cannot create cluster IP allocator: %v", err)
|
||||
}
|
||||
@@ -382,7 +391,16 @@ func (c *Config) newServiceIPAllocators() (registries rangeRegistries, primaryCl
|
||||
if err != nil {
|
||||
return rangeRegistries{}, nil, nil, nil, err
|
||||
}
|
||||
secondaryClusterIPAllocator, err = ipallocator.NewIPAllocator(&c.Services.SecondaryClusterIPRange, networkingv1alphaClient, c.Informers.Networking().V1alpha1().IPAddresses())
|
||||
// TODO(aojea) Revisit the initialization of the allocators
|
||||
// since right now it depends on the service-cidr flags and
|
||||
// sets the default IPFamily that may not be coherent with the
|
||||
// existing default ServiceCIDR
|
||||
secondaryClusterIPAllocator, err = ipallocator.NewMetaAllocator(
|
||||
networkingv1alphaClient,
|
||||
c.Informers.Networking().V1alpha1().ServiceCIDRs(),
|
||||
c.Informers.Networking().V1alpha1().IPAddresses(),
|
||||
netutils.IsIPv6CIDR(&c.Services.SecondaryClusterIPRange),
|
||||
)
|
||||
if err != nil {
|
||||
return rangeRegistries{}, nil, nil, nil, fmt.Errorf("cannot create cluster secondary IP allocator: %v", err)
|
||||
}
|
||||
|
||||
462
pkg/registry/core/service/ipallocator/cidrallocator.go
Normal file
462
pkg/registry/core/service/ipallocator/cidrallocator.go
Normal file
@@ -0,0 +1,462 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipallocator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
networkingv1alpha1informers "k8s.io/client-go/informers/networking/v1alpha1"
|
||||
networkingv1alpha1client "k8s.io/client-go/kubernetes/typed/networking/v1alpha1"
|
||||
networkingv1alpha1listers "k8s.io/client-go/listers/networking/v1alpha1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
"k8s.io/klog/v2"
|
||||
api "k8s.io/kubernetes/pkg/apis/core"
|
||||
"k8s.io/kubernetes/pkg/util/iptree"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
// MetaAllocator maintains a Tree with the ServiceCIDRs containing an IP Allocator
|
||||
// on the nodes. Since each allocator doesn't stored the IPAddresses because it reads
|
||||
// them from the informer cache, it is cheap to create and delete IP Allocators.
|
||||
// MetaAllocator forwards the request to any of the internal allocators that has free
|
||||
// addresses.
|
||||
|
||||
// MetaAllocator implements current allocator interface using
|
||||
// ServiceCIDR and IPAddress API objects.
|
||||
type MetaAllocator struct {
|
||||
client networkingv1alpha1client.NetworkingV1alpha1Interface
|
||||
serviceCIDRLister networkingv1alpha1listers.ServiceCIDRLister
|
||||
serviceCIDRSynced cache.InformerSynced
|
||||
ipAddressLister networkingv1alpha1listers.IPAddressLister
|
||||
ipAddressSynced cache.InformerSynced
|
||||
ipAddressInformer networkingv1alpha1informers.IPAddressInformer
|
||||
queue workqueue.RateLimitingInterface
|
||||
|
||||
internalStopCh chan struct{}
|
||||
|
||||
muTree sync.Mutex
|
||||
tree *iptree.Tree[*Allocator]
|
||||
|
||||
ipFamily api.IPFamily
|
||||
}
|
||||
|
||||
var _ Interface = &MetaAllocator{}
|
||||
|
||||
// NewMetaAllocator returns an IP allocator that use the IPAddress
|
||||
// and ServiceCIDR objects to track the assigned IP addresses,
|
||||
// using an informer cache as storage.
|
||||
func NewMetaAllocator(
|
||||
client networkingv1alpha1client.NetworkingV1alpha1Interface,
|
||||
serviceCIDRInformer networkingv1alpha1informers.ServiceCIDRInformer,
|
||||
ipAddressInformer networkingv1alpha1informers.IPAddressInformer,
|
||||
isIPv6 bool,
|
||||
) (*MetaAllocator, error) {
|
||||
|
||||
// TODO: make the NewMetaAllocator agnostic of the IP family
|
||||
family := api.IPv4Protocol
|
||||
if isIPv6 {
|
||||
family = api.IPv6Protocol
|
||||
}
|
||||
|
||||
c := &MetaAllocator{
|
||||
client: client,
|
||||
serviceCIDRLister: serviceCIDRInformer.Lister(),
|
||||
serviceCIDRSynced: serviceCIDRInformer.Informer().HasSynced,
|
||||
ipAddressLister: ipAddressInformer.Lister(),
|
||||
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
|
||||
ipAddressInformer: ipAddressInformer,
|
||||
queue: workqueue.NewRateLimitingQueueWithConfig(workqueue.DefaultControllerRateLimiter(), workqueue.RateLimitingQueueConfig{Name: ControllerName}),
|
||||
internalStopCh: make(chan struct{}),
|
||||
tree: iptree.New[*Allocator](),
|
||||
ipFamily: family,
|
||||
}
|
||||
|
||||
_, _ = serviceCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: c.addServiceCIDR,
|
||||
UpdateFunc: c.updateServiceCIDR,
|
||||
DeleteFunc: c.deleteServiceCIDR,
|
||||
})
|
||||
|
||||
go c.run()
|
||||
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) addServiceCIDR(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
c.queue.Add(key)
|
||||
}
|
||||
}
|
||||
func (c *MetaAllocator) updateServiceCIDR(old, new interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(new)
|
||||
if err == nil {
|
||||
c.queue.Add(key)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) deleteServiceCIDR(obj interface{}) {
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
c.queue.Add(key)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) run() {
|
||||
defer runtime.HandleCrash()
|
||||
defer c.queue.ShutDown()
|
||||
klog.Info("Starting ServiceCIDR Allocator Controller")
|
||||
defer klog.Info("Stopping ServiceCIDR Allocator Controllerr")
|
||||
|
||||
// Wait for all involved caches to be synced, before processing items from the queue is started
|
||||
if !cache.WaitForCacheSync(c.internalStopCh, c.serviceCIDRSynced, c.ipAddressSynced) {
|
||||
runtime.HandleError(fmt.Errorf("timed out waiting for caches to sync"))
|
||||
return
|
||||
}
|
||||
|
||||
// this is single threaded only one serviceCIDR at a time
|
||||
go wait.Until(c.runWorker, time.Second, c.internalStopCh)
|
||||
|
||||
<-c.internalStopCh
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) runWorker() {
|
||||
for c.processNextItem() {
|
||||
}
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) processNextItem() bool {
|
||||
// Wait until there is a new item in the working queue
|
||||
key, quit := c.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer c.queue.Done(key)
|
||||
|
||||
err := c.syncTree()
|
||||
// Handle the error if something went wrong during the execution of the business logic
|
||||
if err != nil {
|
||||
if c.queue.NumRequeues(key) < 5 {
|
||||
klog.Infof("Error syncing cidr %v: %v", key, err)
|
||||
c.queue.AddRateLimited(key)
|
||||
return true
|
||||
}
|
||||
}
|
||||
c.queue.Forget(key)
|
||||
return true
|
||||
}
|
||||
|
||||
// syncTree syncs the ipTrees from the informer cache
// It deletes or creates allocator and sets the corresponding state:
// the set of CIDRs in the tree is reconciled against the CIDRs listed on
// the ServiceCIDR objects for this allocator's IP family, and each
// allocator's ready flag mirrors the ServiceCIDR Ready condition.
func (c *MetaAllocator) syncTree() error {
	now := time.Now()
	defer func() {
		klog.Infof("Finished sync for CIDRs took %v", time.Since(now))
	}()

	serviceCIDRs, err := c.serviceCIDRLister.List(labels.Everything())
	if err != nil {
		return err
	}

	// Desired state: every CIDR of this family, and whether it is ready.
	// A ServiceCIDR being deleted (non-zero DeletionTimestamp) is not ready.
	cidrsSet := sets.New[string]()
	cidrReady := map[string]bool{}
	for _, serviceCIDR := range serviceCIDRs {
		ready := true
		if !isReady(serviceCIDR) || !serviceCIDR.DeletionTimestamp.IsZero() {
			ready = false
		}

		for _, cidr := range serviceCIDR.Spec.CIDRs {
			// Only track CIDRs matching this allocator's IP family.
			if c.ipFamily == api.IPFamily(convertToV1IPFamily(netutils.IPFamilyOfCIDRString(cidr))) {
				cidrsSet.Insert(cidr)
				cidrReady[cidr] = ready
			}
		}
	}

	// obtain the existing allocators and set the existing state
	// (walk the tree under the lock, updating each allocator's ready flag
	// to the freshly computed value; returning false continues the walk).
	treeSet := sets.New[string]()
	c.muTree.Lock()
	c.tree.DepthFirstWalk(c.ipFamily == api.IPv6Protocol, func(k netip.Prefix, v *Allocator) bool {
		v.ready.Store(cidrReady[k.String()])
		treeSet.Insert(k.String())
		return false
	})
	c.muTree.Unlock()
	cidrsToRemove := treeSet.Difference(cidrsSet)
	cidrsToAdd := cidrsSet.Difference(treeSet)

	errs := []error{}
	// Add new allocators
	for _, cidr := range cidrsToAdd.UnsortedList() {
		_, ipnet, err := netutils.ParseCIDRSloppy(cidr)
		if err != nil {
			return err
		}
		// New ServiceCIDR, create new allocator
		allocator, err := NewIPAllocator(ipnet, c.client, c.ipAddressInformer)
		if err != nil {
			// Collect the error and keep processing the remaining CIDRs.
			errs = append(errs, err)
			continue
		}
		allocator.ready.Store(cidrReady[cidr])
		prefix, err := netip.ParsePrefix(cidr)
		if err != nil {
			return err
		}
		c.addAllocator(prefix, allocator)
		klog.Infof("Created ClusterIP allocator for Service CIDR %s", cidr)
	}
	// Remove allocators that no longer exist
	for _, cidr := range cidrsToRemove.UnsortedList() {
		prefix, err := netip.ParsePrefix(cidr)
		if err != nil {
			return err
		}
		c.deleteAllocator(prefix)
	}

	return utilerrors.NewAggregate(errs)
}
|
||||
|
||||
func (c *MetaAllocator) getAllocator(ip net.IP) (*Allocator, error) {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
|
||||
address := ipToAddr(ip)
|
||||
prefix := netip.PrefixFrom(address, address.BitLen())
|
||||
// Use the largest subnet to allocate addresses because
|
||||
// all the other subnets will be contained.
|
||||
_, allocator, ok := c.tree.ShortestPrefixMatch(prefix)
|
||||
if !ok {
|
||||
klog.V(2).Infof("Could not get allocator for IP %s", ip.String())
|
||||
return nil, ErrMismatchedNetwork
|
||||
}
|
||||
return allocator, nil
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) addAllocator(cidr netip.Prefix, allocator *Allocator) {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
c.tree.InsertPrefix(cidr, allocator)
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) deleteAllocator(cidr netip.Prefix) {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
ok := c.tree.DeletePrefix(cidr)
|
||||
if ok {
|
||||
klog.V(3).Infof("CIDR %s deleted", cidr)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) AllocateService(service *api.Service, ip net.IP) error {
|
||||
allocator, err := c.getAllocator(ip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return allocator.AllocateService(service, ip)
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) Allocate(ip net.IP) error {
|
||||
allocator, err := c.getAllocator(ip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return allocator.Allocate(ip)
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) AllocateNextService(service *api.Service) (net.IP, error) {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
|
||||
// TODO(aojea) add strategy to return a random allocator but
|
||||
// taking into consideration the number of addresses of each allocator.
|
||||
// Per example, if we have allocator A and B with 256 and 1024 possible
|
||||
// addresses each, the chances to get B has to be 4 times the chances to
|
||||
// get A so we can spread the load of IPs randomly.
|
||||
// However, we need to validate the best strategy before going to Beta.
|
||||
isIPv6 := c.ipFamily == api.IPFamily(v1.IPv6Protocol)
|
||||
for _, allocator := range c.tree.TopLevelPrefixes(isIPv6) {
|
||||
ip, err := allocator.AllocateNextService(service)
|
||||
if err == nil {
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrFull
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) AllocateNext() (net.IP, error) {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
|
||||
// TODO(aojea) add strategy to return a random allocator but
|
||||
// taking into consideration the number of addresses of each allocator.
|
||||
// Per example, if we have allocator A and B with 256 and 1024 possible
|
||||
// addresses each, the chances to get B has to be 4 times the chances to
|
||||
// get A so we can spread the load of IPs randomly.
|
||||
// However, we need to validate the best strategy before going to Beta.
|
||||
isIPv6 := c.ipFamily == api.IPFamily(v1.IPv6Protocol)
|
||||
for _, allocator := range c.tree.TopLevelPrefixes(isIPv6) {
|
||||
ip, err := allocator.AllocateNext()
|
||||
if err == nil {
|
||||
return ip, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrFull
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) Release(ip net.IP) error {
|
||||
allocator, err := c.getAllocator(ip)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return allocator.Release(ip)
|
||||
|
||||
}
|
||||
func (c *MetaAllocator) ForEach(f func(ip net.IP)) {
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(c.IPFamily()),
|
||||
networkingv1alpha1.LabelManagedBy: ControllerName,
|
||||
}).AsSelectorPreValidated()
|
||||
ips, err := c.ipAddressLister.List(ipLabelSelector)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
for _, ip := range ips {
|
||||
f(netutils.ParseIPSloppy(ip.Name))
|
||||
}
|
||||
}
|
||||
|
||||
// CIDR returns the zero-value net.IPNet: the MetaAllocator manages multiple
// ServiceCIDRs, so no single CIDR describes it.
func (c *MetaAllocator) CIDR() net.IPNet {
	return net.IPNet{}

}
|
||||
// IPFamily returns the IP family (IPv4 or IPv6) this allocator serves.
func (c *MetaAllocator) IPFamily() api.IPFamily {
	return c.ipFamily
}
|
||||
func (c *MetaAllocator) Has(ip net.IP) bool {
|
||||
allocator, err := c.getAllocator(ip)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return allocator.Has(ip)
|
||||
}
|
||||
// Destroy stops the controller by closing the internal stop channel.
// It is safe to call multiple times: the select closes the channel only
// if it has not already been closed.
func (c *MetaAllocator) Destroy() {
	select {
	case <-c.internalStopCh:
		// already closed; nothing to do
	default:
		close(c.internalStopCh)
	}
}
|
||||
|
||||
// for testing
|
||||
func (c *MetaAllocator) Used() int {
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(c.IPFamily()),
|
||||
networkingv1alpha1.LabelManagedBy: ControllerName,
|
||||
}).AsSelectorPreValidated()
|
||||
ips, err := c.ipAddressLister.List(ipLabelSelector)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return len(ips)
|
||||
}
|
||||
|
||||
// for testing
|
||||
func (c *MetaAllocator) Free() int {
|
||||
c.muTree.Lock()
|
||||
defer c.muTree.Unlock()
|
||||
|
||||
size := 0
|
||||
isIPv6 := c.ipFamily == api.IPFamily(v1.IPv6Protocol)
|
||||
for _, allocator := range c.tree.TopLevelPrefixes(isIPv6) {
|
||||
size += int(allocator.size)
|
||||
}
|
||||
return size - c.Used()
|
||||
}
|
||||
|
||||
func (c *MetaAllocator) EnableMetrics() {}
|
||||
|
||||
// DryRun returns a dry-run view of one allocator. Despite the original
// "random" intent, it returns the DryRun of the first top-level allocator
// for this family, or an empty Allocator when none exist.
func (c *MetaAllocator) DryRun() Interface {
	c.muTree.Lock()
	defer c.muTree.Unlock()
	isIPv6 := c.ipFamily == api.IPFamily(v1.IPv6Protocol)
	for _, allocator := range c.tree.TopLevelPrefixes(isIPv6) {
		return allocator.DryRun()
	}
	return &Allocator{}
}
|
||||
|
||||
func isReady(serviceCIDR *networkingv1alpha1.ServiceCIDR) bool {
|
||||
if serviceCIDR == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, condition := range serviceCIDR.Status.Conditions {
|
||||
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
|
||||
return condition.Status == metav1.ConditionStatus(metav1.ConditionTrue)
|
||||
}
|
||||
}
|
||||
// assume the ServiceCIDR is Ready, in order to handle scenarios where kcm is not running
|
||||
return true
|
||||
}
|
||||
|
||||
// ipToAddr converts a net.IP to a netip.Addr
|
||||
// if the net.IP is not valid it returns an empty netip.Addr{}
|
||||
func ipToAddr(ip net.IP) netip.Addr {
|
||||
// https://pkg.go.dev/net/netip#AddrFromSlice can return an IPv4 in IPv6 format
|
||||
// so we have to check the IP family to return exactly the format that we want
|
||||
// address, _ := netip.AddrFromSlice(net.ParseIPSloppy(192.168.0.1)) returns
|
||||
// an address like ::ffff:192.168.0.1/32
|
||||
bytes := ip.To4()
|
||||
if bytes == nil {
|
||||
bytes = ip.To16()
|
||||
}
|
||||
// AddrFromSlice returns Addr{}, false if the input is invalid.
|
||||
address, _ := netip.AddrFromSlice(bytes)
|
||||
return address
|
||||
}
|
||||
|
||||
// Convert netutils.IPFamily to v1.IPFamily
|
||||
// TODO: consolidate helpers
|
||||
// copied from pkg/proxy/util/utils.go
|
||||
func convertToV1IPFamily(ipFamily netutils.IPFamily) v1.IPFamily {
|
||||
switch ipFamily {
|
||||
case netutils.IPv4:
|
||||
return v1.IPv4Protocol
|
||||
case netutils.IPv6:
|
||||
return v1.IPv6Protocol
|
||||
}
|
||||
|
||||
return v1.IPFamilyUnknown
|
||||
}
|
||||
489
pkg/registry/core/service/ipallocator/cidrallocator_test.go
Normal file
489
pkg/registry/core/service/ipallocator/cidrallocator_test.go
Normal file
@@ -0,0 +1,489 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package ipallocator
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
k8stesting "k8s.io/client-go/testing"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
func newTestMetaAllocator() (*MetaAllocator, error) {
|
||||
client := fake.NewSimpleClientset()
|
||||
|
||||
informerFactory := informers.NewSharedInformerFactory(client, 0*time.Second)
|
||||
serviceCIDRInformer := informerFactory.Networking().V1alpha1().ServiceCIDRs()
|
||||
serviceCIDRStore := serviceCIDRInformer.Informer().GetIndexer()
|
||||
serviceCIDRInformer.Informer().HasSynced()
|
||||
ipInformer := informerFactory.Networking().V1alpha1().IPAddresses()
|
||||
ipStore := ipInformer.Informer().GetIndexer()
|
||||
|
||||
client.PrependReactor("create", "servicecidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
cidr := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ServiceCIDR)
|
||||
_, exists, err := serviceCIDRStore.GetByKey(cidr.Name)
|
||||
if exists && err != nil {
|
||||
return false, nil, fmt.Errorf("cidr already exist")
|
||||
}
|
||||
cidr.Generation = 1
|
||||
err = serviceCIDRStore.Add(cidr)
|
||||
return false, cidr, err
|
||||
}))
|
||||
client.PrependReactor("delete", "servicecidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
name := action.(k8stesting.DeleteAction).GetName()
|
||||
obj, exists, err := serviceCIDRStore.GetByKey(name)
|
||||
cidr := &networkingv1alpha1.ServiceCIDR{}
|
||||
if exists && err == nil {
|
||||
cidr = obj.(*networkingv1alpha1.ServiceCIDR)
|
||||
err = serviceCIDRStore.Delete(cidr)
|
||||
}
|
||||
return false, cidr, err
|
||||
}))
|
||||
|
||||
client.PrependReactor("create", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
ip := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.IPAddress)
|
||||
_, exists, err := ipStore.GetByKey(ip.Name)
|
||||
if exists && err != nil {
|
||||
return false, nil, fmt.Errorf("ip already exist")
|
||||
}
|
||||
ip.Generation = 1
|
||||
err = ipStore.Add(ip)
|
||||
return false, ip, err
|
||||
}))
|
||||
client.PrependReactor("delete", "ipaddresses", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) {
|
||||
name := action.(k8stesting.DeleteAction).GetName()
|
||||
obj, exists, err := ipStore.GetByKey(name)
|
||||
ip := &networkingv1alpha1.IPAddress{}
|
||||
if exists && err == nil {
|
||||
ip = obj.(*networkingv1alpha1.IPAddress)
|
||||
err = ipStore.Delete(ip)
|
||||
}
|
||||
return false, ip, err
|
||||
}))
|
||||
|
||||
c, err := NewMetaAllocator(client.NetworkingV1alpha1(), serviceCIDRInformer, ipInformer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// we can not force the state of the informers to be synced without racing
|
||||
// so we run our worker here
|
||||
go wait.Until(c.runWorker, time.Second, c.internalStopCh)
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// TestCIDRAllocateMultiple verifies that adding a second, disjoint
// ServiceCIDR grows the pool: the allocator first exhausts a /28 (14 IPs),
// then serves another 14 from the new CIDR.
func TestCIDRAllocateMultiple(t *testing.T) {
	r, err := newTestMetaAllocator()
	if err != nil {
		t.Fatal(err)
	}
	defer r.Destroy()

	// No CIDRs configured yet: nothing free, allocation must fail.
	if f := r.Free(); f != 0 {
		t.Errorf("free: %d", f)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Error(err)
	}

	cidr := newServiceCIDR("test", "192.168.0.0/28")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr)
	// wait for the cidr to be processed and set the informer synced
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.1"))
		if err != nil {
			t.Logf("unexpected error %v", err)
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Drain the /28 and check each IP is handed out exactly once.
	found := sets.NewString()
	count := 0
	for r.Free() > 0 {
		ip, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ free: %d count: %d: %v", r.Free(), count, err)
		}
		count++
		if found.Has(ip.String()) {
			t.Fatalf("allocated %s twice: %d", ip, count)
		}
		found.Insert(ip.String())
	}
	// A /28 has 14 allocatable addresses (network/broadcast excluded).
	if count != 14 {
		t.Fatalf("expected 14 IPs got %d", count)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Fatal(err)
	}

	// Add a second disjoint CIDR and wait for its allocator to be ready.
	cidr2 := newServiceCIDR("test2", "10.0.0.0/28")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr2, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr2)
	// wait for the cidr to be processed
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("10.0.0.11"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// allocate one IP from the new allocator
	err = r.Allocate(netutils.ParseIPSloppy("10.0.0.11"))
	if err != nil {
		t.Fatalf("error allocating IP 10.0.0.11 from new allocator: %v", err)
	}
	count++
	// Drain the remainder of the new CIDR.
	for r.Free() > 0 {
		ip, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ free: %d count: %d: %v", r.Free(), count, err)
		}
		count++
		if found.Has(ip.String()) {
			t.Fatalf("allocated %s twice: %d", ip, count)
		}
		found.Insert(ip.String())
	}
	// 14 from each /28 = 28 total allocations.
	if count != 28 {
		t.Fatalf("expected 28 IPs got %d", count)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Fatal(err)
	}

}
|
||||
|
||||
// TestCIDRAllocateShadow verifies behavior with overlapping ServiceCIDRs:
// the network address of a /24 is unallocatable on its own, but becomes a
// valid host address once a broader containing /16 is added.
func TestCIDRAllocateShadow(t *testing.T) {
	r, err := newTestMetaAllocator()
	if err != nil {
		t.Fatal(err)
	}
	defer r.Destroy()

	// No CIDRs configured yet: nothing free, allocation must fail.
	if f := r.Free(); f != 0 {
		t.Errorf("free: %d", f)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Error(err)
	}

	cidr := newServiceCIDR("test", "192.168.1.0/24")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr)
	// wait for the cidr to be processed and set the informer synced
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.1.0"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// 192.168.1.0 is the /24's network address, so this must be rejected.
	err = r.Allocate(netutils.ParseIPSloppy("192.168.1.0"))
	if err == nil {
		t.Fatalf("unexpected allocation for IP 192.168.1.0")
	}

	if f := r.Used(); f != 0 {
		t.Errorf("used: %d", f)
	}

	// Add a broader /16 that contains (shadows) the /24.
	cidr2 := newServiceCIDR("test2", "192.168.0.0/16")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr2, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr2)
	// wait for the cidr to be processed
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.0"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Within the /16, 192.168.1.0 is an ordinary host address and must now
	// be allocatable.
	err = r.Allocate(netutils.ParseIPSloppy("192.168.1.0"))
	if err != nil {
		t.Fatalf("error allocating IP 192.168.1.0 from new allocator: %v", err)
	}

	if f := r.Used(); f != 1 {
		t.Errorf("used: %d", f)
	}

}
|
||||
|
||||
// TestCIDRAllocateGrow verifies that replacing coverage of a range with a
// broader CIDR grows capacity: a /28 yields 14 IPs, and adding a containing
// /24 raises the total allocatable count to 254.
func TestCIDRAllocateGrow(t *testing.T) {
	r, err := newTestMetaAllocator()
	if err != nil {
		t.Fatal(err)
	}
	defer r.Destroy()

	// No CIDRs configured yet: nothing free, allocation must fail.
	if f := r.Free(); f != 0 {
		t.Errorf("free: %d", f)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Error(err)
	}

	cidr := newServiceCIDR("test", "192.168.0.0/28")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr)
	// wait for the cidr to be processed
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.1"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Drain the /28 and check each IP is handed out exactly once.
	found := sets.NewString()
	count := 0
	for r.Free() > 0 {
		ip, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ free: %d count: %d: %v", r.Free(), count, err)
		}
		count++
		if found.Has(ip.String()) {
			t.Fatalf("allocated %s twice: %d", ip, count)
		}
		found.Insert(ip.String())
	}
	if count != 14 {
		t.Fatalf("expected 14 IPs got %d", count)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Fatal(err)
	}

	// Add a containing /24, which supersedes the /28 as the top-level prefix.
	cidr2 := newServiceCIDR("test2", "192.168.0.0/24")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr2, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr2)
	// wait for the cidr to be processed
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.253"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}

	// Drain the rest of the /24; already-allocated IPs must not repeat.
	for r.Free() > 0 {
		ip, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ free: %d count: %d: %v", r.Free(), count, err)
		}
		count++
		if found.Has(ip.String()) {
			t.Fatalf("allocated %s twice: %d", ip, count)
		}
		found.Insert(ip.String())
	}
	// A /24 has 254 allocatable addresses in total.
	if count != 254 {
		t.Fatalf("expected 254 IPs got %d", count)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Fatal(err)
	}

}
|
||||
|
||||
// TestCIDRAllocateShrink verifies shrinking coverage: a /24 is drained and
// fully released, then swapped for a /28 over the same range; the allocator
// for the /24 disappears and only the /28's 14 addresses remain allocatable.
func TestCIDRAllocateShrink(t *testing.T) {
	r, err := newTestMetaAllocator()
	if err != nil {
		t.Fatal(err)
	}
	defer r.Destroy()

	// No CIDRs configured yet: nothing free, allocation must fail.
	if f := r.Free(); f != 0 {
		t.Errorf("free: %d", f)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Error(err)
	}

	cidr := newServiceCIDR("test", "192.168.0.0/24")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr)
	// wait for the cidr to be processed
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.1"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Drain the /24 and check each IP is handed out exactly once.
	found := sets.NewString()
	count := 0
	for r.Free() > 0 {
		ip, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ free: %d count: %d: %v", r.Free(), count, err)
		}
		count++
		if found.Has(ip.String()) {
			t.Fatalf("allocated %s twice: %d", ip, count)
		}
		found.Insert(ip.String())
	}
	if count != 254 {
		t.Fatalf("expected 254 IPs got %d", count)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Fatal(err)
	}
	// Release everything so the /24 can be removed cleanly.
	for _, ip := range found.List() {
		err = r.Release(netutils.ParseIPSloppy(ip))
		if err != nil {
			t.Fatalf("unexpected error releasing ip %s", err)
		}
	}
	if r.Used() > 0 {
		t.Fatalf("expected allocator to be empty, got %d", r.Free())
	}
	// Swap the /24 for a narrower /28 over the same range.
	cidr2 := newServiceCIDR("cidr2", "192.168.0.0/28")
	_, err = r.client.ServiceCIDRs().Create(context.Background(), cidr2, metav1.CreateOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.addServiceCIDR(cidr2)
	err = r.client.ServiceCIDRs().Delete(context.Background(), cidr.Name, metav1.DeleteOptions{})
	if err != nil {
		t.Fatal(err)
	}
	r.deleteServiceCIDR(cidr)

	// wait for the cidr to be processed (delete ServiceCIDR)
	// 192.168.0.253 lies outside the /28, so once the /24 allocator is gone
	// looking it up must fail.
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		_, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.253"))
		if err != nil {
			return true, nil
		}

		return false, nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// wait for the cidr to be processed (create ServiceCIDR)
	err = wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 5*time.Second, true, func(ctx context.Context) (bool, error) {
		allocator, err := r.getAllocator(netutils.ParseIPSloppy("192.168.0.1"))
		if err != nil {
			return false, nil
		}
		allocator.ipAddressSynced = func() bool { return true }
		return allocator.ready.Load(), nil
	})
	if err != nil {
		t.Fatal(err)
	}
	// Only the /28's 14 addresses should remain allocatable.
	count = 0
	for r.Free() > 0 {
		_, err := r.AllocateNext()
		if err != nil {
			t.Fatalf("error @ free: %d count: %d: %v", r.Free(), count, err)
		}
		count++
	}
	if count != 14 {
		t.Fatalf("expected 14 IPs got %d", count)
	}
	if _, err := r.AllocateNext(); err == nil {
		t.Fatal(err)
	}

}
|
||||
|
||||
// TODO: add IPv6 and dual stack test cases
|
||||
func newServiceCIDR(name, cidr string) *networkingv1alpha1.ServiceCIDR {
|
||||
return &networkingv1alpha1.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{cidr},
|
||||
},
|
||||
Status: networkingv1alpha1.ServiceCIDRStatus{
|
||||
Conditions: []metav1.Condition{
|
||||
{
|
||||
Type: string(networkingv1alpha1.ServiceCIDRConditionReady),
|
||||
Status: metav1.ConditionTrue,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -20,6 +20,8 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -42,6 +44,7 @@ import (
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/core/v1/helper"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
"k8s.io/kubernetes/pkg/util/iptree"
|
||||
"k8s.io/utils/clock"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
@@ -88,18 +91,23 @@ type RepairIPAddress struct {
|
||||
client kubernetes.Interface
|
||||
interval time.Duration
|
||||
|
||||
networkByFamily map[netutils.IPFamily]*net.IPNet // networks we operate on, by their family
|
||||
|
||||
serviceLister corelisters.ServiceLister
|
||||
servicesSynced cache.InformerSynced
|
||||
|
||||
serviceCIDRLister networkinglisters.ServiceCIDRLister
|
||||
serviceCIDRSynced cache.InformerSynced
|
||||
|
||||
ipAddressLister networkinglisters.IPAddressLister
|
||||
ipAddressSynced cache.InformerSynced
|
||||
|
||||
cidrQueue workqueue.RateLimitingInterface
|
||||
svcQueue workqueue.RateLimitingInterface
|
||||
ipQueue workqueue.RateLimitingInterface
|
||||
workerLoopPeriod time.Duration
|
||||
|
||||
muTree sync.Mutex
|
||||
tree *iptree.Tree[string]
|
||||
|
||||
broadcaster events.EventBroadcaster
|
||||
recorder events.EventRecorder
|
||||
clock clock.Clock
|
||||
@@ -109,38 +117,32 @@ type RepairIPAddress struct {
|
||||
// and generates informational warnings for a cluster that is not in sync.
|
||||
func NewRepairIPAddress(interval time.Duration,
|
||||
client kubernetes.Interface,
|
||||
network *net.IPNet,
|
||||
secondaryNetwork *net.IPNet,
|
||||
serviceInformer coreinformers.ServiceInformer,
|
||||
serviceCIDRInformer networkinginformers.ServiceCIDRInformer,
|
||||
ipAddressInformer networkinginformers.IPAddressInformer) *RepairIPAddress {
|
||||
eventBroadcaster := events.NewBroadcaster(&events.EventSinkImpl{Interface: client.EventsV1()})
|
||||
recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, "ipallocator-repair-controller")
|
||||
|
||||
networkByFamily := make(map[netutils.IPFamily]*net.IPNet)
|
||||
primary := netutils.IPFamilyOfCIDR(network)
|
||||
networkByFamily[primary] = network
|
||||
if secondaryNetwork != nil {
|
||||
secondary := netutils.IPFamilyOfCIDR(secondaryNetwork)
|
||||
networkByFamily[secondary] = secondaryNetwork
|
||||
}
|
||||
|
||||
r := &RepairIPAddress{
|
||||
interval: interval,
|
||||
client: client,
|
||||
networkByFamily: networkByFamily,
|
||||
serviceLister: serviceInformer.Lister(),
|
||||
servicesSynced: serviceInformer.Informer().HasSynced,
|
||||
ipAddressLister: ipAddressInformer.Lister(),
|
||||
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
|
||||
svcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "services"),
|
||||
ipQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"),
|
||||
workerLoopPeriod: time.Second,
|
||||
broadcaster: eventBroadcaster,
|
||||
recorder: recorder,
|
||||
clock: clock.RealClock{},
|
||||
interval: interval,
|
||||
client: client,
|
||||
serviceLister: serviceInformer.Lister(),
|
||||
servicesSynced: serviceInformer.Informer().HasSynced,
|
||||
serviceCIDRLister: serviceCIDRInformer.Lister(),
|
||||
serviceCIDRSynced: serviceCIDRInformer.Informer().HasSynced,
|
||||
ipAddressLister: ipAddressInformer.Lister(),
|
||||
ipAddressSynced: ipAddressInformer.Informer().HasSynced,
|
||||
cidrQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "servicecidrs"),
|
||||
svcQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "services"),
|
||||
ipQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ipaddresses"),
|
||||
tree: iptree.New[string](),
|
||||
workerLoopPeriod: time.Second,
|
||||
broadcaster: eventBroadcaster,
|
||||
recorder: recorder,
|
||||
clock: clock.RealClock{},
|
||||
}
|
||||
|
||||
serviceInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
|
||||
_, _ = serviceInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
@@ -163,6 +165,29 @@ func NewRepairIPAddress(interval time.Duration,
|
||||
},
|
||||
}, interval)
|
||||
|
||||
_, _ = serviceCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
r.cidrQueue.Add(key)
|
||||
}
|
||||
},
|
||||
UpdateFunc: func(old interface{}, new interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(new)
|
||||
if err == nil {
|
||||
r.cidrQueue.Add(key)
|
||||
}
|
||||
},
|
||||
DeleteFunc: func(obj interface{}) {
|
||||
// IndexerInformer uses a delta queue, therefore for deletes we have to use this
|
||||
// key function.
|
||||
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj)
|
||||
if err == nil {
|
||||
r.cidrQueue.Add(key)
|
||||
}
|
||||
},
|
||||
})
|
||||
|
||||
ipAddressInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: func(obj interface{}) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(obj)
|
||||
@@ -191,6 +216,7 @@ func NewRepairIPAddress(interval time.Duration,
|
||||
|
||||
// RunUntil starts the controller until the provided ch is closed.
|
||||
func (r *RepairIPAddress) RunUntil(onFirstSuccess func(), stopCh chan struct{}) {
|
||||
defer r.cidrQueue.ShutDown()
|
||||
defer r.ipQueue.ShutDown()
|
||||
defer r.svcQueue.ShutDown()
|
||||
r.broadcaster.StartRecordingToSink(stopCh)
|
||||
@@ -199,7 +225,7 @@ func (r *RepairIPAddress) RunUntil(onFirstSuccess func(), stopCh chan struct{})
|
||||
klog.Info("Starting ipallocator-repair-controller")
|
||||
defer klog.Info("Shutting down ipallocator-repair-controller")
|
||||
|
||||
if !cache.WaitForNamedCacheSync("ipallocator-repair-controller", stopCh, r.ipAddressSynced, r.servicesSynced) {
|
||||
if !cache.WaitForNamedCacheSync("ipallocator-repair-controller", stopCh, r.ipAddressSynced, r.servicesSynced, r.serviceCIDRSynced) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -212,6 +238,9 @@ func (r *RepairIPAddress) RunUntil(onFirstSuccess func(), stopCh chan struct{})
|
||||
}
|
||||
onFirstSuccess()
|
||||
|
||||
// serialize the operations on ServiceCIDRs
|
||||
go wait.Until(r.cidrWorker, r.workerLoopPeriod, stopCh)
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(r.ipWorker, r.workerLoopPeriod, stopCh)
|
||||
go wait.Until(r.svcWorker, r.workerLoopPeriod, stopCh)
|
||||
@@ -330,20 +359,16 @@ func (r *RepairIPAddress) syncService(key string) error {
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP %s for Service %s/%s is not a valid IP; please recreate Service", ip, svc.Namespace, svc.Name))
|
||||
continue
|
||||
}
|
||||
// TODO(aojea) Refactor to abstract the IPs checks
|
||||
family := getFamilyByIP(ip)
|
||||
|
||||
family := netutils.IPFamilyOf(ip)
|
||||
v1Family := getFamilyByIP(ip)
|
||||
network, ok := r.networkByFamily[family]
|
||||
if !ok {
|
||||
// this service is using an IPFamily no longer configured on cluster
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotValid", "ClusterIPValidation", "Cluster IP %s(%s) is of ip family that is no longer configured on cluster; please recreate Service", ip, v1Family)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is of ip family that is no longer configured on cluster; please recreate Service", v1Family, ip, svc.Namespace, svc.Name))
|
||||
continue
|
||||
}
|
||||
if !network.Contains(ip) {
|
||||
r.muTree.Lock()
|
||||
prefixes := r.tree.GetHostIPPrefixMatches(ipToAddr(ip))
|
||||
r.muTree.Unlock()
|
||||
if len(prefixes) == 0 {
|
||||
// ClusterIP is out of range
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPOutOfRange", "ClusterIPAllocation", "Cluster IP [%v]: %s is not within the configured Service CIDR %s; please recreate service", v1Family, ip, network.String())
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is not within the service CIDR %s; please recreate", v1Family, ip, svc.Namespace, svc.Name, network.String()))
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPOutOfRange", "ClusterIPAllocation", "Cluster IP [%v]: %s is not within any configured Service CIDR; please recreate service", family, ip)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is not within any service CIDR; please recreate", family, ip, svc.Namespace, svc.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -351,8 +376,8 @@ func (r *RepairIPAddress) syncService(key string) error {
|
||||
ipAddress, err := r.ipAddressLister.Get(ip.String())
|
||||
if apierrors.IsNotFound(err) {
|
||||
// ClusterIP doesn't seem to be allocated, create it.
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "Cluster IP [%v]: %s is not allocated; repairing", v1Family, ip)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is not allocated; repairing", v1Family, ip, svc.Namespace, svc.Name))
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "Cluster IP [%v]: %s is not allocated; repairing", family, ip)
|
||||
runtime.HandleError(fmt.Errorf("the ClusterIP [%v]: %s for Service %s/%s is not allocated; repairing", family, ip, svc.Namespace, svc.Name))
|
||||
_, err := r.client.NetworkingV1alpha1().IPAddresses().Create(context.Background(), newIPAddress(ip.String(), svc), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -360,14 +385,14 @@ func (r *RepairIPAddress) syncService(key string) error {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "UnknownError", "ClusterIPAllocation", "Unable to allocate ClusterIP [%v]: %s due to an unknown error", v1Family, ip)
|
||||
return fmt.Errorf("unable to allocate ClusterIP [%v]: %s for Service %s/%s due to an unknown error, will retry later: %v", v1Family, ip, svc.Namespace, svc.Name, err)
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "UnknownError", "ClusterIPAllocation", "Unable to allocate ClusterIP [%v]: %s due to an unknown error", family, ip)
|
||||
return fmt.Errorf("unable to allocate ClusterIP [%v]: %s for Service %s/%s due to an unknown error, will retry later: %v", family, ip, svc.Namespace, svc.Name, err)
|
||||
}
|
||||
|
||||
// IPAddress that belongs to a Service must reference a Service
|
||||
if ipAddress.Spec.ParentRef.Group != "" ||
|
||||
ipAddress.Spec.ParentRef.Resource != "services" {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "the ClusterIP [%v]: %s for Service %s/%s has a wrong reference; repairing", v1Family, ip, svc.Namespace, svc.Name)
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "the ClusterIP [%v]: %s for Service %s/%s has a wrong reference; repairing", family, ip, svc.Namespace, svc.Name)
|
||||
if err := r.recreateIPAddress(ipAddress.Name, svc); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -381,7 +406,7 @@ func (r *RepairIPAddress) syncService(key string) error {
|
||||
// it will keep deleting and recreating the same IPAddress changing the reference
|
||||
refService, err := r.serviceLister.Services(ipAddress.Spec.ParentRef.Namespace).Get(ipAddress.Spec.ParentRef.Name)
|
||||
if err != nil {
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "the ClusterIP [%v]: %s for Service %s/%s has a wrong reference; repairing", v1Family, ip, svc.Namespace, svc.Name)
|
||||
r.recorder.Eventf(svc, nil, v1.EventTypeWarning, "ClusterIPNotAllocated", "ClusterIPAllocation", "the ClusterIP [%v]: %s for Service %s/%s has a wrong reference; repairing", family, ip, svc.Namespace, svc.Name)
|
||||
if err := r.recreateIPAddress(ipAddress.Name, svc); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -472,8 +497,7 @@ func (r *RepairIPAddress) syncIPAddress(key string) error {
|
||||
}
|
||||
|
||||
// does not reference a Service but created by the service allocator, something else have changed it, delete it
|
||||
if ipAddress.Spec.ParentRef.Group != "" ||
|
||||
ipAddress.Spec.ParentRef.Resource != "services" {
|
||||
if ipAddress.Spec.ParentRef.Group != "" || ipAddress.Spec.ParentRef.Resource != "services" {
|
||||
runtime.HandleError(fmt.Errorf("IPAddress %s appears to have been modified, not referencing a Service %v: cleaning up", ipAddress.Name, ipAddress.Spec.ParentRef))
|
||||
r.recorder.Eventf(ipAddress, nil, v1.EventTypeWarning, "IPAddressNotAllocated", "IPAddressAllocation", "IPAddress %s appears to have been modified, not referencing a Service %v: cleaning up", ipAddress.Name, ipAddress.Spec.ParentRef)
|
||||
err := r.client.NetworkingV1alpha1().IPAddresses().Delete(context.Background(), ipAddress.Name, metav1.DeleteOptions{})
|
||||
@@ -523,6 +547,63 @@ func (r *RepairIPAddress) syncIPAddress(key string) error {
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) cidrWorker() {
|
||||
for r.processNextWorkCIDR() {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) processNextWorkCIDR() bool {
|
||||
eKey, quit := r.cidrQueue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer r.cidrQueue.Done(eKey)
|
||||
|
||||
err := r.syncCIDRs()
|
||||
r.handleCIDRErr(err, eKey)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (r *RepairIPAddress) handleCIDRErr(err error, key interface{}) {
|
||||
if err == nil {
|
||||
r.cidrQueue.Forget(key)
|
||||
return
|
||||
}
|
||||
|
||||
if r.cidrQueue.NumRequeues(key) < maxRetries {
|
||||
klog.V(2).InfoS("Error syncing ServiceCIDR, retrying", "serviceCIDR", key, "err", err)
|
||||
r.cidrQueue.AddRateLimited(key)
|
||||
return
|
||||
}
|
||||
|
||||
klog.Warningf("Dropping ServiceCIDR %q out of the queue: %v", key, err)
|
||||
r.cidrQueue.Forget(key)
|
||||
runtime.HandleError(err)
|
||||
}
|
||||
|
||||
// syncCIDRs rebuilds the radix tree based from the informers cache
|
||||
func (r *RepairIPAddress) syncCIDRs() error {
|
||||
serviceCIDRList, err := r.serviceCIDRLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
tree := iptree.New[string]()
|
||||
for _, serviceCIDR := range serviceCIDRList {
|
||||
for _, cidr := range serviceCIDR.Spec.CIDRs {
|
||||
if prefix, err := netip.ParsePrefix(cidr); err == nil { // it can not fail since is already validated
|
||||
tree.InsertPrefix(prefix, serviceCIDR.Name)
|
||||
}
|
||||
}
|
||||
}
|
||||
r.muTree.Lock()
|
||||
defer r.muTree.Unlock()
|
||||
r.tree = tree
|
||||
return nil
|
||||
}
|
||||
|
||||
func newIPAddress(name string, svc *v1.Service) *networkingv1alpha1.IPAddress {
|
||||
family := string(v1.IPv4Protocol)
|
||||
if netutils.IsIPv6String(name) {
|
||||
@@ -587,3 +668,20 @@ func verifyIPAddressLabels(ip *networkingv1alpha1.IPAddress) bool {
|
||||
}
|
||||
return managedByController(ip)
|
||||
}
|
||||
|
||||
// TODO(aojea) move to utils, already in pkg/registry/core/service/ipallocator/cidrallocator.go
|
||||
// ipToAddr converts a net.IP to a netip.Addr
|
||||
// if the net.IP is not valid it returns an empty netip.Addr{}
|
||||
func ipToAddr(ip net.IP) netip.Addr {
|
||||
// https://pkg.go.dev/net/netip#AddrFromSlice can return an IPv4 in IPv6 format
|
||||
// so we have to check the IP family to return exactly the format that we want
|
||||
// address, _ := netip.AddrFromSlice(net.ParseIPSloppy(192.168.0.1)) returns
|
||||
// an address like ::ffff:192.168.0.1/32
|
||||
bytes := ip.To4()
|
||||
if bytes == nil {
|
||||
bytes = ip.To16()
|
||||
}
|
||||
// AddrFromSlice returns Addr{}, false if the input is invalid.
|
||||
address, _ := netip.AddrFromSlice(bytes)
|
||||
return address
|
||||
}
|
||||
|
||||
@@ -33,7 +33,6 @@ import (
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/events"
|
||||
"k8s.io/kubernetes/pkg/registry/core/service/ipallocator"
|
||||
netutils "k8s.io/utils/net"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -43,8 +42,9 @@ var (
|
||||
|
||||
type fakeRepair struct {
|
||||
*RepairIPAddress
|
||||
serviceStore cache.Store
|
||||
ipAddressStore cache.Store
|
||||
serviceStore cache.Store
|
||||
ipAddressStore cache.Store
|
||||
serviceCIDRStore cache.Store
|
||||
}
|
||||
|
||||
func newFakeRepair() (*fake.Clientset, *fakeRepair) {
|
||||
@@ -54,6 +54,9 @@ func newFakeRepair() (*fake.Clientset, *fakeRepair) {
|
||||
serviceInformer := informerFactory.Core().V1().Services()
|
||||
serviceIndexer := serviceInformer.Informer().GetIndexer()
|
||||
|
||||
serviceCIDRInformer := informerFactory.Networking().V1alpha1().ServiceCIDRs()
|
||||
serviceCIDRIndexer := serviceCIDRInformer.Informer().GetIndexer()
|
||||
|
||||
ipInformer := informerFactory.Networking().V1alpha1().IPAddresses()
|
||||
ipIndexer := ipInformer.Informer().GetIndexer()
|
||||
|
||||
@@ -72,22 +75,13 @@ func newFakeRepair() (*fake.Clientset, *fakeRepair) {
|
||||
return false, &networkingv1alpha1.IPAddress{}, err
|
||||
}))
|
||||
|
||||
_, primary, err := netutils.ParseCIDRSloppy(serviceCIDRv4)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
_, secondary, err := netutils.ParseCIDRSloppy(serviceCIDRv6)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
r := NewRepairIPAddress(0*time.Second,
|
||||
fakeClient,
|
||||
primary,
|
||||
secondary,
|
||||
serviceInformer,
|
||||
serviceCIDRInformer,
|
||||
ipInformer,
|
||||
)
|
||||
return fakeClient, &fakeRepair{r, serviceIndexer, ipIndexer}
|
||||
return fakeClient, &fakeRepair{r, serviceIndexer, ipIndexer, serviceCIDRIndexer}
|
||||
}
|
||||
|
||||
func TestRepairServiceIP(t *testing.T) {
|
||||
@@ -95,6 +89,7 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
name string
|
||||
svcs []*v1.Service
|
||||
ipAddresses []*networkingv1alpha1.IPAddress
|
||||
cidrs []*networkingv1alpha1.ServiceCIDR
|
||||
expectedIPs []string
|
||||
actions [][]string // verb and resource
|
||||
events []string
|
||||
@@ -105,6 +100,9 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1"},
|
||||
actions: [][]string{},
|
||||
events: []string{},
|
||||
@@ -116,21 +114,45 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
newIPAddress("2001:db8::10", newService("test-svc", []string{"2001:db8::10"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1", "2001:db8::10"},
|
||||
actions: [][]string{},
|
||||
events: []string{},
|
||||
},
|
||||
{
|
||||
name: "no changes needed dual stack multiple cidrs",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"192.168.0.1", "2001:db8:a:b::10"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("192.168.0.1", newService("test-svc", []string{"192.168.0.1"})),
|
||||
newIPAddress("2001:db8:a:b::10", newService("test-svc", []string{"2001:db8:a:b::10"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
newServiceCIDR("custom", "192.168.0.0/24", "2001:db8:a:b::/64"),
|
||||
},
|
||||
expectedIPs: []string{"192.168.0.1", "2001:db8:a:b::10"},
|
||||
actions: [][]string{},
|
||||
events: []string{},
|
||||
},
|
||||
// these two cases simulate migrating from bitmaps to IPAddress objects
|
||||
{
|
||||
name: "create IPAddress single stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
name: "create IPAddress single stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1"},
|
||||
actions: [][]string{{"create", "ipaddresses"}},
|
||||
events: []string{"Warning ClusterIPNotAllocated Cluster IP [IPv4]: 10.0.1.1 is not allocated; repairing"},
|
||||
},
|
||||
{
|
||||
name: "create IPAddresses dual stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1", "2001:db8::10"})},
|
||||
name: "create IPAddresses dual stack",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1", "2001:db8::10"})},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1", "2001:db8::10"},
|
||||
actions: [][]string{{"create", "ipaddresses"}, {"create", "ipaddresses"}},
|
||||
events: []string{
|
||||
@@ -138,12 +160,26 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
"Warning ClusterIPNotAllocated Cluster IP [IPv6]: 2001:db8::10 is not allocated; repairing",
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "create IPAddress single stack from secondary",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"192.168.1.1"})},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
newServiceCIDR("custom", "192.168.1.0/24", ""),
|
||||
},
|
||||
expectedIPs: []string{"192.168.1.1"},
|
||||
actions: [][]string{{"create", "ipaddresses"}},
|
||||
events: []string{"Warning ClusterIPNotAllocated Cluster IP [IPv4]: 192.168.1.1 is not allocated; repairing"},
|
||||
},
|
||||
{
|
||||
name: "reconcile IPAddress single stack wrong reference",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc2", []string{"10.0.1.1"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1"},
|
||||
actions: [][]string{{"delete", "ipaddresses"}, {"create", "ipaddresses"}},
|
||||
events: []string{"Warning ClusterIPNotAllocated the ClusterIP [IPv4]: 10.0.1.1 for Service bar/test-svc has a wrong reference; repairing"},
|
||||
@@ -155,6 +191,9 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
newIPAddress("10.0.1.1", newService("test-svc2", []string{"10.0.1.1"})),
|
||||
newIPAddress("2001:db8::10", newService("test-svc2", []string{"2001:db8::10"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.1.1", "2001:db8::10"},
|
||||
actions: [][]string{{"delete", "ipaddresses"}, {"create", "ipaddresses"}, {"delete", "ipaddresses"}, {"create", "ipaddresses"}},
|
||||
events: []string{
|
||||
@@ -169,18 +208,85 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
newIPAddress("192.168.1.1", newService("test-svc", []string{"192.168.1.1"})),
|
||||
newIPAddress("2001:db8::10", newService("test-svc", []string{"2001:db8::10"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"2001:db8::10"},
|
||||
actions: [][]string{},
|
||||
events: []string{"Warning ClusterIPOutOfRange Cluster IP [IPv4]: 192.168.1.1 is not within the configured Service CIDR 10.0.0.0/16; please recreate service"},
|
||||
events: []string{"Warning ClusterIPOutOfRange Cluster IP [IPv4]: 192.168.1.1 is not within any configured Service CIDR; please recreate service"},
|
||||
},
|
||||
{
|
||||
name: "one IP orphan",
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
actions: [][]string{{"delete", "ipaddresses"}},
|
||||
events: []string{"Warning IPAddressNotAllocated IPAddress: 10.0.1.1 for Service bar/test-svc appears to have leaked: cleaning up"},
|
||||
},
|
||||
{
|
||||
name: "one IP out of range matching the network address",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.0.0"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.0.0", newService("test-svc", []string{"10.0.0.0"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.0.0"},
|
||||
actions: [][]string{},
|
||||
events: []string{"Warning ClusterIPOutOfRange Cluster IP [IPv4]: 10.0.0.0 is not within any configured Service CIDR; please recreate service"},
|
||||
},
|
||||
{
|
||||
name: "one IP out of range matching the broadcast address",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.255.255"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.255.255", newService("test-svc", []string{"10.0.255.255"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"10.0.255.255"},
|
||||
actions: [][]string{},
|
||||
events: []string{"Warning ClusterIPOutOfRange Cluster IP [IPv4]: 10.0.255.255 is not within any configured Service CIDR; please recreate service"},
|
||||
},
|
||||
{
|
||||
name: "one IPv6 out of range matching the subnet address",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"2001:db8::"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("2001:db8::", newService("test-svc", []string{"2001:db8::"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"2001:db8::"},
|
||||
actions: [][]string{},
|
||||
events: []string{"Warning ClusterIPOutOfRange Cluster IP [IPv6]: 2001:db8:: is not within any configured Service CIDR; please recreate service"},
|
||||
},
|
||||
{
|
||||
name: "one IPv6 matching the broadcast address",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"2001:db8::ffff:ffff:ffff:ffff"})},
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("2001:db8::ffff:ffff:ffff:ffff", newService("test-svc", []string{"2001:db8::ffff:ffff:ffff:ffff"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
expectedIPs: []string{"2001:db8::ffff:ffff:ffff:ffff"},
|
||||
},
|
||||
{
|
||||
name: "one IP orphan matching the broadcast address",
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.255.255", newService("test-svc", []string{"10.0.255.255"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
actions: [][]string{{"delete", "ipaddresses"}},
|
||||
events: []string{"Warning IPAddressNotAllocated IPAddress: 10.0.255.255 for Service bar/test-svc appears to have leaked: cleaning up"},
|
||||
},
|
||||
{
|
||||
name: "Two IPAddresses referencing the same service",
|
||||
svcs: []*v1.Service{newService("test-svc", []string{"10.0.1.1"})},
|
||||
@@ -188,6 +294,9 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
newIPAddress("10.0.1.1", newService("test-svc", []string{"10.0.1.1"})),
|
||||
newIPAddress("10.0.1.2", newService("test-svc", []string{"10.0.1.1"})),
|
||||
},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
actions: [][]string{{"delete", "ipaddresses"}},
|
||||
events: []string{"Warning IPAddressWrongReference IPAddress: 10.0.1.2 for Service bar/test-svc has a wrong reference; cleaning up"},
|
||||
},
|
||||
@@ -200,7 +309,10 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
ipAddresses: []*networkingv1alpha1.IPAddress{
|
||||
newIPAddress("10.0.1.1", newService("test-svc2", []string{"10.0.1.1"})),
|
||||
},
|
||||
events: []string{"Warning ClusterIPAlreadyAllocated Cluster IP [4]:10.0.1.1 was assigned to multiple services; please recreate service"},
|
||||
cidrs: []*networkingv1alpha1.ServiceCIDR{
|
||||
newServiceCIDR("kubernetes", serviceCIDRv4, serviceCIDRv6),
|
||||
},
|
||||
events: []string{"Warning ClusterIPAlreadyAllocated Cluster IP [IPv4]:10.0.1.1 was assigned to multiple services; please recreate service"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -208,9 +320,21 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
|
||||
c, r := newFakeRepair()
|
||||
// add cidrs
|
||||
for _, cidr := range test.cidrs {
|
||||
err := r.serviceCIDRStore.Add(cidr)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error trying to add Service %v object: %v", cidr, err)
|
||||
}
|
||||
}
|
||||
err := r.syncCIDRs()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
// override for testing
|
||||
r.servicesSynced = func() bool { return true }
|
||||
r.ipAddressSynced = func() bool { return true }
|
||||
r.serviceCIDRSynced = func() bool { return true }
|
||||
recorder := events.NewFakeRecorder(100)
|
||||
r.recorder = recorder
|
||||
for _, svc := range test.svcs {
|
||||
@@ -228,7 +352,7 @@ func TestRepairServiceIP(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
err := r.runOnce()
|
||||
err = r.runOnce()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -402,6 +526,20 @@ func newService(name string, ips []string) *v1.Service {
|
||||
return svc
|
||||
}
|
||||
|
||||
func newServiceCIDR(name, primary, secondary string) *networkingv1alpha1.ServiceCIDR {
|
||||
serviceCIDR := &networkingv1alpha1.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1alpha1.ServiceCIDRSpec{},
|
||||
}
|
||||
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, primary)
|
||||
if secondary != "" {
|
||||
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, secondary)
|
||||
}
|
||||
return serviceCIDR
|
||||
}
|
||||
|
||||
func expectAction(t *testing.T, actions []k8stesting.Action, expected [][]string) {
|
||||
t.Helper()
|
||||
if len(actions) != len(expected) {
|
||||
|
||||
@@ -45,6 +45,7 @@ var (
|
||||
ErrFull = errors.New("range is full")
|
||||
ErrAllocated = errors.New("provided IP is already allocated")
|
||||
ErrMismatchedNetwork = errors.New("the provided network does not match the current range")
|
||||
ErrNotReady = errors.New("allocator not ready")
|
||||
)
|
||||
|
||||
type ErrNotInRange struct {
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/netip"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
@@ -58,6 +59,9 @@ type Allocator struct {
|
||||
client networkingv1alpha1client.NetworkingV1alpha1Interface
|
||||
ipAddressLister networkingv1alpha1listers.IPAddressLister
|
||||
ipAddressSynced cache.InformerSynced
|
||||
// ready indicates if the allocator is able to allocate new IP addresses.
|
||||
// This is required because it depends on the ServiceCIDR to be ready.
|
||||
ready atomic.Bool
|
||||
|
||||
// metrics is a metrics recorder that can be disabled
|
||||
metrics metricsRecorderInterface
|
||||
@@ -133,7 +137,7 @@ func NewIPAllocator(
|
||||
metricLabel: cidr.String(),
|
||||
rand: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
|
||||
a.ready.Store(true)
|
||||
return a, nil
|
||||
}
|
||||
|
||||
@@ -185,8 +189,8 @@ func (a *Allocator) AllocateService(svc *api.Service, ip net.IP) error {
|
||||
}
|
||||
|
||||
func (a *Allocator) allocateService(svc *api.Service, ip net.IP, dryRun bool) error {
|
||||
if !a.ipAddressSynced() {
|
||||
return fmt.Errorf("allocator not ready")
|
||||
if !a.ready.Load() || !a.ipAddressSynced() {
|
||||
return ErrNotReady
|
||||
}
|
||||
addr, err := netip.ParseAddr(ip.String())
|
||||
if err != nil {
|
||||
@@ -227,8 +231,8 @@ func (a *Allocator) AllocateNextService(svc *api.Service) (net.IP, error) {
|
||||
// falls back to the lower subnet.
|
||||
// It starts allocating from a random IP within each range.
|
||||
func (a *Allocator) allocateNextService(svc *api.Service, dryRun bool) (net.IP, error) {
|
||||
if !a.ipAddressSynced() {
|
||||
return nil, fmt.Errorf("allocator not ready")
|
||||
if !a.ready.Load() || !a.ipAddressSynced() {
|
||||
return nil, ErrNotReady
|
||||
}
|
||||
if dryRun {
|
||||
// Don't bother finding a free value. It's racy and not worth the
|
||||
@@ -348,9 +352,6 @@ func (a *Allocator) Release(ip net.IP) error {
|
||||
}
|
||||
|
||||
func (a *Allocator) release(ip net.IP, dryRun bool) error {
|
||||
if !a.ipAddressSynced() {
|
||||
return fmt.Errorf("allocator not ready")
|
||||
}
|
||||
if dryRun {
|
||||
return nil
|
||||
}
|
||||
@@ -403,7 +404,7 @@ func (a *Allocator) IPFamily() api.IPFamily {
|
||||
return a.family
|
||||
}
|
||||
|
||||
// for testing
|
||||
// for testing, it assumes this is the allocator is unique for the ipFamily
|
||||
func (a *Allocator) Used() int {
|
||||
ipLabelSelector := labels.Set(map[string]string{
|
||||
networkingv1alpha1.LabelIPAddressFamily: string(a.IPFamily()),
|
||||
@@ -416,7 +417,7 @@ func (a *Allocator) Used() int {
|
||||
return len(ips)
|
||||
}
|
||||
|
||||
// for testing
|
||||
// for testing, it assumes this is the allocator is unique for the ipFamily
|
||||
func (a *Allocator) Free() int {
|
||||
return int(a.size) - a.Used()
|
||||
}
|
||||
|
||||
@@ -405,7 +405,8 @@ func (al *Allocators) allocIPs(service *api.Service, toAlloc map[api.IPFamily]st
|
||||
var allocatedIP net.IP
|
||||
var err error
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
svcAllocator, ok := allocator.(*ipallocator.Allocator)
|
||||
// TODO: simplify this and avoid all this duplicate code
|
||||
svcAllocator, ok := allocator.(*ipallocator.MetaAllocator)
|
||||
if ok {
|
||||
allocatedIP, err = svcAllocator.AllocateNextService(service)
|
||||
} else {
|
||||
@@ -425,7 +426,8 @@ func (al *Allocators) allocIPs(service *api.Service, toAlloc map[api.IPFamily]st
|
||||
}
|
||||
var err error
|
||||
if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRServiceAllocator) {
|
||||
svcAllocator, ok := allocator.(*ipallocator.Allocator)
|
||||
// TODO: simplify this and avoid all this duplicate code
|
||||
svcAllocator, ok := allocator.(*ipallocator.MetaAllocator)
|
||||
if ok {
|
||||
err = svcAllocator.AllocateService(service, parsedIP)
|
||||
} else {
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage"
|
||||
ipaddressstore "k8s.io/kubernetes/pkg/registry/networking/ipaddress/storage"
|
||||
networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage"
|
||||
servicecidrstore "k8s.io/kubernetes/pkg/registry/networking/servicecidr/storage"
|
||||
)
|
||||
|
||||
type RESTStorageProvider struct{}
|
||||
@@ -98,6 +99,17 @@ func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstora
|
||||
}
|
||||
storage[resource] = ipAddressStorage
|
||||
}
|
||||
|
||||
// servicecidrs
|
||||
if resource := "servicecidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) {
|
||||
serviceCIDRStorage, serviceCIDRStatusStorage, err := servicecidrstore.NewREST(restOptionsGetter)
|
||||
if err != nil {
|
||||
return storage, err
|
||||
}
|
||||
storage[resource] = serviceCIDRStorage
|
||||
storage[resource+"/status"] = serviceCIDRStatusStorage
|
||||
}
|
||||
|
||||
return storage, nil
|
||||
}
|
||||
|
||||
|
||||
17
pkg/registry/networking/servicecidr/doc.go
Normal file
17
pkg/registry/networking/servicecidr/doc.go
Normal file
@@ -0,0 +1,17 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidr // import "k8s.io/kubernetes/pkg/registry/networking/servicecidr"
|
||||
101
pkg/registry/networking/servicecidr/storage/storage.go
Normal file
101
pkg/registry/networking/servicecidr/storage/storage.go
Normal file
@@ -0,0 +1,101 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package storage
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apiserver/pkg/registry/generic"
|
||||
genericregistry "k8s.io/apiserver/pkg/registry/generic/registry"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
"k8s.io/kubernetes/pkg/printers"
|
||||
printersinternal "k8s.io/kubernetes/pkg/printers/internalversion"
|
||||
printerstorage "k8s.io/kubernetes/pkg/printers/storage"
|
||||
"k8s.io/kubernetes/pkg/registry/networking/servicecidr"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
)
|
||||
|
||||
// REST implements a RESTStorage for ServiceCIDR against etcd.
type REST struct {
	*genericregistry.Store
}
|
||||
|
||||
// NewREST returns a RESTStorage object that will work against service CIDRs.
// It returns the storage for the servicecidrs resource, the storage for the
// status subresource, and an error if the generic store cannot be completed.
func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, *StatusREST, error) {
	store := &genericregistry.Store{
		NewFunc:                   func() runtime.Object { return &networking.ServiceCIDR{} },
		NewListFunc:               func() runtime.Object { return &networking.ServiCIDRList{} },
		DefaultQualifiedResource:  networking.Resource("servicecidrs"),
		SingularQualifiedResource: networking.Resource("servicecidr"),

		// The same strategy drives create, update, and delete for the main resource.
		CreateStrategy:      servicecidr.Strategy,
		UpdateStrategy:      servicecidr.Strategy,
		DeleteStrategy:      servicecidr.Strategy,
		ResetFieldsStrategy: servicecidr.Strategy,

		TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)},
	}
	options := &generic.StoreOptions{RESTOptions: optsGetter}
	if err := store.CompleteWithOptions(options); err != nil {
		return nil, nil, err
	}

	// The status store is a shallow copy sharing the underlying storage, but it
	// uses the status strategy so writes through it only affect .status.
	statusStore := *store
	statusStore.UpdateStrategy = servicecidr.StatusStrategy
	statusStore.ResetFieldsStrategy = servicecidr.StatusStrategy
	return &REST{store}, &StatusREST{store: &statusStore}, nil
}
|
||||
|
||||
// StatusREST implements the REST endpoint for changing the status of a ServiceCIDR.
type StatusREST struct {
	store *genericregistry.Store
}
|
||||
|
||||
// New creates an empty ServiceCIDR object; required by the rest.Storage interface.
func (r *StatusREST) New() runtime.Object {
	return &networking.ServiceCIDR{}
}
|
||||
|
||||
// Destroy cleans up resources on shutdown.
func (r *StatusREST) Destroy() {
	// Given that underlying store is shared with REST,
	// we don't destroy it here explicitly.
}
|
||||
|
||||
// Get retrieves the object from the storage. It is required to support Patch.
func (r *StatusREST) Get(ctx context.Context, name string, options *metav1.GetOptions) (runtime.Object, error) {
	return r.store.Get(ctx, name, options)
}
|
||||
|
||||
// Update alters the status subset of an object. It delegates to the shared
// status store, which applies StatusStrategy (spec changes are discarded).
func (r *StatusREST) Update(ctx context.Context, name string, objInfo rest.UpdatedObjectInfo, createValidation rest.ValidateObjectFunc, updateValidation rest.ValidateObjectUpdateFunc, forceAllowCreate bool, options *metav1.UpdateOptions) (runtime.Object, bool, error) {
	// We are explicitly setting forceAllowCreate to false in the call to the underlying storage because
	// subresources should never allow create on update.
	return r.store.Update(ctx, name, objInfo, createValidation, updateValidation, false, options)
}
|
||||
|
||||
// ConvertToTable delegates table output (e.g. for kubectl get) to the underlying store.
func (r *StatusREST) ConvertToTable(ctx context.Context, object runtime.Object, tableOptions runtime.Object) (*metav1.Table, error) {
	return r.store.ConvertToTable(ctx, object, tableOptions)
}
|
||||
|
||||
// GetResetFields implements rest.ResetFieldsStrategy by delegating to the
// underlying status store's strategy.
func (r *StatusREST) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
	return r.store.GetResetFields()
}
|
||||
151
pkg/registry/networking/servicecidr/strategy.go
Normal file
151
pkg/registry/networking/servicecidr/strategy.go
Normal file
@@ -0,0 +1,151 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidr
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/validation/field"
|
||||
"k8s.io/apiserver/pkg/registry/rest"
|
||||
"k8s.io/apiserver/pkg/storage/names"
|
||||
"k8s.io/kubernetes/pkg/api/legacyscheme"
|
||||
"k8s.io/kubernetes/pkg/apis/networking"
|
||||
"k8s.io/kubernetes/pkg/apis/networking/validation"
|
||||
"sigs.k8s.io/structured-merge-diff/v4/fieldpath"
|
||||
)
|
||||
|
||||
// serviceCIDRStrategy implements verification logic for ServiceCIDR objects:
// create/update preparation, validation, and canonicalization.
type serviceCIDRStrategy struct {
	runtime.ObjectTyper
	names.NameGenerator
}
|
||||
|
||||
// Strategy is the default logic that applies when creating and updating
// ServiceCIDR objects via the REST API.
var Strategy = serviceCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator}

// Strategy should implement rest.RESTCreateStrategy
var _ rest.RESTCreateStrategy = Strategy

// Strategy should implement rest.RESTUpdateStrategy
var _ rest.RESTUpdateStrategy = Strategy
|
||||
|
||||
// NamespaceScoped returns false because all ServiceCIDRs are cluster scoped.
func (serviceCIDRStrategy) NamespaceScoped() bool {
	return false
}
|
||||
|
||||
// GetResetFields returns the set of fields that get reset by the strategy
|
||||
// and should not be modified by the user.
|
||||
func (serviceCIDRStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
|
||||
fields := map[fieldpath.APIVersion]*fieldpath.Set{
|
||||
"networking/v1alpha1": fieldpath.NewSet(
|
||||
fieldpath.MakePathOrDie("status"),
|
||||
),
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// PrepareForCreate prepares a ServiceCIDR for creation. Currently this is a
// no-op beyond asserting the object's type.
// NOTE(review): the original comment claimed this "clears the status", but no
// field is reset here — confirm whether status should be zeroed on create.
func (serviceCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {
	// Type assertion only; panics early on a wrong object type.
	_ = obj.(*networking.ServiceCIDR)

}
|
||||
|
||||
// PrepareForUpdate prepares a ServiceCIDR for update. Currently no field is
// cleared; the body only type-asserts both objects.
func (serviceCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newServiceCIDR := obj.(*networking.ServiceCIDR)
	oldServiceCIDR := old.(*networking.ServiceCIDR)

	// Intentionally unused: assertions above are the only effect today.
	_, _ = newServiceCIDR, oldServiceCIDR
}
|
||||
|
||||
// Validate validates a new ServiceCIDR.
|
||||
func (serviceCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList {
|
||||
cidrConfig := obj.(*networking.ServiceCIDR)
|
||||
err := validation.ValidateServiceCIDR(cidrConfig)
|
||||
return err
|
||||
}
|
||||
|
||||
// Canonicalize normalizes the object after validation. ServiceCIDR requires
// no normalization, so this is a no-op.
func (serviceCIDRStrategy) Canonicalize(obj runtime.Object) {
}
|
||||
|
||||
// AllowCreateOnUpdate is false for ServiceCIDR; this means POST is needed to create one.
func (serviceCIDRStrategy) AllowCreateOnUpdate() bool {
	return false
}
|
||||
|
||||
// WarningsOnCreate returns warnings for the creation of the given object.
// ServiceCIDR creation produces no warnings.
func (serviceCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string {
	return nil
}
|
||||
|
||||
// ValidateUpdate is the default update validation for an end user.
|
||||
func (serviceCIDRStrategy) ValidateUpdate(ctx context.Context, new, old runtime.Object) field.ErrorList {
|
||||
newServiceCIDR := new.(*networking.ServiceCIDR)
|
||||
oldServiceCIDR := old.(*networking.ServiceCIDR)
|
||||
errList := validation.ValidateServiceCIDR(newServiceCIDR)
|
||||
errList = append(errList, validation.ValidateServiceCIDRUpdate(newServiceCIDR, oldServiceCIDR)...)
|
||||
return errList
|
||||
}
|
||||
|
||||
// AllowUnconditionalUpdate is the default update policy for ServiceCIDR
// objects: updates without a resourceVersion precondition are permitted.
func (serviceCIDRStrategy) AllowUnconditionalUpdate() bool {
	return true
}
|
||||
|
||||
// WarningsOnUpdate returns warnings for the given update. ServiceCIDR
// updates produce no warnings.
func (serviceCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}
|
||||
|
||||
// serviceCIDRStatusStrategy implements the logic for the status subresource,
// reusing the main strategy but overriding update preparation and validation.
type serviceCIDRStatusStrategy struct {
	serviceCIDRStrategy
}
|
||||
|
||||
// StatusStrategy implements logic used to validate and prepare for updates of the status subresource
var StatusStrategy = serviceCIDRStatusStrategy{Strategy}
|
||||
|
||||
// GetResetFields returns the set of fields that get reset by the strategy
|
||||
// and should not be modified by the user.
|
||||
func (serviceCIDRStatusStrategy) GetResetFields() map[fieldpath.APIVersion]*fieldpath.Set {
|
||||
fields := map[fieldpath.APIVersion]*fieldpath.Set{
|
||||
"networking/v1alpha1": fieldpath.NewSet(
|
||||
fieldpath.MakePathOrDie("spec"),
|
||||
),
|
||||
}
|
||||
return fields
|
||||
}
|
||||
|
||||
// PrepareForUpdate clears fields that are not allowed to be set by end users on update of status
func (serviceCIDRStatusStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {
	newServiceCIDR := obj.(*networking.ServiceCIDR)
	oldServiceCIDR := old.(*networking.ServiceCIDR)
	// status changes are not allowed to update spec
	newServiceCIDR.Spec = oldServiceCIDR.Spec
}
|
||||
|
||||
// ValidateUpdate is the default update validation for an end user updating
// status; only status-specific validation runs here.
func (serviceCIDRStatusStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList {
	return validation.ValidateServiceCIDRStatusUpdate(obj.(*networking.ServiceCIDR), old.(*networking.ServiceCIDR))
}
|
||||
|
||||
// WarningsOnUpdate returns warnings for the given update. Status updates
// produce no warnings.
func (serviceCIDRStatusStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string {
	return nil
}
|
||||
17
pkg/registry/networking/servicecidr/strategy_test.go
Normal file
17
pkg/registry/networking/servicecidr/strategy_test.go
Normal file
@@ -0,0 +1,17 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidr
|
||||
679
pkg/util/iptree/iptree.go
Normal file
679
pkg/util/iptree/iptree.go
Normal file
@@ -0,0 +1,679 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package iptree
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/bits"
|
||||
"net/netip"
|
||||
)
|
||||
|
||||
// iptree implements a radix tree that uses IP prefixes as nodes and allows storing values in each node.
|
||||
//
|
||||
// r := New[int]()
|
||||
//
|
||||
// prefixes := []string{
|
||||
// "0.0.0.0/0",
|
||||
// "10.0.0.0/8",
|
||||
// "10.0.0.0/16",
|
||||
// "10.1.0.0/16",
|
||||
// "10.1.1.0/24",
|
||||
// "10.1.244.0/24",
|
||||
// "10.0.0.0/24",
|
||||
// "10.0.0.3/32",
|
||||
// "192.168.0.0/24",
|
||||
// "192.168.0.0/28",
|
||||
// "192.168.129.0/28",
|
||||
// }
|
||||
// for _, k := range prefixes {
|
||||
// r.InsertPrefix(netip.MustParsePrefix(k), 0)
|
||||
// }
|
||||
//
|
||||
// (*) means the node is not public, is not storing any value
|
||||
//
|
||||
// 0.0.0.0/0 --- 10.0.0.0/8 --- *10.0.0.0/15 --- 10.0.0.0/16 --- 10.0.0.0/24 --- 10.0.0.3/32
|
||||
// | |
|
||||
// | \ -------- 10.1.0.0/16 --- 10.1.1.0/24
|
||||
// | |
|
||||
// | \ ------- 10.1.244.0/24
|
||||
// |
|
||||
// \------ *192.168.0.0/16 --- 192.168.0.0/24 --- 192.168.0.0/28
|
||||
// |
|
||||
// \ -------- 192.168.129.0/28
|
||||
|
||||
// node is an element of radix tree with a netip.Prefix optimized to store IP prefixes.
type node[T any] struct {
	// prefix network CIDR
	prefix netip.Prefix
	// public nodes are used to store values; non-public nodes exist only
	// as branching points and hold no user data
	public bool
	val    T

	// child holds the left/right subtrees, indexed by the bit following
	// this node's prefix length
	child [2]*node[T] // binary tree
}
|
||||
|
||||
// mergeChild allow to compress the tree
|
||||
// when n has exactly one child and no value
|
||||
// p -> n -> b -> c ==> p -> b -> c
|
||||
func (n *node[T]) mergeChild() {
|
||||
// public nodes can not be merged
|
||||
if n.public {
|
||||
return
|
||||
}
|
||||
// can not merge if there are two children
|
||||
if n.child[0] != nil &&
|
||||
n.child[1] != nil {
|
||||
return
|
||||
}
|
||||
// can not merge if there are no children
|
||||
if n.child[0] == nil &&
|
||||
n.child[1] == nil {
|
||||
return
|
||||
}
|
||||
// find the child and merge it
|
||||
var child *node[T]
|
||||
if n.child[0] != nil {
|
||||
child = n.child[0]
|
||||
} else if n.child[1] != nil {
|
||||
child = n.child[1]
|
||||
}
|
||||
n.prefix = child.prefix
|
||||
n.public = child.public
|
||||
n.val = child.val
|
||||
n.child = child.child
|
||||
// remove any references from the deleted node
|
||||
// to avoid memory leak
|
||||
child.child[0] = nil
|
||||
child.child[1] = nil
|
||||
}
|
||||
|
||||
// Tree is a radix tree for IPv4 and IPv6 networks. The two address
// families are kept in separate subtrees with their own roots.
type Tree[T any] struct {
	rootV4 *node[T]
	rootV6 *node[T]
}
|
||||
|
||||
// New creates a new Radix Tree for IP addresses.
|
||||
func New[T any]() *Tree[T] {
|
||||
return &Tree[T]{
|
||||
rootV4: &node[T]{
|
||||
prefix: netip.PrefixFrom(netip.IPv4Unspecified(), 0),
|
||||
},
|
||||
rootV6: &node[T]{
|
||||
prefix: netip.PrefixFrom(netip.IPv6Unspecified(), 0),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// GetPrefix returns the stored value and true if the exact prefix exists in
// the tree. Only public (value-holding) nodes match; intermediate branch
// nodes do not.
func (t *Tree[T]) GetPrefix(prefix netip.Prefix) (T, bool) {
	var zeroT T

	// pick the root of the prefix's IP family
	n := t.rootV4
	if prefix.Addr().Is6() {
		n = t.rootV6
	}
	bitPosition := 0
	// mask the address for sanity
	address := prefix.Masked().Addr()
	// we can't check longer than the request mask
	mask := prefix.Bits()
	// walk the network bits of the prefix
	for bitPosition < mask {
		// Look for a child checking the bit position after the mask
		n = n.child[getBitFromAddr(address, bitPosition+1)]
		if n == nil {
			return zeroT, false
		}
		// check we are in the right branch comparing the suffixes
		if !n.prefix.Contains(address) {
			return zeroT, false
		}
		// update the new bit position with the new node mask
		bitPosition = n.prefix.Bits()
	}
	// check if this node is a public node and contains a prefix
	if n != nil && n.public && n.prefix == prefix {
		return n.val, true
	}

	return zeroT, false
}
|
||||
|
||||
// LongestPrefixMatch returns the longest prefix match, the stored value and true if exist.
// For example, considering the following prefixes 192.168.20.16/28 and 192.168.0.0/16,
// when the address 192.168.20.19/32 is looked up it will return 192.168.20.16/28.
func (t *Tree[T]) LongestPrefixMatch(prefix netip.Prefix) (netip.Prefix, T, bool) {
	n := t.rootV4
	if prefix.Addr().Is6() {
		n = t.rootV6
	}

	// last remembers the deepest public node seen so far on the path
	var last *node[T]
	// bit position is given by the mask bits
	bitPosition := 0
	// mask the address
	address := prefix.Masked().Addr()
	mask := prefix.Bits()
	// walk the network bits of the prefix
	for bitPosition < mask {
		if n.public {
			last = n
		}
		// Look for a child checking the bit position after the mask
		n = n.child[getBitFromAddr(address, bitPosition+1)]
		if n == nil {
			break
		}
		// check we are in the right branch comparing the suffixes
		if !n.prefix.Contains(address) {
			break
		}
		// update the new bit position with the new node mask
		bitPosition = n.prefix.Bits()
	}

	// an exact public match beats anything found along the way
	if n != nil && n.public && n.prefix == prefix {
		last = n
	}

	if last != nil {
		return last.prefix, last.val, true
	}
	var zeroT T
	return netip.Prefix{}, zeroT, false
}
|
||||
|
||||
// ShortestPrefixMatch returns the shortest prefix match, the stored value and true if exist.
// For example, considering the following prefixes 192.168.20.16/28 and 192.168.0.0/16,
// when the address 192.168.20.19/32 is looked up it will return 192.168.0.0/16.
func (t *Tree[T]) ShortestPrefixMatch(prefix netip.Prefix) (netip.Prefix, T, bool) {
	var zeroT T

	n := t.rootV4
	if prefix.Addr().Is6() {
		n = t.rootV6
	}
	// bit position is given by the mask bits
	bitPosition := 0
	// mask the address
	address := prefix.Masked().Addr()
	mask := prefix.Bits()
	for bitPosition < mask {
		// the first public node on the path is by construction the shortest match
		if n.public {
			return n.prefix, n.val, true
		}
		// Look for a child checking the bit position after the mask
		n = n.child[getBitFromAddr(address, bitPosition+1)]
		if n == nil {
			return netip.Prefix{}, zeroT, false
		}
		// check we are in the right branch comparing the suffixes
		if !n.prefix.Contains(address) {
			return netip.Prefix{}, zeroT, false
		}
		// update the new bit position with the new node mask
		bitPosition = n.prefix.Bits()
	}

	// fall back to an exact public match at the end of the walk
	if n != nil && n.public && n.prefix == prefix {
		return n.prefix, n.val, true
	}
	return netip.Prefix{}, zeroT, false
}
|
||||
|
||||
// InsertPrefix is used to add a new entry or update
|
||||
// an existing entry. Returns true if updated.
|
||||
func (t *Tree[T]) InsertPrefix(prefix netip.Prefix, v T) bool {
|
||||
n := t.rootV4
|
||||
if prefix.Addr().Is6() {
|
||||
n = t.rootV6
|
||||
}
|
||||
var parent *node[T]
|
||||
// bit position is given by the mask bits
|
||||
bitPosition := 0
|
||||
// mask the address
|
||||
address := prefix.Masked().Addr()
|
||||
mask := prefix.Bits()
|
||||
for bitPosition < mask {
|
||||
// Look for a child checking the bit position after the mask
|
||||
childIndex := getBitFromAddr(address, bitPosition+1)
|
||||
parent = n
|
||||
n = n.child[childIndex]
|
||||
// if no child create a new one with
|
||||
if n == nil {
|
||||
parent.child[childIndex] = &node[T]{
|
||||
public: true,
|
||||
val: v,
|
||||
prefix: prefix,
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// update the new bit position with the new node mask
|
||||
bitPosition = n.prefix.Bits()
|
||||
|
||||
// continue if we are in the right branch and current
|
||||
// node is our parent
|
||||
if n.prefix.Contains(address) && bitPosition <= mask {
|
||||
continue
|
||||
}
|
||||
|
||||
// Split the node and add a new child:
|
||||
// - Case 1: parent -> child -> n
|
||||
// - Case 2: parent -> newnode |--> child
|
||||
// |--> n
|
||||
child := &node[T]{
|
||||
prefix: prefix,
|
||||
public: true,
|
||||
val: v,
|
||||
}
|
||||
// Case 1: existing node is a sibling
|
||||
if prefix.Contains(n.prefix.Addr()) && bitPosition > mask {
|
||||
// parent to child
|
||||
parent.child[childIndex] = child
|
||||
pos := prefix.Bits() + 1
|
||||
// calculate if the sibling is at the left or right
|
||||
child.child[getBitFromAddr(n.prefix.Addr(), pos)] = n
|
||||
return false
|
||||
}
|
||||
|
||||
// Case 2: existing node has the same mask but different base address
|
||||
// add common ancestor and branch on it
|
||||
ancestor := findAncestor(prefix, n.prefix)
|
||||
link := &node[T]{
|
||||
prefix: ancestor,
|
||||
}
|
||||
pos := parent.prefix.Bits() + 1
|
||||
parent.child[getBitFromAddr(ancestor.Addr(), pos)] = link
|
||||
// ancestor -> children
|
||||
pos = ancestor.Bits() + 1
|
||||
idxChild := getBitFromAddr(prefix.Addr(), pos)
|
||||
idxN := getBitFromAddr(n.prefix.Addr(), pos)
|
||||
if idxChild == idxN {
|
||||
panic(fmt.Sprintf("wrong ancestor %s: child %s N %s", ancestor.String(), prefix.String(), n.prefix.String()))
|
||||
}
|
||||
link.child[idxChild] = child
|
||||
link.child[idxN] = n
|
||||
return false
|
||||
}
|
||||
|
||||
// if already exist update it and make it public
|
||||
if n != nil && n.prefix == prefix {
|
||||
if n.public {
|
||||
n.val = v
|
||||
n.public = true
|
||||
return true
|
||||
}
|
||||
n.val = v
|
||||
n.public = true
|
||||
return false
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// DeletePrefix deletes the exact prefix and returns true if it existed.
// After removing the value it prunes empty leaves and compresses
// single-child chains so the tree stays compact.
func (t *Tree[T]) DeletePrefix(prefix netip.Prefix) bool {
	root := t.rootV4
	if prefix.Addr().Is6() {
		root = t.rootV6
	}
	var parent *node[T]
	n := root
	// bit position is given by the mask bits
	bitPosition := 0
	// mask the address
	address := prefix.Masked().Addr()
	mask := prefix.Bits()
	for bitPosition < mask {
		// Look for a child checking the bit position after the mask
		parent = n
		n = n.child[getBitFromAddr(address, bitPosition+1)]
		if n == nil {
			return false
		}
		// check we are in the right branch comparing the suffixes
		if !n.prefix.Contains(address) {
			return false
		}
		// update the new bit position with the new node mask
		bitPosition = n.prefix.Bits()
	}
	// check if the node contains the prefix we want to delete
	if n.prefix != prefix {
		return false
	}
	// Delete the value and demote the node to a branch point
	n.public = false
	var zeroT T
	n.val = zeroT

	nodeChildren := 0
	if n.child[0] != nil {
		nodeChildren++
	}
	if n.child[1] != nil {
		nodeChildren++
	}
	// If there is a parent and this node does not have any children
	// this is a leaf so we can delete this node.
	// - parent -> child(to be deleted)
	if parent != nil && nodeChildren == 0 {
		if parent.child[0] != nil && parent.child[0] == n {
			parent.child[0] = nil
		} else if parent.child[1] != nil && parent.child[1] == n {
			parent.child[1] = nil
		} else {
			panic("wrong parent")
		}
		n = nil
	}
	// Check if we should merge this node
	// The root node can not be merged
	if n != root && nodeChildren == 1 {
		n.mergeChild()
	}
	// Check if we should merge the parent's other child
	// parent -> deletedNode
	//        |--> child
	parentChildren := 0
	if parent != nil {
		if parent.child[0] != nil {
			parentChildren++
		}
		if parent.child[1] != nil {
			parentChildren++
		}
		if parent != root && parentChildren == 1 && !parent.public {
			parent.mergeChild()
		}
	}
	return true
}
|
||||
|
||||
// Len returns the number of public nodes visited by DepthFirstWalk; for testing.
// NOTE(review): the result depends on which subtrees DepthFirstWalk traverses
// for the given isV6 — verify it only covers the requested family.
func (t *Tree[T]) Len(isV6 bool) int {
	count := 0
	t.DepthFirstWalk(isV6, func(k netip.Prefix, v T) bool {
		count++
		return false
	})
	return count
}
|
||||
|
||||
// WalkFn is used when walking the tree. Takes a
// key and value, returning true if iteration should
// be terminated.
type WalkFn[T any] func(s netip.Prefix, v T) bool
|
||||
|
||||
// DepthFirstWalk is used to walk the tree of the corresponding IP family
|
||||
func (t *Tree[T]) DepthFirstWalk(isIPv6 bool, fn WalkFn[T]) {
|
||||
if isIPv6 {
|
||||
recursiveWalk(t.rootV6, fn)
|
||||
}
|
||||
recursiveWalk(t.rootV4, fn)
|
||||
}
|
||||
|
||||
// recursiveWalk is used to do a pre-order walk of a node
|
||||
// recursively. Returns true if the walk should be aborted
|
||||
func recursiveWalk[T any](n *node[T], fn WalkFn[T]) bool {
|
||||
if n == nil {
|
||||
return true
|
||||
}
|
||||
// Visit the public values if any
|
||||
if n.public && fn(n.prefix, n.val) {
|
||||
return true
|
||||
}
|
||||
|
||||
// Recurse on the children
|
||||
if n.child[0] != nil {
|
||||
if recursiveWalk(n.child[0], fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
if n.child[1] != nil {
|
||||
if recursiveWalk(n.child[1], fn) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// WalkPrefix is used to walk the tree under a prefix: it descends to the
// node covering the prefix and then visits every entry below it.
func (t *Tree[T]) WalkPrefix(prefix netip.Prefix, fn WalkFn[T]) {
	n := t.rootV4
	if prefix.Addr().Is6() {
		n = t.rootV6
	}
	bitPosition := 0
	// mask the address for sanity
	address := prefix.Masked().Addr()
	// we can't check longer than the request mask
	mask := prefix.Bits()
	// walk the network bits of the prefix
	for bitPosition < mask {
		// Look for a child checking the bit position after the mask
		n = n.child[getBitFromAddr(address, bitPosition+1)]
		if n == nil {
			return
		}
		// check we are in the right branch comparing the suffixes
		if !n.prefix.Contains(address) {
			break
		}
		// update the new bit position with the new node mask
		bitPosition = n.prefix.Bits()
	}
	// visit everything below the node where the descent stopped
	recursiveWalk[T](n, fn)

}
|
||||
|
||||
// WalkPath is used to walk the tree, but only visiting nodes
// from the root down to a given IP prefix. Where WalkPrefix walks
// all the entries *under* the given prefix, this walks the
// entries *above* the given prefix.
func (t *Tree[T]) WalkPath(path netip.Prefix, fn WalkFn[T]) {
	n := t.rootV4
	if path.Addr().Is6() {
		n = t.rootV6
	}
	bitPosition := 0
	// mask the address for sanity
	address := path.Masked().Addr()
	// we can't check longer than the request mask
	mask := path.Bits()
	// walk the network bits of the prefix
	for bitPosition < mask {
		// Visit the public values if any; fn returning true aborts the walk
		if n.public && fn(n.prefix, n.val) {
			return
		}
		// Look for a child checking the bit position after the mask
		n = n.child[getBitFromAddr(address, bitPosition+1)]
		if n == nil {
			return
		}
		// check we are in the right branch comparing the suffixes
		if !n.prefix.Contains(address) {
			return
		}
		// update the new bit position with the new node mask
		bitPosition = n.prefix.Bits()
	}
	// check if this node is a public node and contains a prefix
	if n != nil && n.public && n.prefix == path {
		fn(n.prefix, n.val)
	}
}
|
||||
|
||||
// TopLevelPrefixes is used to return a map with all the Top Level prefixes
|
||||
// from the corresponding IP family and its values.
|
||||
// For example, if the tree contains entries for 10.0.0.0/8, 10.1.0.0/16, and 192.168.0.0/16,
|
||||
// this will return 10.0.0.0/8 and 192.168.0.0/16.
|
||||
func (t *Tree[T]) TopLevelPrefixes(isIPv6 bool) map[string]T {
|
||||
if isIPv6 {
|
||||
return t.topLevelPrefixes(t.rootV6)
|
||||
}
|
||||
return t.topLevelPrefixes(t.rootV4)
|
||||
}
|
||||
|
||||
// topLevelPrefixes is used to return a map with all the Top Level prefixes and its values
|
||||
func (t *Tree[T]) topLevelPrefixes(root *node[T]) map[string]T {
|
||||
result := map[string]T{}
|
||||
queue := []*node[T]{root}
|
||||
|
||||
for len(queue) > 0 {
|
||||
n := queue[0]
|
||||
queue = queue[1:]
|
||||
// store and continue, only interested on the top level prefixes
|
||||
if n.public {
|
||||
result[n.prefix.String()] = n.val
|
||||
continue
|
||||
}
|
||||
if n.child[0] != nil {
|
||||
queue = append(queue, n.child[0])
|
||||
}
|
||||
if n.child[1] != nil {
|
||||
queue = append(queue, n.child[1])
|
||||
}
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// GetHostIPPrefixMatches returns the list of prefixes that contain the specified Host IP.
// An IP is considered a Host IP if is within the subnet range and is not the network address
// or, if IPv4, the broadcast address (RFC 1878).
func (t *Tree[T]) GetHostIPPrefixMatches(ip netip.Addr) map[netip.Prefix]T {
	// walk the tree to find all the prefixes containing this IP,
	// treating the IP as a full-length /32 or /128 prefix
	ipPrefix := netip.PrefixFrom(ip, ip.BitLen())
	prefixes := map[netip.Prefix]T{}
	t.WalkPath(ipPrefix, func(k netip.Prefix, v T) bool {
		// skip prefixes for which the IP is a reserved (network/broadcast) address
		if prefixContainIP(k, ipPrefix.Addr()) {
			prefixes[k] = v
		}
		return false
	})
	return prefixes
}
|
||||
|
||||
// assume starts at 0 from the MSB: 0.1.2......31
|
||||
// return 0 or 1
|
||||
func getBitFromAddr(ip netip.Addr, pos int) int {
|
||||
bytes := ip.AsSlice()
|
||||
// get the byte in the slice
|
||||
index := (pos - 1) / 8
|
||||
if index >= len(bytes) {
|
||||
panic(fmt.Sprintf("ip %s pos %d index %d bytes %v", ip, pos, index, bytes))
|
||||
}
|
||||
// get the offset inside the byte
|
||||
offset := (pos - 1) % 8
|
||||
// check if the bit is set
|
||||
if bytes[index]&(uint8(0x80)>>offset) > 0 {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// find the common subnet, aka the one with the common prefix
|
||||
func findAncestor(a, b netip.Prefix) netip.Prefix {
|
||||
bytesA := a.Addr().AsSlice()
|
||||
bytesB := b.Addr().AsSlice()
|
||||
bytes := make([]byte, len(bytesA))
|
||||
|
||||
max := a.Bits()
|
||||
if l := b.Bits(); l < max {
|
||||
max = l
|
||||
}
|
||||
|
||||
mask := 0
|
||||
for i := range bytesA {
|
||||
xor := bytesA[i] ^ bytesB[i]
|
||||
if xor == 0 {
|
||||
bytes[i] = bytesA[i]
|
||||
mask += 8
|
||||
|
||||
} else {
|
||||
pos := bits.LeadingZeros8(xor)
|
||||
mask += pos
|
||||
// mask off the non leading zeros
|
||||
bytes[i] = bytesA[i] & (^uint8(0) << (8 - pos))
|
||||
break
|
||||
}
|
||||
}
|
||||
if mask > max {
|
||||
mask = max
|
||||
}
|
||||
|
||||
addr, ok := netip.AddrFromSlice(bytes)
|
||||
if !ok {
|
||||
panic(bytes)
|
||||
}
|
||||
ancestor := netip.PrefixFrom(addr, mask)
|
||||
return ancestor.Masked()
|
||||
}
|
||||
|
||||
// prefixContainIP returns true if the given IP is contained with the prefix,
|
||||
// is not the network address and also, if IPv4, is not the broadcast address.
|
||||
// This is required because the Kubernetes allocators reserve these addresses
|
||||
// so IPAddresses can not block deletion of this ranges.
|
||||
func prefixContainIP(prefix netip.Prefix, ip netip.Addr) bool {
|
||||
// if the IP is the network address is not contained
|
||||
if prefix.Masked().Addr() == ip {
|
||||
return false
|
||||
}
|
||||
// the broadcast address is not considered contained for IPv4
|
||||
if !ip.Is6() {
|
||||
ipLast, err := broadcastAddress(prefix)
|
||||
if err != nil || ipLast == ip {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return prefix.Contains(ip)
|
||||
}
|
||||
|
||||
// TODO(aojea) consolidate all these IPs utils
|
||||
// pkg/registry/core/service/ipallocator/ipallocator.go
|
||||
// broadcastAddress returns the broadcast address of the subnet
|
||||
// The broadcast address is obtained by setting all the host bits
|
||||
// in a subnet to 1.
|
||||
// network 192.168.0.0/24 : subnet bits 24 host bits 32 - 24 = 8
|
||||
// broadcast address 192.168.0.255
|
||||
func broadcastAddress(subnet netip.Prefix) (netip.Addr, error) {
|
||||
base := subnet.Masked().Addr()
|
||||
bytes := base.AsSlice()
|
||||
// get all the host bits from the subnet
|
||||
n := 8*len(bytes) - subnet.Bits()
|
||||
// set all the host bits to 1
|
||||
for i := len(bytes) - 1; i >= 0 && n > 0; i-- {
|
||||
if n >= 8 {
|
||||
bytes[i] = 0xff
|
||||
n -= 8
|
||||
} else {
|
||||
mask := ^uint8(0) >> (8 - n)
|
||||
bytes[i] |= mask
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
addr, ok := netip.AddrFromSlice(bytes)
|
||||
if !ok {
|
||||
return netip.Addr{}, fmt.Errorf("invalid address %v", bytes)
|
||||
}
|
||||
return addr, nil
|
||||
}
|
||||
781
pkg/util/iptree/iptree_test.go
Normal file
781
pkg/util/iptree/iptree_test.go
Normal file
@@ -0,0 +1,781 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package iptree
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"net/netip"
|
||||
"reflect"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
)
|
||||
|
||||
// Test_InsertGetDelete exercises the basic lifecycle of a single prefix —
// insert, lookup, delete, lookup-after-delete — for both IP families.
func Test_InsertGetDelete(t *testing.T) {
	testCases := []struct {
		name   string
		prefix netip.Prefix
	}{
		{
			name:   "ipv4",
			prefix: netip.MustParsePrefix("192.168.0.0/24"),
		},
		{
			name:   "ipv6",
			prefix: netip.MustParsePrefix("fd00:1:2:3::/124"),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			tree := New[int]()
			// InsertPrefix returns true only when it UPDATED an existing
			// entry, so the first insert must return false.
			ok := tree.InsertPrefix(tc.prefix, 1)
			if ok {
				t.Fatal("should not exist")
			}
			if _, ok := tree.GetPrefix(tc.prefix); !ok {
				t.Errorf("CIDR %s not found", tc.prefix)
			}
			if ok := tree.DeletePrefix(tc.prefix); !ok {
				t.Errorf("CIDR %s not deleted", tc.prefix)
			}
			// the prefix must be gone after deletion
			if _, ok := tree.GetPrefix(tc.prefix); ok {
				t.Errorf("CIDR %s found", tc.prefix)
			}
		})
	}

}
|
||||
|
||||
func TestBasicIPv4(t *testing.T) {
|
||||
tree := New[int]()
|
||||
// insert
|
||||
ipnet := netip.MustParsePrefix("192.168.0.0/24")
|
||||
ok := tree.InsertPrefix(ipnet, 1)
|
||||
if ok {
|
||||
t.Fatal("should not exist")
|
||||
}
|
||||
// check exist
|
||||
if _, ok := tree.GetPrefix(ipnet); !ok {
|
||||
t.Errorf("CIDR %s not found", ipnet)
|
||||
}
|
||||
|
||||
// check does not exist
|
||||
ipnet2 := netip.MustParsePrefix("12.1.0.0/16")
|
||||
if _, ok := tree.GetPrefix(ipnet2); ok {
|
||||
t.Errorf("CIDR %s not expected", ipnet2)
|
||||
}
|
||||
|
||||
// check insert existing prefix updates the value
|
||||
ok = tree.InsertPrefix(ipnet2, 2)
|
||||
if ok {
|
||||
t.Errorf("should not exist: %s", ipnet2)
|
||||
}
|
||||
|
||||
ok = tree.InsertPrefix(ipnet2, 3)
|
||||
if !ok {
|
||||
t.Errorf("should be updated: %s", ipnet2)
|
||||
}
|
||||
|
||||
if v, ok := tree.GetPrefix(ipnet2); !ok || v != 3 {
|
||||
t.Errorf("CIDR %s not expected", ipnet2)
|
||||
}
|
||||
|
||||
// check longer prefix matching
|
||||
ipnet3 := netip.MustParsePrefix("12.1.0.2/32")
|
||||
lpm, _, ok := tree.LongestPrefixMatch(ipnet3)
|
||||
if !ok || lpm != ipnet2 {
|
||||
t.Errorf("expected %s got %s", ipnet2, lpm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBasicIPv6(t *testing.T) {
|
||||
tree := New[int]()
|
||||
// insert
|
||||
ipnet := netip.MustParsePrefix("2001:db8::/64")
|
||||
ok := tree.InsertPrefix(ipnet, 1)
|
||||
if ok {
|
||||
t.Fatal("should not exist")
|
||||
}
|
||||
// check exist
|
||||
if _, ok := tree.GetPrefix(ipnet); !ok {
|
||||
t.Errorf("CIDR %s not found", ipnet)
|
||||
}
|
||||
|
||||
// check does not exist
|
||||
ipnet2 := netip.MustParsePrefix("2001:db8:1:3:4::/64")
|
||||
if _, ok := tree.GetPrefix(ipnet2); ok {
|
||||
t.Errorf("CIDR %s not expected", ipnet2)
|
||||
}
|
||||
|
||||
// check insert existing prefix updates the value
|
||||
ok = tree.InsertPrefix(ipnet2, 2)
|
||||
if ok {
|
||||
t.Errorf("should not exist: %s", ipnet2)
|
||||
}
|
||||
|
||||
ok = tree.InsertPrefix(ipnet2, 3)
|
||||
if !ok {
|
||||
t.Errorf("should be updated: %s", ipnet2)
|
||||
}
|
||||
|
||||
if v, ok := tree.GetPrefix(ipnet2); !ok || v != 3 {
|
||||
t.Errorf("CIDR %s not expected", ipnet2)
|
||||
}
|
||||
|
||||
// check longer prefix matching
|
||||
ipnet3 := netip.MustParsePrefix("2001:db8:1:3:4::/96")
|
||||
lpm, _, ok := tree.LongestPrefixMatch(ipnet3)
|
||||
if !ok || lpm != ipnet2 {
|
||||
t.Errorf("expected %s got %s", ipnet2, lpm)
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertGetDelete100K(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
is6 bool
|
||||
}{
|
||||
{
|
||||
name: "ipv4",
|
||||
},
|
||||
{
|
||||
name: "ipv6",
|
||||
is6: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
cidrs := generateRandomCIDRs(tc.is6, 100*1000)
|
||||
tree := New[string]()
|
||||
|
||||
for k := range cidrs {
|
||||
ok := tree.InsertPrefix(k, k.String())
|
||||
if ok {
|
||||
t.Errorf("error inserting: %v", k)
|
||||
}
|
||||
}
|
||||
|
||||
if tree.Len(tc.is6) != len(cidrs) {
|
||||
t.Errorf("expected %d nodes on the tree, got %d", len(cidrs), tree.Len(tc.is6))
|
||||
}
|
||||
|
||||
list := cidrs.UnsortedList()
|
||||
for _, k := range list {
|
||||
if v, ok := tree.GetPrefix(k); !ok {
|
||||
t.Errorf("CIDR %s not found", k)
|
||||
return
|
||||
} else if v != k.String() {
|
||||
t.Errorf("CIDR value %s not found", k)
|
||||
return
|
||||
}
|
||||
ok := tree.DeletePrefix(k)
|
||||
if !ok {
|
||||
t.Errorf("CIDR delete %s error", k)
|
||||
}
|
||||
}
|
||||
|
||||
if tree.Len(tc.is6) != 0 {
|
||||
t.Errorf("No node expected on the tree, got: %d %v", tree.Len(tc.is6), cidrs)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_findAncestor(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
a netip.Prefix
|
||||
b netip.Prefix
|
||||
want netip.Prefix
|
||||
}{
|
||||
{
|
||||
name: "ipv4 direct parent",
|
||||
a: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
b: netip.MustParsePrefix("192.168.1.0/24"),
|
||||
want: netip.MustParsePrefix("192.168.0.0/23"),
|
||||
},
|
||||
{
|
||||
name: "ipv4 root parent ",
|
||||
a: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
b: netip.MustParsePrefix("1.168.1.0/24"),
|
||||
want: netip.MustParsePrefix("0.0.0.0/0"),
|
||||
},
|
||||
{
|
||||
name: "ipv4 parent /1",
|
||||
a: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
b: netip.MustParsePrefix("184.168.1.0/24"),
|
||||
want: netip.MustParsePrefix("128.0.0.0/1"),
|
||||
},
|
||||
{
|
||||
name: "ipv6 direct parent",
|
||||
a: netip.MustParsePrefix("fd00:1:1:1::/64"),
|
||||
b: netip.MustParsePrefix("fd00:1:1:2::/64"),
|
||||
want: netip.MustParsePrefix("fd00:1:1::/62"),
|
||||
},
|
||||
{
|
||||
name: "ipv6 root parent ",
|
||||
a: netip.MustParsePrefix("fd00:1:1:1::/64"),
|
||||
b: netip.MustParsePrefix("1:1:1:1::/64"),
|
||||
want: netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := findAncestor(tt.a, tt.b); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("findAncestor() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getBitFromAddr(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
ip netip.Addr
|
||||
pos int
|
||||
want int
|
||||
}{
|
||||
// 192.168.0.0
|
||||
// 11000000.10101000.00000000.00000001
|
||||
{
|
||||
name: "ipv4 first is a one",
|
||||
ip: netip.MustParseAddr("192.168.0.0"),
|
||||
pos: 1,
|
||||
want: 1,
|
||||
},
|
||||
{
|
||||
name: "ipv4 middle is a zero",
|
||||
ip: netip.MustParseAddr("192.168.0.0"),
|
||||
pos: 16,
|
||||
want: 0,
|
||||
},
|
||||
{
|
||||
name: "ipv4 middle is a one",
|
||||
ip: netip.MustParseAddr("192.168.0.0"),
|
||||
pos: 13,
|
||||
want: 1,
|
||||
},
|
||||
{
|
||||
name: "ipv4 last is a zero",
|
||||
ip: netip.MustParseAddr("192.168.0.0"),
|
||||
pos: 32,
|
||||
want: 0,
|
||||
},
|
||||
// 2001:db8::ff00:42:8329
|
||||
// 0010000000000001:0000110110111000:0000000000000000:0000000000000000:0000000000000000:1111111100000000:0000000001000010:1000001100101001
|
||||
{
|
||||
name: "ipv6 first is a zero",
|
||||
ip: netip.MustParseAddr("2001:db8::ff00:42:8329"),
|
||||
pos: 1,
|
||||
want: 0,
|
||||
},
|
||||
{
|
||||
name: "ipv6 middle is a zero",
|
||||
ip: netip.MustParseAddr("2001:db8::ff00:42:8329"),
|
||||
pos: 56,
|
||||
want: 0,
|
||||
},
|
||||
{
|
||||
name: "ipv6 middle is a one",
|
||||
ip: netip.MustParseAddr("2001:db8::ff00:42:8329"),
|
||||
pos: 81,
|
||||
want: 1,
|
||||
},
|
||||
{
|
||||
name: "ipv6 last is a one",
|
||||
ip: netip.MustParseAddr("2001:db8::ff00:42:8329"),
|
||||
pos: 128,
|
||||
want: 1,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := getBitFromAddr(tt.ip, tt.pos); got != tt.want {
|
||||
t.Errorf("getBitFromAddr() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestShortestPrefix(t *testing.T) {
|
||||
r := New[int]()
|
||||
|
||||
keys := []string{
|
||||
"10.0.0.0/8",
|
||||
"10.21.0.0/16",
|
||||
"10.221.0.0/16",
|
||||
"10.1.2.3/32",
|
||||
"10.1.2.0/24",
|
||||
"192.168.0.0/24",
|
||||
"192.168.0.0/16",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), 0)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
if r.Len(false) != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(false), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out string
|
||||
}
|
||||
cases := []exp{
|
||||
{"192.168.0.3/32", "192.168.0.0/16"},
|
||||
{"10.1.2.4/21", "10.0.0.0/8"},
|
||||
{"192.168.0.0/16", "192.168.0.0/16"},
|
||||
{"192.168.0.0/32", "192.168.0.0/16"},
|
||||
{"10.1.2.3/32", "10.0.0.0/8"},
|
||||
}
|
||||
for _, test := range cases {
|
||||
m, _, ok := r.ShortestPrefixMatch(netip.MustParsePrefix(test.inp))
|
||||
if !ok {
|
||||
t.Fatalf("no match: %v", test)
|
||||
}
|
||||
if m != netip.MustParsePrefix(test.out) {
|
||||
t.Fatalf("mis-match: %v %v", m, test)
|
||||
}
|
||||
}
|
||||
|
||||
// not match
|
||||
_, _, ok := r.ShortestPrefixMatch(netip.MustParsePrefix("0.0.0.0/0"))
|
||||
if ok {
|
||||
t.Fatalf("match unexpected for 0.0.0.0/0")
|
||||
}
|
||||
}
|
||||
|
||||
func TestLongestPrefixMatch(t *testing.T) {
|
||||
r := New[int]()
|
||||
|
||||
keys := []string{
|
||||
"10.0.0.0/8",
|
||||
"10.21.0.0/16",
|
||||
"10.221.0.0/16",
|
||||
"10.1.2.3/32",
|
||||
"10.1.2.0/24",
|
||||
"192.168.0.0/24",
|
||||
"192.168.0.0/16",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), 0)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
if r.Len(false) != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(false), len(keys))
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out string
|
||||
}
|
||||
cases := []exp{
|
||||
{"192.168.0.3/32", "192.168.0.0/24"},
|
||||
{"10.1.2.4/21", "10.0.0.0/8"},
|
||||
{"10.21.2.0/24", "10.21.0.0/16"},
|
||||
{"10.1.2.3/32", "10.1.2.3/32"},
|
||||
}
|
||||
for _, test := range cases {
|
||||
m, _, ok := r.LongestPrefixMatch(netip.MustParsePrefix(test.inp))
|
||||
if !ok {
|
||||
t.Fatalf("no match: %v", test)
|
||||
}
|
||||
if m != netip.MustParsePrefix(test.out) {
|
||||
t.Fatalf("mis-match: %v %v", m, test)
|
||||
}
|
||||
}
|
||||
// not match
|
||||
_, _, ok := r.LongestPrefixMatch(netip.MustParsePrefix("0.0.0.0/0"))
|
||||
if ok {
|
||||
t.Fatalf("match unexpected for 0.0.0.0/0")
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopLevelPrefixesV4(t *testing.T) {
|
||||
r := New[string]()
|
||||
|
||||
keys := []string{
|
||||
"10.0.0.0/8",
|
||||
"10.21.0.0/16",
|
||||
"10.221.0.0/16",
|
||||
"10.1.2.3/32",
|
||||
"10.1.2.0/24",
|
||||
"192.168.0.0/20",
|
||||
"192.168.1.0/24",
|
||||
"172.16.0.0/12",
|
||||
"172.21.23.0/24",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), k)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
if r.Len(false) != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(false), len(keys))
|
||||
}
|
||||
|
||||
expected := []string{
|
||||
"10.0.0.0/8",
|
||||
"192.168.0.0/20",
|
||||
"172.16.0.0/12",
|
||||
}
|
||||
parents := r.TopLevelPrefixes(false)
|
||||
if len(parents) != len(expected) {
|
||||
t.Fatalf("bad len: %v %v", len(parents), len(expected))
|
||||
}
|
||||
|
||||
for _, k := range expected {
|
||||
v, ok := parents[k]
|
||||
if !ok {
|
||||
t.Errorf("key %s not found", k)
|
||||
}
|
||||
if v != k {
|
||||
t.Errorf("value expected %s got %s", k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestTopLevelPrefixesV6(t *testing.T) {
|
||||
r := New[string]()
|
||||
|
||||
keys := []string{
|
||||
"2001:db8:1:2:3::/64",
|
||||
"2001:db8::/64",
|
||||
"2001:db8:1:1:1::/64",
|
||||
"2001:db8:1:1:1::/112",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), k)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
|
||||
if r.Len(true) != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(true), len(keys))
|
||||
}
|
||||
|
||||
expected := []string{
|
||||
"2001:db8::/64",
|
||||
"2001:db8:1:2:3::/64",
|
||||
"2001:db8:1:1:1::/64",
|
||||
}
|
||||
parents := r.TopLevelPrefixes(true)
|
||||
if len(parents) != len(expected) {
|
||||
t.Fatalf("bad len: %v %v", len(parents), len(expected))
|
||||
}
|
||||
|
||||
for _, k := range expected {
|
||||
v, ok := parents[k]
|
||||
if !ok {
|
||||
t.Errorf("key %s not found", k)
|
||||
}
|
||||
if v != k {
|
||||
t.Errorf("value expected %s got %s", k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkV4(t *testing.T) {
|
||||
r := New[int]()
|
||||
|
||||
keys := []string{
|
||||
"10.0.0.0/8",
|
||||
"10.1.0.0/16",
|
||||
"10.1.1.0/24",
|
||||
"10.1.1.32/26",
|
||||
"10.1.1.33/32",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), 0)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
if r.Len(false) != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(false), len(keys))
|
||||
}
|
||||
|
||||
// match exact prefix
|
||||
path := []string{}
|
||||
r.WalkPath(netip.MustParsePrefix("10.1.1.32/26"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys[:4]) {
|
||||
t.Errorf("Walkpath expected %v got %v", keys[:4], path)
|
||||
}
|
||||
// not match on prefix
|
||||
path = []string{}
|
||||
r.WalkPath(netip.MustParsePrefix("10.1.1.33/26"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys[:3]) {
|
||||
t.Errorf("Walkpath expected %v got %v", keys[:3], path)
|
||||
}
|
||||
// match exact prefix
|
||||
path = []string{}
|
||||
r.WalkPrefix(netip.MustParsePrefix("10.0.0.0/8"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys) {
|
||||
t.Errorf("WalkPrefix expected %v got %v", keys, path)
|
||||
}
|
||||
// not match on prefix
|
||||
path = []string{}
|
||||
r.WalkPrefix(netip.MustParsePrefix("10.0.0.0/9"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys[1:]) {
|
||||
t.Errorf("WalkPrefix expected %v got %v", keys[1:], path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestWalkV6(t *testing.T) {
|
||||
r := New[int]()
|
||||
|
||||
keys := []string{
|
||||
"2001:db8::/48",
|
||||
"2001:db8::/64",
|
||||
"2001:db8::/96",
|
||||
"2001:db8::/112",
|
||||
"2001:db8::/128",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), 0)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
if r.Len(true) != len(keys) {
|
||||
t.Fatalf("bad len: %v %v", r.Len(false), len(keys))
|
||||
}
|
||||
|
||||
// match exact prefix
|
||||
path := []string{}
|
||||
r.WalkPath(netip.MustParsePrefix("2001:db8::/112"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys[:4]) {
|
||||
t.Errorf("Walkpath expected %v got %v", keys[:4], path)
|
||||
}
|
||||
// not match on prefix
|
||||
path = []string{}
|
||||
r.WalkPath(netip.MustParsePrefix("2001:db8::1/112"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys[:3]) {
|
||||
t.Errorf("Walkpath expected %v got %v", keys[:3], path)
|
||||
}
|
||||
// match exact prefix
|
||||
path = []string{}
|
||||
r.WalkPrefix(netip.MustParsePrefix("2001:db8::/48"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys) {
|
||||
t.Errorf("WalkPrefix expected %v got %v", keys, path)
|
||||
}
|
||||
// not match on prefix
|
||||
path = []string{}
|
||||
r.WalkPrefix(netip.MustParsePrefix("2001:db8::/49"), func(k netip.Prefix, v int) bool {
|
||||
path = append(path, k.String())
|
||||
return false
|
||||
})
|
||||
if !cmp.Equal(path, keys[1:]) {
|
||||
t.Errorf("WalkPrefix expected %v got %v", keys[1:], path)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetHostIPPrefixMatches(t *testing.T) {
|
||||
r := New[int]()
|
||||
|
||||
keys := []string{
|
||||
"10.0.0.0/8",
|
||||
"10.21.0.0/16",
|
||||
"10.221.0.0/16",
|
||||
"10.1.2.3/32",
|
||||
"10.1.2.0/24",
|
||||
"192.168.0.0/24",
|
||||
"192.168.0.0/16",
|
||||
"2001:db8::/48",
|
||||
"2001:db8::/64",
|
||||
"2001:db8::/96",
|
||||
}
|
||||
for _, k := range keys {
|
||||
ok := r.InsertPrefix(netip.MustParsePrefix(k), 0)
|
||||
if ok {
|
||||
t.Errorf("unexpected update on insert %s", k)
|
||||
}
|
||||
}
|
||||
|
||||
type exp struct {
|
||||
inp string
|
||||
out []string
|
||||
}
|
||||
cases := []exp{
|
||||
{"192.168.0.3", []string{"192.168.0.0/24", "192.168.0.0/16"}},
|
||||
{"10.1.2.4", []string{"10.1.2.0/24", "10.0.0.0/8"}},
|
||||
{"10.1.2.0", []string{"10.0.0.0/8"}},
|
||||
{"10.1.2.255", []string{"10.0.0.0/8"}},
|
||||
{"192.168.0.0", []string{}},
|
||||
{"192.168.1.0", []string{"192.168.0.0/16"}},
|
||||
{"10.1.2.255", []string{"10.0.0.0/8"}},
|
||||
{"2001:db8::1", []string{"2001:db8::/96", "2001:db8::/64", "2001:db8::/48"}},
|
||||
{"2001:db8::", []string{}},
|
||||
{"2001:db8::ffff:ffff:ffff:ffff", []string{"2001:db8::/64", "2001:db8::/48"}},
|
||||
}
|
||||
for _, test := range cases {
|
||||
m := r.GetHostIPPrefixMatches(netip.MustParseAddr(test.inp))
|
||||
in := []netip.Prefix{}
|
||||
for k := range m {
|
||||
in = append(in, k)
|
||||
}
|
||||
out := []netip.Prefix{}
|
||||
for _, s := range test.out {
|
||||
out = append(out, netip.MustParsePrefix(s))
|
||||
}
|
||||
|
||||
// sort by prefix bits to avoid flakes
|
||||
sort.Slice(in, func(i, j int) bool { return in[i].Bits() < in[j].Bits() })
|
||||
sort.Slice(out, func(i, j int) bool { return out[i].Bits() < out[j].Bits() })
|
||||
if !reflect.DeepEqual(in, out) {
|
||||
t.Fatalf("mis-match: %v %v", in, out)
|
||||
}
|
||||
}
|
||||
|
||||
// not match
|
||||
_, _, ok := r.ShortestPrefixMatch(netip.MustParsePrefix("0.0.0.0/0"))
|
||||
if ok {
|
||||
t.Fatalf("match unexpected for 0.0.0.0/0")
|
||||
}
|
||||
}
|
||||
|
||||
func Test_prefixContainIP(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
prefix netip.Prefix
|
||||
ip netip.Addr
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "IPv4 contains",
|
||||
prefix: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
ip: netip.MustParseAddr("192.168.0.1"),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "IPv4 network address",
|
||||
prefix: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
ip: netip.MustParseAddr("192.168.0.0"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 broadcast address",
|
||||
prefix: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
ip: netip.MustParseAddr("192.168.0.255"),
|
||||
},
|
||||
{
|
||||
name: "IPv4 does not contain",
|
||||
prefix: netip.MustParsePrefix("192.168.0.0/24"),
|
||||
ip: netip.MustParseAddr("192.168.1.2"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 contains",
|
||||
prefix: netip.MustParsePrefix("2001:db2::/96"),
|
||||
ip: netip.MustParseAddr("2001:db2::1"),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "IPv6 network address",
|
||||
prefix: netip.MustParsePrefix("2001:db2::/96"),
|
||||
ip: netip.MustParseAddr("2001:db2::"),
|
||||
},
|
||||
{
|
||||
name: "IPv6 broadcast address",
|
||||
prefix: netip.MustParsePrefix("2001:db2::/96"),
|
||||
ip: netip.MustParseAddr("2001:db2::ffff:ffff"),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "IPv6 does not contain",
|
||||
prefix: netip.MustParsePrefix("2001:db2::/96"),
|
||||
ip: netip.MustParseAddr("2001:db2:1:2:3::1"),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := prefixContainIP(tt.prefix, tt.ip); got != tt.want {
|
||||
t.Errorf("prefixContainIP() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// BenchmarkInsertUpdate measures the cost of re-inserting (i.e. updating the
// value of) an already-present prefix in a pre-populated IPv6 tree.
func BenchmarkInsertUpdate(b *testing.B) {
	r := New[bool]()
	// pre-populate with ~20k random IPv6 prefixes, outside the timed region
	ipList := generateRandomCIDRs(true, 20000).UnsortedList()
	for _, ip := range ipList {
		r.InsertPrefix(ip, true)
	}

	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		// every insert hits an existing key, so this exercises the update path
		r.InsertPrefix(ipList[n%len(ipList)], true)
	}
}
|
||||
|
||||
func generateRandomCIDRs(is6 bool, number int) sets.Set[netip.Prefix] {
|
||||
n := 4
|
||||
if is6 {
|
||||
n = 16
|
||||
}
|
||||
cidrs := sets.Set[netip.Prefix]{}
|
||||
rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
for i := 0; i < number; i++ {
|
||||
bytes := make([]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
bytes[i] = uint8(rand.Intn(255))
|
||||
}
|
||||
|
||||
ip, ok := netip.AddrFromSlice(bytes)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
bits := rand.Intn(n * 8)
|
||||
prefix := netip.PrefixFrom(ip, bits).Masked()
|
||||
if prefix.IsValid() {
|
||||
cidrs.Insert(prefix)
|
||||
}
|
||||
}
|
||||
return cidrs
|
||||
}
|
||||
@@ -369,6 +369,16 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding)
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "service-cidrs-controller"},
|
||||
Rules: []rbacv1.PolicyRule{
|
||||
rbacv1helpers.NewRule("get", "list", "watch", "patch", "update").Groups(networkingGroup).Resources("servicecidrs").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch", "update").Groups(networkingGroup).Resources("servicecidrs/finalizers").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("patch", "update").Groups(networkingGroup).Resources("servicecidrs/status").RuleOrDie(),
|
||||
rbacv1helpers.NewRule("get", "list", "watch").Groups(networkingGroup).Resources("ipaddresses").RuleOrDie(),
|
||||
eventsRule(),
|
||||
},
|
||||
})
|
||||
addControllerRole(&controllerRoles, &controllerRoleBindings, func() rbacv1.ClusterRole {
|
||||
role := rbacv1.ClusterRole{
|
||||
ObjectMeta: metav1.ObjectMeta{Name: saRolePrefix + "statefulset-controller"},
|
||||
|
||||
@@ -476,6 +476,23 @@ items:
|
||||
- kind: ServiceAccount
|
||||
name: service-account-controller
|
||||
namespace: kube-system
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:controller:service-cidrs-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: system:controller:service-cidrs-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: service-cidrs-controller
|
||||
namespace: kube-system
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
|
||||
@@ -1373,6 +1373,57 @@ items:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
annotations:
|
||||
rbac.authorization.kubernetes.io/autoupdate: "true"
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
kubernetes.io/bootstrapping: rbac-defaults
|
||||
name: system:controller:service-cidrs-controller
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- servicecidrs
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- patch
|
||||
- update
|
||||
- watch
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- servicecidrs/finalizers
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- servicecidrs/status
|
||||
verbs:
|
||||
- patch
|
||||
- update
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- ipaddresses
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
- events.k8s.io
|
||||
resources:
|
||||
- events
|
||||
verbs:
|
||||
- create
|
||||
- patch
|
||||
- update
|
||||
- apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
|
||||
@@ -25,6 +25,7 @@ import (
|
||||
io "io"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
||||
math "math"
|
||||
math_bits "math/bits"
|
||||
@@ -155,11 +156,127 @@ func (m *ParentReference) XXX_DiscardUnknown() {
|
||||
|
||||
var xxx_messageInfo_ParentReference proto.InternalMessageInfo
|
||||
|
||||
func (m *ServiceCIDR) Reset() { *m = ServiceCIDR{} }
|
||||
func (*ServiceCIDR) ProtoMessage() {}
|
||||
func (*ServiceCIDR) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c1b7ac8d7d97acec, []int{4}
|
||||
}
|
||||
func (m *ServiceCIDR) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ServiceCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *ServiceCIDR) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServiceCIDR.Merge(m, src)
|
||||
}
|
||||
func (m *ServiceCIDR) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ServiceCIDR) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServiceCIDR.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServiceCIDR proto.InternalMessageInfo
|
||||
|
||||
func (m *ServiceCIDRList) Reset() { *m = ServiceCIDRList{} }
|
||||
func (*ServiceCIDRList) ProtoMessage() {}
|
||||
func (*ServiceCIDRList) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c1b7ac8d7d97acec, []int{5}
|
||||
}
|
||||
func (m *ServiceCIDRList) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ServiceCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *ServiceCIDRList) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServiceCIDRList.Merge(m, src)
|
||||
}
|
||||
func (m *ServiceCIDRList) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ServiceCIDRList) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServiceCIDRList.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServiceCIDRList proto.InternalMessageInfo
|
||||
|
||||
func (m *ServiceCIDRSpec) Reset() { *m = ServiceCIDRSpec{} }
|
||||
func (*ServiceCIDRSpec) ProtoMessage() {}
|
||||
func (*ServiceCIDRSpec) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c1b7ac8d7d97acec, []int{6}
|
||||
}
|
||||
func (m *ServiceCIDRSpec) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ServiceCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *ServiceCIDRSpec) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServiceCIDRSpec.Merge(m, src)
|
||||
}
|
||||
func (m *ServiceCIDRSpec) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ServiceCIDRSpec) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServiceCIDRSpec.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServiceCIDRSpec proto.InternalMessageInfo
|
||||
|
||||
func (m *ServiceCIDRStatus) Reset() { *m = ServiceCIDRStatus{} }
|
||||
func (*ServiceCIDRStatus) ProtoMessage() {}
|
||||
func (*ServiceCIDRStatus) Descriptor() ([]byte, []int) {
|
||||
return fileDescriptor_c1b7ac8d7d97acec, []int{7}
|
||||
}
|
||||
func (m *ServiceCIDRStatus) XXX_Unmarshal(b []byte) error {
|
||||
return m.Unmarshal(b)
|
||||
}
|
||||
func (m *ServiceCIDRStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||
b = b[:cap(b)]
|
||||
n, err := m.MarshalToSizedBuffer(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b[:n], nil
|
||||
}
|
||||
func (m *ServiceCIDRStatus) XXX_Merge(src proto.Message) {
|
||||
xxx_messageInfo_ServiceCIDRStatus.Merge(m, src)
|
||||
}
|
||||
func (m *ServiceCIDRStatus) XXX_Size() int {
|
||||
return m.Size()
|
||||
}
|
||||
func (m *ServiceCIDRStatus) XXX_DiscardUnknown() {
|
||||
xxx_messageInfo_ServiceCIDRStatus.DiscardUnknown(m)
|
||||
}
|
||||
|
||||
var xxx_messageInfo_ServiceCIDRStatus proto.InternalMessageInfo
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress")
|
||||
proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList")
|
||||
proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec")
|
||||
proto.RegisterType((*ParentReference)(nil), "k8s.io.api.networking.v1alpha1.ParentReference")
|
||||
proto.RegisterType((*ServiceCIDR)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDR")
|
||||
proto.RegisterType((*ServiceCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRList")
|
||||
proto.RegisterType((*ServiceCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRSpec")
|
||||
proto.RegisterType((*ServiceCIDRStatus)(nil), "k8s.io.api.networking.v1alpha1.ServiceCIDRStatus")
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -167,39 +284,48 @@ func init() {
|
||||
}
|
||||
|
||||
var fileDescriptor_c1b7ac8d7d97acec = []byte{
|
||||
// 509 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40,
|
||||
0x10, 0x8d, 0xdb, 0x44, 0x8a, 0xb7, 0x44, 0x80, 0x4f, 0x51, 0x0e, 0x9b, 0x28, 0x5c, 0x8a, 0x44,
|
||||
0x77, 0x49, 0x85, 0x10, 0x57, 0x22, 0xa4, 0xaa, 0x12, 0xb4, 0x95, 0xb9, 0xa1, 0x1e, 0xd8, 0x38,
|
||||
0x53, 0xc7, 0x18, 0xef, 0xae, 0x76, 0xd7, 0x41, 0xdc, 0xf8, 0x09, 0xfc, 0x1b, 0x4e, 0x70, 0xce,
|
||||
0xb1, 0xc7, 0x9e, 0x22, 0x62, 0xfe, 0x08, 0xda, 0x8d, 0x63, 0x57, 0x8d, 0xfa, 0x71, 0xf3, 0xbc,
|
||||
0x79, 0xef, 0xcd, 0xbc, 0x59, 0x19, 0x1d, 0xa5, 0x6f, 0x34, 0x49, 0x04, 0x4d, 0xf3, 0x09, 0x28,
|
||||
0x0e, 0x06, 0x34, 0x9d, 0x03, 0x9f, 0x0a, 0x45, 0xcb, 0x06, 0x93, 0x09, 0xe5, 0x60, 0xbe, 0x09,
|
||||
0x95, 0x26, 0x3c, 0xa6, 0xf3, 0x11, 0xfb, 0x2a, 0x67, 0x6c, 0x44, 0x63, 0xe0, 0xa0, 0x98, 0x81,
|
||||
0x29, 0x91, 0x4a, 0x18, 0x11, 0xe0, 0x35, 0x9f, 0x30, 0x99, 0x90, 0x9a, 0x4f, 0x36, 0xfc, 0xde,
|
||||
0x41, 0x9c, 0x98, 0x59, 0x3e, 0x21, 0x91, 0xc8, 0x68, 0x2c, 0x62, 0x41, 0x9d, 0x6c, 0x92, 0x5f,
|
||||
0xb8, 0xca, 0x15, 0xee, 0x6b, 0x6d, 0xd7, 0x7b, 0x55, 0x8f, 0xcf, 0x58, 0x34, 0x4b, 0x38, 0xa8,
|
||||
0xef, 0x54, 0xa6, 0xb1, 0x05, 0x34, 0xcd, 0xc0, 0x30, 0x3a, 0xdf, 0x5a, 0xa2, 0x47, 0x6f, 0x53,
|
||||
0xa9, 0x9c, 0x9b, 0x24, 0x83, 0x2d, 0xc1, 0xeb, 0xfb, 0x04, 0x3a, 0x9a, 0x41, 0xc6, 0x6e, 0xea,
|
||||
0x86, 0x7f, 0x3c, 0xe4, 0x1f, 0x9f, 0xbd, 0x9d, 0x4e, 0x15, 0x68, 0x1d, 0x7c, 0x46, 0x6d, 0xbb,
|
||||
0xd1, 0x94, 0x19, 0xd6, 0xf5, 0x06, 0xde, 0xfe, 0xde, 0xe1, 0x4b, 0x52, 0x9f, 0xa3, 0x32, 0x26,
|
||||
0x32, 0x8d, 0x2d, 0xa0, 0x89, 0x65, 0x93, 0xf9, 0x88, 0x9c, 0x4e, 0xbe, 0x40, 0x64, 0x3e, 0x80,
|
||||
0x61, 0xe3, 0x60, 0xb1, 0xec, 0x37, 0x8a, 0x65, 0x1f, 0xd5, 0x58, 0x58, 0xb9, 0x06, 0xa7, 0xa8,
|
||||
0xa9, 0x25, 0x44, 0xdd, 0x1d, 0xe7, 0x7e, 0x40, 0xee, 0x3e, 0x36, 0xa9, 0x56, 0xfb, 0x28, 0x21,
|
||||
0x1a, 0x3f, 0x2a, 0xad, 0x9b, 0xb6, 0x0a, 0x9d, 0xd1, 0xf0, 0xb7, 0x87, 0x3a, 0x15, 0xeb, 0x7d,
|
||||
0xa2, 0x4d, 0x70, 0xbe, 0x15, 0x82, 0x3c, 0x2c, 0x84, 0x55, 0xbb, 0x08, 0x4f, 0xca, 0x39, 0xed,
|
||||
0x0d, 0x72, 0x2d, 0xc0, 0x09, 0x6a, 0x25, 0x06, 0x32, 0xdd, 0xdd, 0x19, 0xec, 0xee, 0xef, 0x1d,
|
||||
0x3e, 0x7f, 0x70, 0x82, 0x71, 0xa7, 0x74, 0x6d, 0x1d, 0x5b, 0x7d, 0xb8, 0xb6, 0x19, 0x66, 0xd7,
|
||||
0xd6, 0xb7, 0xb1, 0x82, 0x73, 0xe4, 0x4b, 0xa6, 0x80, 0x9b, 0x10, 0x2e, 0xca, 0xfd, 0xe9, 0x7d,
|
||||
0x43, 0xce, 0x36, 0x02, 0x50, 0xc0, 0x23, 0x18, 0x77, 0x8a, 0x65, 0xdf, 0xaf, 0xc0, 0xb0, 0x36,
|
||||
0x1c, 0xfe, 0xf2, 0xd0, 0xe3, 0x1b, 0xec, 0xe0, 0x19, 0x6a, 0xc5, 0x4a, 0xe4, 0xd2, 0x4d, 0xf3,
|
||||
0xeb, 0x3d, 0x8f, 0x2c, 0x18, 0xae, 0x7b, 0xc1, 0x0b, 0xd4, 0x56, 0xa0, 0x45, 0xae, 0x22, 0x70,
|
||||
0x8f, 0xe7, 0xd7, 0x57, 0x0a, 0x4b, 0x3c, 0xac, 0x18, 0x01, 0x45, 0x3e, 0x67, 0x19, 0x68, 0xc9,
|
||||
0x22, 0xe8, 0xee, 0x3a, 0xfa, 0xd3, 0x92, 0xee, 0x9f, 0x6c, 0x1a, 0x61, 0xcd, 0x09, 0x06, 0xa8,
|
||||
0x69, 0x8b, 0x6e, 0xd3, 0x71, 0xab, 0x87, 0xb6, 0xdc, 0xd0, 0x75, 0xc6, 0xef, 0x16, 0x2b, 0xdc,
|
||||
0xb8, 0x5c, 0xe1, 0xc6, 0xd5, 0x0a, 0x37, 0x7e, 0x14, 0xd8, 0x5b, 0x14, 0xd8, 0xbb, 0x2c, 0xb0,
|
||||
0x77, 0x55, 0x60, 0xef, 0x6f, 0x81, 0xbd, 0x9f, 0xff, 0x70, 0xe3, 0x13, 0xbe, 0xfb, 0x6f, 0xff,
|
||||
0x1f, 0x00, 0x00, 0xff, 0xff, 0xde, 0x6a, 0x6d, 0x5e, 0x27, 0x04, 0x00, 0x00,
|
||||
// 648 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcd, 0x6e, 0xd3, 0x4c,
|
||||
0x14, 0x8d, 0xdb, 0xa4, 0xaa, 0x27, 0x5f, 0xbf, 0x52, 0xaf, 0xa2, 0x2e, 0x9c, 0x28, 0x6c, 0x8a,
|
||||
0xa0, 0x33, 0x24, 0x42, 0x88, 0x2d, 0x6e, 0xa5, 0xaa, 0x12, 0xb4, 0x65, 0xba, 0x02, 0x75, 0xc1,
|
||||
0xc4, 0xbe, 0x75, 0x4c, 0xf0, 0x8f, 0x66, 0xc6, 0x01, 0x76, 0x3c, 0x02, 0x2f, 0xc0, 0x73, 0xb0,
|
||||
0x02, 0x89, 0x5d, 0x97, 0x5d, 0x76, 0x55, 0x51, 0xf3, 0x22, 0x68, 0xc6, 0x8e, 0x9d, 0x34, 0xea,
|
||||
0xdf, 0xa6, 0x3b, 0xcf, 0xb9, 0xe7, 0x9c, 0xb9, 0xe7, 0xce, 0x8c, 0x8c, 0x76, 0x46, 0x2f, 0x04,
|
||||
0x0e, 0x62, 0x32, 0x4a, 0x07, 0xc0, 0x23, 0x90, 0x20, 0xc8, 0x18, 0x22, 0x2f, 0xe6, 0xa4, 0x28,
|
||||
0xb0, 0x24, 0x20, 0x11, 0xc8, 0x4f, 0x31, 0x1f, 0x05, 0x91, 0x4f, 0xc6, 0x3d, 0xf6, 0x31, 0x19,
|
||||
0xb2, 0x1e, 0xf1, 0x21, 0x02, 0xce, 0x24, 0x78, 0x38, 0xe1, 0xb1, 0x8c, 0x2d, 0x3b, 0xe7, 0x63,
|
||||
0x96, 0x04, 0xb8, 0xe2, 0xe3, 0x09, 0x7f, 0x7d, 0xd3, 0x0f, 0xe4, 0x30, 0x1d, 0x60, 0x37, 0x0e,
|
||||
0x89, 0x1f, 0xfb, 0x31, 0xd1, 0xb2, 0x41, 0x7a, 0xac, 0x57, 0x7a, 0xa1, 0xbf, 0x72, 0xbb, 0xf5,
|
||||
0x67, 0xd5, 0xf6, 0x21, 0x73, 0x87, 0x41, 0x04, 0xfc, 0x0b, 0x49, 0x46, 0xbe, 0x02, 0x04, 0x09,
|
||||
0x41, 0x32, 0x32, 0x9e, 0x6b, 0x62, 0x9d, 0x5c, 0xa5, 0xe2, 0x69, 0x24, 0x83, 0x10, 0xe6, 0x04,
|
||||
0xcf, 0x6f, 0x12, 0x08, 0x77, 0x08, 0x21, 0xbb, 0xac, 0xeb, 0xfe, 0x32, 0x90, 0xb9, 0x7b, 0xf0,
|
||||
0xd2, 0xf3, 0x38, 0x08, 0x61, 0xbd, 0x47, 0xcb, 0xaa, 0x23, 0x8f, 0x49, 0xd6, 0x32, 0x3a, 0xc6,
|
||||
0x46, 0xb3, 0xff, 0x14, 0x57, 0xe3, 0x28, 0x8d, 0x71, 0x32, 0xf2, 0x15, 0x20, 0xb0, 0x62, 0xe3,
|
||||
0x71, 0x0f, 0xef, 0x0f, 0x3e, 0x80, 0x2b, 0x5f, 0x83, 0x64, 0x8e, 0x75, 0x72, 0xde, 0xae, 0x65,
|
||||
0xe7, 0x6d, 0x54, 0x61, 0xb4, 0x74, 0xb5, 0xf6, 0x51, 0x5d, 0x24, 0xe0, 0xb6, 0x16, 0xb4, 0xfb,
|
||||
0x26, 0xbe, 0x7e, 0xd8, 0xb8, 0x6c, 0xed, 0x30, 0x01, 0xd7, 0xf9, 0xaf, 0xb0, 0xae, 0xab, 0x15,
|
||||
0xd5, 0x46, 0xdd, 0x9f, 0x06, 0x5a, 0x29, 0x59, 0xaf, 0x02, 0x21, 0xad, 0xa3, 0xb9, 0x10, 0xf8,
|
||||
0x76, 0x21, 0x94, 0x5a, 0x47, 0x78, 0x50, 0xec, 0xb3, 0x3c, 0x41, 0xa6, 0x02, 0xec, 0xa1, 0x46,
|
||||
0x20, 0x21, 0x14, 0xad, 0x85, 0xce, 0xe2, 0x46, 0xb3, 0xff, 0xe8, 0xd6, 0x09, 0x9c, 0x95, 0xc2,
|
||||
0xb5, 0xb1, 0xab, 0xf4, 0x34, 0xb7, 0xe9, 0x86, 0x53, 0xed, 0xab, 0x58, 0xd6, 0x11, 0x32, 0x13,
|
||||
0xc6, 0x21, 0x92, 0x14, 0x8e, 0x8b, 0xfe, 0xc9, 0x4d, 0x9b, 0x1c, 0x4c, 0x04, 0xc0, 0x21, 0x72,
|
||||
0xc1, 0x59, 0xc9, 0xce, 0xdb, 0x66, 0x09, 0xd2, 0xca, 0xb0, 0xfb, 0xc3, 0x40, 0xab, 0x97, 0xd8,
|
||||
0xd6, 0x43, 0xd4, 0xf0, 0x79, 0x9c, 0x26, 0x7a, 0x37, 0xb3, 0xea, 0x73, 0x47, 0x81, 0x34, 0xaf,
|
||||
0x59, 0x4f, 0xd0, 0x32, 0x07, 0x11, 0xa7, 0xdc, 0x05, 0x7d, 0x78, 0x66, 0x35, 0x25, 0x5a, 0xe0,
|
||||
0xb4, 0x64, 0x58, 0x04, 0x99, 0x11, 0x0b, 0x41, 0x24, 0xcc, 0x85, 0xd6, 0xa2, 0xa6, 0xaf, 0x15,
|
||||
0x74, 0x73, 0x6f, 0x52, 0xa0, 0x15, 0xc7, 0xea, 0xa0, 0xba, 0x5a, 0xb4, 0xea, 0x9a, 0x5b, 0x1e,
|
||||
0xb4, 0xe2, 0x52, 0x5d, 0xe9, 0x7e, 0x5f, 0x40, 0xcd, 0x43, 0xe0, 0xe3, 0xc0, 0x85, 0xad, 0xdd,
|
||||
0x6d, 0x7a, 0x0f, 0x77, 0xf5, 0xcd, 0xcc, 0x5d, 0xbd, 0xf1, 0x10, 0xa6, 0x9a, 0xbb, 0xea, 0xb6,
|
||||
0x5a, 0x6f, 0xd1, 0x92, 0x90, 0x4c, 0xa6, 0x42, 0x0f, 0xa5, 0xd9, 0xef, 0xdd, 0xc5, 0x54, 0x0b,
|
||||
0x9d, 0xff, 0x0b, 0xdb, 0xa5, 0x7c, 0x4d, 0x0b, 0xc3, 0xee, 0x6f, 0x03, 0xad, 0x4e, 0xb1, 0xef,
|
||||
0xe1, 0x29, 0x1c, 0xcc, 0x3e, 0x85, 0xc7, 0x77, 0xc8, 0x72, 0xc5, 0x63, 0xe8, 0xcf, 0x44, 0xd0,
|
||||
0xcf, 0xa1, 0x8d, 0x1a, 0x6e, 0xe0, 0x71, 0xd1, 0x32, 0x3a, 0x8b, 0x1b, 0xa6, 0x63, 0x2a, 0x8d,
|
||||
0x2a, 0x0a, 0x9a, 0xe3, 0xdd, 0xcf, 0x68, 0x6d, 0x6e, 0x48, 0x96, 0x8b, 0x90, 0x1b, 0x47, 0x5e,
|
||||
0x20, 0x83, 0x38, 0xca, 0xa5, 0xb3, 0x07, 0x78, 0x4d, 0xf4, 0xad, 0x89, 0xae, 0xba, 0x1d, 0x25,
|
||||
0x24, 0xe8, 0x94, 0xad, 0xb3, 0x7d, 0x72, 0x61, 0xd7, 0x4e, 0x2f, 0xec, 0xda, 0xd9, 0x85, 0x5d,
|
||||
0xfb, 0x9a, 0xd9, 0xc6, 0x49, 0x66, 0x1b, 0xa7, 0x99, 0x6d, 0x9c, 0x65, 0xb6, 0xf1, 0x27, 0xb3,
|
||||
0x8d, 0x6f, 0x7f, 0xed, 0xda, 0x3b, 0xfb, 0xfa, 0xff, 0xcf, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff,
|
||||
0x29, 0x82, 0x11, 0x57, 0xb9, 0x06, 0x00, 0x00,
|
||||
}
|
||||
|
||||
func (m *IPAddress) Marshal() (dAtA []byte, err error) {
|
||||
@@ -370,6 +496,175 @@ func (m *ParentReference) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDR) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDR) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ServiceCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
{
|
||||
size, err := m.Status.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x1a
|
||||
{
|
||||
size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
{
|
||||
size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRList) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRList) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Items) > 0 {
|
||||
for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0x12
|
||||
}
|
||||
}
|
||||
{
|
||||
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRSpec) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRSpec) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.CIDRs) > 0 {
|
||||
for iNdEx := len(m.CIDRs) - 1; iNdEx >= 0; iNdEx-- {
|
||||
i -= len(m.CIDRs[iNdEx])
|
||||
copy(dAtA[i:], m.CIDRs[iNdEx])
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDRs[iNdEx])))
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRStatus) Marshal() (dAtA []byte, err error) {
|
||||
size := m.Size()
|
||||
dAtA = make([]byte, size)
|
||||
n, err := m.MarshalToSizedBuffer(dAtA[:size])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dAtA[:n], nil
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRStatus) MarshalTo(dAtA []byte) (int, error) {
|
||||
size := m.Size()
|
||||
return m.MarshalToSizedBuffer(dAtA[:size])
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
|
||||
i := len(dAtA)
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Conditions) > 0 {
|
||||
for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- {
|
||||
{
|
||||
size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
i -= size
|
||||
i = encodeVarintGenerated(dAtA, i, uint64(size))
|
||||
}
|
||||
i--
|
||||
dAtA[i] = 0xa
|
||||
}
|
||||
}
|
||||
return len(dAtA) - i, nil
|
||||
}
|
||||
|
||||
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
|
||||
offset -= sovGenerated(v)
|
||||
base := offset
|
||||
@@ -441,6 +736,68 @@ func (m *ParentReference) Size() (n int) {
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ServiceCIDR) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = m.ObjectMeta.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
l = m.Spec.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
l = m.Status.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRList) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
l = m.ListMeta.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
if len(m.Items) > 0 {
|
||||
for _, e := range m.Items {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRSpec) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.CIDRs) > 0 {
|
||||
for _, s := range m.CIDRs {
|
||||
l = len(s)
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func (m *ServiceCIDRStatus) Size() (n int) {
|
||||
if m == nil {
|
||||
return 0
|
||||
}
|
||||
var l int
|
||||
_ = l
|
||||
if len(m.Conditions) > 0 {
|
||||
for _, e := range m.Conditions {
|
||||
l = e.Size()
|
||||
n += 1 + l + sovGenerated(uint64(l))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovGenerated(x uint64) (n int) {
|
||||
return (math_bits.Len64(x|1) + 6) / 7
|
||||
}
|
||||
@@ -497,6 +854,59 @@ func (this *ParentReference) String() string {
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *ServiceCIDR) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ServiceCIDR{`,
|
||||
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`,
|
||||
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceCIDRSpec", "ServiceCIDRSpec", 1), `&`, ``, 1) + `,`,
|
||||
`Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceCIDRStatus", "ServiceCIDRStatus", 1), `&`, ``, 1) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *ServiceCIDRList) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
repeatedStringForItems := "[]ServiceCIDR{"
|
||||
for _, f := range this.Items {
|
||||
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceCIDR", "ServiceCIDR", 1), `&`, ``, 1) + ","
|
||||
}
|
||||
repeatedStringForItems += "}"
|
||||
s := strings.Join([]string{`&ServiceCIDRList{`,
|
||||
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`,
|
||||
`Items:` + repeatedStringForItems + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *ServiceCIDRSpec) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
s := strings.Join([]string{`&ServiceCIDRSpec{`,
|
||||
`CIDRs:` + fmt.Sprintf("%v", this.CIDRs) + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func (this *ServiceCIDRStatus) String() string {
|
||||
if this == nil {
|
||||
return "nil"
|
||||
}
|
||||
repeatedStringForConditions := "[]Condition{"
|
||||
for _, f := range this.Conditions {
|
||||
repeatedStringForConditions += fmt.Sprintf("%v", f) + ","
|
||||
}
|
||||
repeatedStringForConditions += "}"
|
||||
s := strings.Join([]string{`&ServiceCIDRStatus{`,
|
||||
`Conditions:` + repeatedStringForConditions + `,`,
|
||||
`}`,
|
||||
}, "")
|
||||
return s
|
||||
}
|
||||
func valueToStringGenerated(v interface{}) string {
|
||||
rv := reflect.ValueOf(v)
|
||||
if rv.IsNil() {
|
||||
@@ -1002,6 +1412,438 @@ func (m *ParentReference) Unmarshal(dAtA []byte) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ServiceCIDR) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ServiceCIDR: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ServiceCIDR: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 3:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ServiceCIDRList) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ServiceCIDRList: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ServiceCIDRList: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
case 2:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Items = append(m.Items, ServiceCIDR{})
|
||||
if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ServiceCIDRSpec) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ServiceCIDRSpec: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ServiceCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field CIDRs", wireType)
|
||||
}
|
||||
var stringLen uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
stringLen |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLen := int(stringLen)
|
||||
if intStringLen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + intStringLen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.CIDRs = append(m.CIDRs, string(dAtA[iNdEx:postIndex]))
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func (m *ServiceCIDRStatus) Unmarshal(dAtA []byte) error {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
wire |= uint64(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: ServiceCIDRStatus: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: ServiceCIDRStatus: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowGenerated
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := dAtA[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= int(b&0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
m.Conditions = append(m.Conditions, v1.Condition{})
|
||||
if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
|
||||
return err
|
||||
}
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipGenerated(dAtA[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if (skippy < 0) || (iNdEx+skippy) < 0 {
|
||||
return ErrInvalidLengthGenerated
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipGenerated(dAtA []byte) (n int, err error) {
|
||||
l := len(dAtA)
|
||||
iNdEx := 0
|
||||
|
||||
@@ -85,3 +85,54 @@ message ParentReference {
|
||||
optional string name = 4;
|
||||
}
|
||||
|
||||
// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
|
||||
// This range is used to allocate ClusterIPs to Service objects.
|
||||
message ServiceCIDR {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
|
||||
|
||||
// spec is the desired state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
optional ServiceCIDRSpec spec = 2;
|
||||
|
||||
// status represents the current state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
optional ServiceCIDRStatus status = 3;
|
||||
}
|
||||
|
||||
// ServiceCIDRList contains a list of ServiceCIDR objects.
|
||||
message ServiceCIDRList {
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
|
||||
|
||||
// items is the list of ServiceCIDRs.
|
||||
repeated ServiceCIDR items = 2;
|
||||
}
|
||||
|
||||
// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
|
||||
message ServiceCIDRSpec {
|
||||
// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
|
||||
// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
repeated string cidrs = 1;
|
||||
}
|
||||
|
||||
// ServiceCIDRStatus describes the current state of the ServiceCIDR.
|
||||
message ServiceCIDRStatus {
|
||||
// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
|
||||
// Current service state
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
repeated k8s.io.apimachinery.pkg.apis.meta.v1.Condition conditions = 1;
|
||||
}
|
||||
|
||||
|
||||
@@ -54,6 +54,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
&IPAddress{},
|
||||
&IPAddressList{},
|
||||
&ServiceCIDR{},
|
||||
&ServiceCIDRList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
|
||||
@@ -81,3 +81,70 @@ type IPAddressList struct {
|
||||
// items is the list of IPAddresses.
|
||||
Items []IPAddress `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.27
|
||||
|
||||
// ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64).
|
||||
// This range is used to allocate ClusterIPs to Service objects.
|
||||
type ServiceCIDR struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// spec is the desired state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Spec ServiceCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
// status represents the current state of the ServiceCIDR.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Status ServiceCIDRStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
||||
// ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.
|
||||
type ServiceCIDRSpec struct {
|
||||
// CIDRs defines the IP blocks in CIDR notation (e.g. "192.168.0.0/24" or "2001:db8::/64")
|
||||
// from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family.
|
||||
// This field is immutable.
|
||||
// +optional
|
||||
CIDRs []string `json:"cidrs,omitempty" protobuf:"bytes,1,opt,name=cidrs"`
|
||||
}
|
||||
|
||||
const (
|
||||
// ServiceCIDRConditionReady represents status of a ServiceCIDR that is ready to be used by the
|
||||
// apiserver to allocate ClusterIPs for Services.
|
||||
ServiceCIDRConditionReady = "Ready"
|
||||
// ServiceCIDRReasonTerminating represents a reason where a ServiceCIDR is not ready because it is
|
||||
// being deleted.
|
||||
ServiceCIDRReasonTerminating = "Terminating"
|
||||
)
|
||||
|
||||
// ServiceCIDRStatus describes the current state of the ServiceCIDR.
|
||||
type ServiceCIDRStatus struct {
|
||||
// conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR.
|
||||
// Current service state
|
||||
// +optional
|
||||
// +patchMergeKey=type
|
||||
// +patchStrategy=merge
|
||||
// +listType=map
|
||||
// +listMapKey=type
|
||||
Conditions []metav1.Condition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"`
|
||||
}
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
// +k8s:prerelease-lifecycle-gen:introduced=1.27
|
||||
|
||||
// ServiceCIDRList contains a list of ServiceCIDR objects.
|
||||
type ServiceCIDRList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
// items is the list of ServiceCIDRs.
|
||||
Items []ServiceCIDR `json:"items" protobuf:"bytes,2,rep,name=items"`
|
||||
}
|
||||
|
||||
@@ -68,4 +68,43 @@ func (ParentReference) SwaggerDoc() map[string]string {
|
||||
return map_ParentReference
|
||||
}
|
||||
|
||||
var map_ServiceCIDR = map[string]string{
|
||||
"": "ServiceCIDR defines a range of IP addresses using CIDR format (e.g. 192.168.0.0/24 or 2001:db2::/64). This range is used to allocate ClusterIPs to Service objects.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"spec": "spec is the desired state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
"status": "status represents the current state of the ServiceCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
|
||||
}
|
||||
|
||||
func (ServiceCIDR) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDR
|
||||
}
|
||||
|
||||
var map_ServiceCIDRList = map[string]string{
|
||||
"": "ServiceCIDRList contains a list of ServiceCIDR objects.",
|
||||
"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
|
||||
"items": "items is the list of ServiceCIDRs.",
|
||||
}
|
||||
|
||||
func (ServiceCIDRList) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDRList
|
||||
}
|
||||
|
||||
var map_ServiceCIDRSpec = map[string]string{
|
||||
"": "ServiceCIDRSpec define the CIDRs the user wants to use for allocating ClusterIPs for Services.",
|
||||
"cidrs": "CIDRs defines the IP blocks in CIDR notation (e.g. \"192.168.0.0/24\" or \"2001:db8::/64\") from which to assign service cluster IPs. Max of two CIDRs is allowed, one of each IP family. This field is immutable.",
|
||||
}
|
||||
|
||||
func (ServiceCIDRSpec) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDRSpec
|
||||
}
|
||||
|
||||
var map_ServiceCIDRStatus = map[string]string{
|
||||
"": "ServiceCIDRStatus describes the current state of the ServiceCIDR.",
|
||||
"conditions": "conditions holds an array of metav1.Condition that describe the state of the ServiceCIDR. Current service state",
|
||||
}
|
||||
|
||||
func (ServiceCIDRStatus) SwaggerDoc() map[string]string {
|
||||
return map_ServiceCIDRStatus
|
||||
}
|
||||
|
||||
// AUTO-GENERATED FUNCTIONS END HERE
|
||||
|
||||
@@ -22,6 +22,7 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
@@ -121,3 +122,108 @@ func (in *ParentReference) DeepCopy() *ParentReference {
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDR) DeepCopyInto(out *ServiceCIDR) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDR.
|
||||
func (in *ServiceCIDR) DeepCopy() *ServiceCIDR {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDR)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ServiceCIDR) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDRList) DeepCopyInto(out *ServiceCIDRList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ListMeta.DeepCopyInto(&out.ListMeta)
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]ServiceCIDR, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRList.
|
||||
func (in *ServiceCIDRList) DeepCopy() *ServiceCIDRList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDRList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *ServiceCIDRList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDRSpec) DeepCopyInto(out *ServiceCIDRSpec) {
|
||||
*out = *in
|
||||
if in.CIDRs != nil {
|
||||
in, out := &in.CIDRs, &out.CIDRs
|
||||
*out = make([]string, len(*in))
|
||||
copy(*out, *in)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRSpec.
|
||||
func (in *ServiceCIDRSpec) DeepCopy() *ServiceCIDRSpec {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDRSpec)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ServiceCIDRStatus) DeepCopyInto(out *ServiceCIDRStatus) {
|
||||
*out = *in
|
||||
if in.Conditions != nil {
|
||||
in, out := &in.Conditions, &out.Conditions
|
||||
*out = make([]v1.Condition, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceCIDRStatus.
|
||||
func (in *ServiceCIDRStatus) DeepCopy() *ServiceCIDRStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(ServiceCIDRStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
@@ -56,3 +56,39 @@ func (in *IPAddressList) APILifecycleDeprecated() (major, minor int) {
|
||||
func (in *IPAddressList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 33
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ServiceCIDR) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 27
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *ServiceCIDR) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ServiceCIDR) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 33
|
||||
}
|
||||
|
||||
// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go.
|
||||
func (in *ServiceCIDRList) APILifecycleIntroduced() (major, minor int) {
|
||||
return 1, 27
|
||||
}
|
||||
|
||||
// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor.
|
||||
func (in *ServiceCIDRList) APILifecycleDeprecated() (major, minor int) {
|
||||
return 1, 30
|
||||
}
|
||||
|
||||
// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison.
|
||||
// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor.
|
||||
func (in *ServiceCIDRList) APILifecycleRemoved() (major, minor int) {
|
||||
return 1, 33
|
||||
}
|
||||
|
||||
63
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ServiceCIDR.json
vendored
Normal file
63
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ServiceCIDR.json
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
{
|
||||
"kind": "ServiceCIDR",
|
||||
"apiVersion": "networking.k8s.io/v1alpha1",
|
||||
"metadata": {
|
||||
"name": "nameValue",
|
||||
"generateName": "generateNameValue",
|
||||
"namespace": "namespaceValue",
|
||||
"selfLink": "selfLinkValue",
|
||||
"uid": "uidValue",
|
||||
"resourceVersion": "resourceVersionValue",
|
||||
"generation": 7,
|
||||
"creationTimestamp": "2008-01-01T01:01:01Z",
|
||||
"deletionTimestamp": "2009-01-01T01:01:01Z",
|
||||
"deletionGracePeriodSeconds": 10,
|
||||
"labels": {
|
||||
"labelsKey": "labelsValue"
|
||||
},
|
||||
"annotations": {
|
||||
"annotationsKey": "annotationsValue"
|
||||
},
|
||||
"ownerReferences": [
|
||||
{
|
||||
"apiVersion": "apiVersionValue",
|
||||
"kind": "kindValue",
|
||||
"name": "nameValue",
|
||||
"uid": "uidValue",
|
||||
"controller": true,
|
||||
"blockOwnerDeletion": true
|
||||
}
|
||||
],
|
||||
"finalizers": [
|
||||
"finalizersValue"
|
||||
],
|
||||
"managedFields": [
|
||||
{
|
||||
"manager": "managerValue",
|
||||
"operation": "operationValue",
|
||||
"apiVersion": "apiVersionValue",
|
||||
"time": "2004-01-01T01:01:01Z",
|
||||
"fieldsType": "fieldsTypeValue",
|
||||
"fieldsV1": {},
|
||||
"subresource": "subresourceValue"
|
||||
}
|
||||
]
|
||||
},
|
||||
"spec": {
|
||||
"cidrs": [
|
||||
"cidrsValue"
|
||||
]
|
||||
},
|
||||
"status": {
|
||||
"conditions": [
|
||||
{
|
||||
"type": "typeValue",
|
||||
"status": "statusValue",
|
||||
"observedGeneration": 3,
|
||||
"lastTransitionTime": "2004-01-01T01:01:01Z",
|
||||
"reason": "reasonValue",
|
||||
"message": "messageValue"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
BIN
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ServiceCIDR.pb
vendored
Normal file
BIN
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ServiceCIDR.pb
vendored
Normal file
Binary file not shown.
45
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ServiceCIDR.yaml
vendored
Normal file
45
staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ServiceCIDR.yaml
vendored
Normal file
@@ -0,0 +1,45 @@
|
||||
apiVersion: networking.k8s.io/v1alpha1
|
||||
kind: ServiceCIDR
|
||||
metadata:
|
||||
annotations:
|
||||
annotationsKey: annotationsValue
|
||||
creationTimestamp: "2008-01-01T01:01:01Z"
|
||||
deletionGracePeriodSeconds: 10
|
||||
deletionTimestamp: "2009-01-01T01:01:01Z"
|
||||
finalizers:
|
||||
- finalizersValue
|
||||
generateName: generateNameValue
|
||||
generation: 7
|
||||
labels:
|
||||
labelsKey: labelsValue
|
||||
managedFields:
|
||||
- apiVersion: apiVersionValue
|
||||
fieldsType: fieldsTypeValue
|
||||
fieldsV1: {}
|
||||
manager: managerValue
|
||||
operation: operationValue
|
||||
subresource: subresourceValue
|
||||
time: "2004-01-01T01:01:01Z"
|
||||
name: nameValue
|
||||
namespace: namespaceValue
|
||||
ownerReferences:
|
||||
- apiVersion: apiVersionValue
|
||||
blockOwnerDeletion: true
|
||||
controller: true
|
||||
kind: kindValue
|
||||
name: nameValue
|
||||
uid: uidValue
|
||||
resourceVersion: resourceVersionValue
|
||||
selfLink: selfLinkValue
|
||||
uid: uidValue
|
||||
spec:
|
||||
cidrs:
|
||||
- cidrsValue
|
||||
status:
|
||||
conditions:
|
||||
- lastTransitionTime: "2004-01-01T01:01:01Z"
|
||||
message: messageValue
|
||||
observedGeneration: 3
|
||||
reason: reasonValue
|
||||
status: statusValue
|
||||
type: typeValue
|
||||
@@ -10588,6 +10588,47 @@ var schemaYAML = typed.YAMLObject(`types:
|
||||
- name: resource
|
||||
type:
|
||||
scalar: string
|
||||
- name: io.k8s.api.networking.v1alpha1.ServiceCIDR
|
||||
map:
|
||||
fields:
|
||||
- name: apiVersion
|
||||
type:
|
||||
scalar: string
|
||||
- name: kind
|
||||
type:
|
||||
scalar: string
|
||||
- name: metadata
|
||||
type:
|
||||
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta
|
||||
default: {}
|
||||
- name: spec
|
||||
type:
|
||||
namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec
|
||||
default: {}
|
||||
- name: status
|
||||
type:
|
||||
namedType: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus
|
||||
default: {}
|
||||
- name: io.k8s.api.networking.v1alpha1.ServiceCIDRSpec
|
||||
map:
|
||||
fields:
|
||||
- name: cidrs
|
||||
type:
|
||||
list:
|
||||
elementType:
|
||||
scalar: string
|
||||
elementRelationship: atomic
|
||||
- name: io.k8s.api.networking.v1alpha1.ServiceCIDRStatus
|
||||
map:
|
||||
fields:
|
||||
- name: conditions
|
||||
type:
|
||||
list:
|
||||
elementType:
|
||||
namedType: io.k8s.apimachinery.pkg.apis.meta.v1.Condition
|
||||
elementRelationship: associative
|
||||
keys:
|
||||
- type
|
||||
- name: io.k8s.api.networking.v1beta1.HTTPIngressPath
|
||||
map:
|
||||
fields:
|
||||
|
||||
@@ -0,0 +1,256 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by applyconfiguration-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
managedfields "k8s.io/apimachinery/pkg/util/managedfields"
|
||||
internal "k8s.io/client-go/applyconfigurations/internal"
|
||||
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
)
|
||||
|
||||
// ServiceCIDRApplyConfiguration represents an declarative configuration of the ServiceCIDR type for use
|
||||
// with apply.
|
||||
type ServiceCIDRApplyConfiguration struct {
|
||||
v1.TypeMetaApplyConfiguration `json:",inline"`
|
||||
*v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"`
|
||||
Spec *ServiceCIDRSpecApplyConfiguration `json:"spec,omitempty"`
|
||||
Status *ServiceCIDRStatusApplyConfiguration `json:"status,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceCIDR constructs an declarative configuration of the ServiceCIDR type for use with
|
||||
// apply.
|
||||
func ServiceCIDR(name string) *ServiceCIDRApplyConfiguration {
|
||||
b := &ServiceCIDRApplyConfiguration{}
|
||||
b.WithName(name)
|
||||
b.WithKind("ServiceCIDR")
|
||||
b.WithAPIVersion("networking.k8s.io/v1alpha1")
|
||||
return b
|
||||
}
|
||||
|
||||
// ExtractServiceCIDR extracts the applied configuration owned by fieldManager from
|
||||
// serviceCIDR. If no managedFields are found in serviceCIDR for fieldManager, a
|
||||
// ServiceCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
|
||||
// APIVersion and Kind populated. It is possible that no managed fields were found for because other
|
||||
// field managers have taken ownership of all the fields previously owned by fieldManager, or because
|
||||
// the fieldManager never owned fields any fields.
|
||||
// serviceCIDR must be a unmodified ServiceCIDR API object that was retrieved from the Kubernetes API.
|
||||
// ExtractServiceCIDR provides a way to perform a extract/modify-in-place/apply workflow.
|
||||
// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
|
||||
// applied if another fieldManager has updated or force applied any of the previously applied fields.
|
||||
// Experimental!
|
||||
func ExtractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
|
||||
return extractServiceCIDR(serviceCIDR, fieldManager, "")
|
||||
}
|
||||
|
||||
// ExtractServiceCIDRStatus is the same as ExtractServiceCIDR except
|
||||
// that it extracts the status subresource applied configuration.
|
||||
// Experimental!
|
||||
func ExtractServiceCIDRStatus(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string) (*ServiceCIDRApplyConfiguration, error) {
|
||||
return extractServiceCIDR(serviceCIDR, fieldManager, "status")
|
||||
}
|
||||
|
||||
func extractServiceCIDR(serviceCIDR *networkingv1alpha1.ServiceCIDR, fieldManager string, subresource string) (*ServiceCIDRApplyConfiguration, error) {
|
||||
b := &ServiceCIDRApplyConfiguration{}
|
||||
err := managedfields.ExtractInto(serviceCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ServiceCIDR"), fieldManager, b, subresource)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.WithName(serviceCIDR.Name)
|
||||
|
||||
b.WithKind("ServiceCIDR")
|
||||
b.WithAPIVersion("networking.k8s.io/v1alpha1")
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// WithKind sets the Kind field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Kind field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithKind(value string) *ServiceCIDRApplyConfiguration {
|
||||
b.Kind = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the APIVersion field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithAPIVersion(value string) *ServiceCIDRApplyConfiguration {
|
||||
b.APIVersion = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithName sets the Name field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Name field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithName(value string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Name = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithGenerateName sets the GenerateName field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the GenerateName field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithGenerateName(value string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.GenerateName = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithNamespace sets the Namespace field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Namespace field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithNamespace(value string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Namespace = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithUID sets the UID field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the UID field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithUID(value types.UID) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.UID = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the ResourceVersion field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithResourceVersion(value string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.ResourceVersion = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithGeneration sets the Generation field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Generation field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithGeneration(value int64) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.Generation = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the CreationTimestamp field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.CreationTimestamp = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the DeletionTimestamp field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.DeletionTimestamp = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
b.DeletionGracePeriodSeconds = &value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithLabels puts the entries into the Labels field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, the entries provided by each call will be put on the Labels field,
|
||||
// overwriting an existing map entries in Labels field with the same key.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithLabels(entries map[string]string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
if b.Labels == nil && len(entries) > 0 {
|
||||
b.Labels = make(map[string]string, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.Labels[k] = v
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithAnnotations puts the entries into the Annotations field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, the entries provided by each call will be put on the Annotations field,
|
||||
// overwriting an existing map entries in Annotations field with the same key.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
if b.Annotations == nil && len(entries) > 0 {
|
||||
b.Annotations = make(map[string]string, len(entries))
|
||||
}
|
||||
for k, v := range entries {
|
||||
b.Annotations[k] = v
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the OwnerReferences field.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
for i := range values {
|
||||
if values[i] == nil {
|
||||
panic("nil value passed to WithOwnerReferences")
|
||||
}
|
||||
b.OwnerReferences = append(b.OwnerReferences, *values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// WithFinalizers adds the given value to the Finalizers field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Finalizers field.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithFinalizers(values ...string) *ServiceCIDRApplyConfiguration {
|
||||
b.ensureObjectMetaApplyConfigurationExists()
|
||||
for i := range values {
|
||||
b.Finalizers = append(b.Finalizers, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (b *ServiceCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() {
|
||||
if b.ObjectMetaApplyConfiguration == nil {
|
||||
b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{}
|
||||
}
|
||||
}
|
||||
|
||||
// WithSpec sets the Spec field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Spec field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithSpec(value *ServiceCIDRSpecApplyConfiguration) *ServiceCIDRApplyConfiguration {
|
||||
b.Spec = value
|
||||
return b
|
||||
}
|
||||
|
||||
// WithStatus sets the Status field in the declarative configuration to the given value
|
||||
// and returns the receiver, so that objects can be built by chaining "With" function invocations.
|
||||
// If called multiple times, the Status field is set to the value of the last call.
|
||||
func (b *ServiceCIDRApplyConfiguration) WithStatus(value *ServiceCIDRStatusApplyConfiguration) *ServiceCIDRApplyConfiguration {
|
||||
b.Status = value
|
||||
return b
|
||||
}
|
||||
@@ -0,0 +1,41 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by applyconfiguration-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
// ServiceCIDRSpecApplyConfiguration represents an declarative configuration of the ServiceCIDRSpec type for use
|
||||
// with apply.
|
||||
type ServiceCIDRSpecApplyConfiguration struct {
|
||||
CIDRs []string `json:"cidrs,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceCIDRSpecApplyConfiguration constructs an declarative configuration of the ServiceCIDRSpec type for use with
|
||||
// apply.
|
||||
func ServiceCIDRSpec() *ServiceCIDRSpecApplyConfiguration {
|
||||
return &ServiceCIDRSpecApplyConfiguration{}
|
||||
}
|
||||
|
||||
// WithCIDRs adds the given value to the CIDRs field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the CIDRs field.
|
||||
func (b *ServiceCIDRSpecApplyConfiguration) WithCIDRs(values ...string) *ServiceCIDRSpecApplyConfiguration {
|
||||
for i := range values {
|
||||
b.CIDRs = append(b.CIDRs, values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by applyconfiguration-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1 "k8s.io/client-go/applyconfigurations/meta/v1"
|
||||
)
|
||||
|
||||
// ServiceCIDRStatusApplyConfiguration represents an declarative configuration of the ServiceCIDRStatus type for use
|
||||
// with apply.
|
||||
type ServiceCIDRStatusApplyConfiguration struct {
|
||||
Conditions []v1.ConditionApplyConfiguration `json:"conditions,omitempty"`
|
||||
}
|
||||
|
||||
// ServiceCIDRStatusApplyConfiguration constructs an declarative configuration of the ServiceCIDRStatus type for use with
|
||||
// apply.
|
||||
func ServiceCIDRStatus() *ServiceCIDRStatusApplyConfiguration {
|
||||
return &ServiceCIDRStatusApplyConfiguration{}
|
||||
}
|
||||
|
||||
// WithConditions adds the given value to the Conditions field in the declarative configuration
|
||||
// and returns the receiver, so that objects can be build by chaining "With" function invocations.
|
||||
// If called multiple times, values provided by each call will be appended to the Conditions field.
|
||||
func (b *ServiceCIDRStatusApplyConfiguration) WithConditions(values ...*v1.ConditionApplyConfiguration) *ServiceCIDRStatusApplyConfiguration {
|
||||
for i := range values {
|
||||
if values[i] == nil {
|
||||
panic("nil value passed to WithConditions")
|
||||
}
|
||||
b.Conditions = append(b.Conditions, *values[i])
|
||||
}
|
||||
return b
|
||||
}
|
||||
@@ -1349,6 +1349,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
|
||||
return &applyconfigurationsnetworkingv1alpha1.IPAddressSpecApplyConfiguration{}
|
||||
case networkingv1alpha1.SchemeGroupVersion.WithKind("ParentReference"):
|
||||
return &applyconfigurationsnetworkingv1alpha1.ParentReferenceApplyConfiguration{}
|
||||
case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR"):
|
||||
return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRApplyConfiguration{}
|
||||
case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRSpec"):
|
||||
return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRSpecApplyConfiguration{}
|
||||
case networkingv1alpha1.SchemeGroupVersion.WithKind("ServiceCIDRStatus"):
|
||||
return &applyconfigurationsnetworkingv1alpha1.ServiceCIDRStatusApplyConfiguration{}
|
||||
|
||||
// Group=networking.k8s.io, Version=v1beta1
|
||||
case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"):
|
||||
|
||||
@@ -298,6 +298,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
|
||||
// Group=networking.k8s.io, Version=v1alpha1
|
||||
case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"):
|
||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil
|
||||
case networkingv1alpha1.SchemeGroupVersion.WithResource("servicecidrs"):
|
||||
return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ServiceCIDRs().Informer()}, nil
|
||||
|
||||
// Group=networking.k8s.io, Version=v1beta1
|
||||
case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"):
|
||||
|
||||
@@ -26,6 +26,8 @@ import (
|
||||
type Interface interface {
|
||||
// IPAddresses returns a IPAddressInformer.
|
||||
IPAddresses() IPAddressInformer
|
||||
// ServiceCIDRs returns a ServiceCIDRInformer.
|
||||
ServiceCIDRs() ServiceCIDRInformer
|
||||
}
|
||||
|
||||
type version struct {
|
||||
@@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
|
||||
func (v *version) IPAddresses() IPAddressInformer {
|
||||
return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
// ServiceCIDRs returns a ServiceCIDRInformer.
|
||||
func (v *version) ServiceCIDRs() ServiceCIDRInformer {
|
||||
return &serviceCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
|
||||
}
|
||||
|
||||
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
time "time"
|
||||
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
internalinterfaces "k8s.io/client-go/informers/internalinterfaces"
|
||||
kubernetes "k8s.io/client-go/kubernetes"
|
||||
v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ServiceCIDRInformer provides access to a shared informer and lister for
|
||||
// ServiceCIDRs.
|
||||
type ServiceCIDRInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1alpha1.ServiceCIDRLister
|
||||
}
|
||||
|
||||
type serviceCIDRInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
}
|
||||
|
||||
// NewServiceCIDRInformer constructs a new informer for ServiceCIDR type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredServiceCIDRInformer(client, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredServiceCIDRInformer constructs a new informer for ServiceCIDR type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredServiceCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.NetworkingV1alpha1().ServiceCIDRs().List(context.TODO(), options)
|
||||
},
|
||||
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.NetworkingV1alpha1().ServiceCIDRs().Watch(context.TODO(), options)
|
||||
},
|
||||
},
|
||||
&networkingv1alpha1.ServiceCIDR{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *serviceCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredServiceCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *serviceCIDRInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&networkingv1alpha1.ServiceCIDR{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *serviceCIDRInformer) Lister() v1alpha1.ServiceCIDRLister {
|
||||
return v1alpha1.NewServiceCIDRLister(f.Informer().GetIndexer())
|
||||
}
|
||||
@@ -32,6 +32,10 @@ func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface {
|
||||
return &FakeIPAddresses{c}
|
||||
}
|
||||
|
||||
func (c *FakeNetworkingV1alpha1) ServiceCIDRs() v1alpha1.ServiceCIDRInterface {
|
||||
return &FakeServiceCIDRs{c}
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
// with API server by this client implementation.
|
||||
func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface {
|
||||
|
||||
@@ -0,0 +1,178 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
"context"
|
||||
json "encoding/json"
|
||||
"fmt"
|
||||
|
||||
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakeServiceCIDRs implements ServiceCIDRInterface
|
||||
type FakeServiceCIDRs struct {
|
||||
Fake *FakeNetworkingV1alpha1
|
||||
}
|
||||
|
||||
var servicecidrsResource = v1alpha1.SchemeGroupVersion.WithResource("servicecidrs")
|
||||
|
||||
var servicecidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ServiceCIDR")
|
||||
|
||||
// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any.
|
||||
func (c *FakeServiceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootGetAction(servicecidrsResource, name), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors.
|
||||
func (c *FakeServiceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootListAction(servicecidrsResource, servicecidrsKind, opts), &v1alpha1.ServiceCIDRList{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &v1alpha1.ServiceCIDRList{ListMeta: obj.(*v1alpha1.ServiceCIDRList).ListMeta}
|
||||
for _, item := range obj.(*v1alpha1.ServiceCIDRList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested serviceCIDRs.
|
||||
func (c *FakeServiceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewRootWatchAction(servicecidrsResource, opts))
|
||||
}
|
||||
|
||||
// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
|
||||
func (c *FakeServiceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootCreateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
|
||||
func (c *FakeServiceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateAction(servicecidrsResource, serviceCIDR), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeServiceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootUpdateSubresourceAction(servicecidrsResource, "status", serviceCIDR), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
|
||||
// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeServiceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewRootDeleteActionWithOptions(servicecidrsResource, name, opts), &v1alpha1.ServiceCIDR{})
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeServiceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
action := testing.NewRootDeleteCollectionAction(servicecidrsResource, listOpts)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &v1alpha1.ServiceCIDRList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched serviceCIDR.
|
||||
func (c *FakeServiceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, name, pt, data, subresources...), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
|
||||
// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR.
|
||||
func (c *FakeServiceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
if serviceCIDR == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
|
||||
}
|
||||
data, err := json.Marshal(serviceCIDR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := serviceCIDR.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
|
||||
}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
|
||||
// ApplyStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
|
||||
func (c *FakeServiceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
if serviceCIDR == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
|
||||
}
|
||||
data, err := json.Marshal(serviceCIDR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := serviceCIDR.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
|
||||
}
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewRootPatchSubresourceAction(servicecidrsResource, *name, types.ApplyPatchType, data, "status"), &v1alpha1.ServiceCIDR{})
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*v1alpha1.ServiceCIDR), err
|
||||
}
|
||||
@@ -19,3 +19,5 @@ limitations under the License.
|
||||
package v1alpha1
|
||||
|
||||
type IPAddressExpansion interface{}
|
||||
|
||||
type ServiceCIDRExpansion interface{}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
type NetworkingV1alpha1Interface interface {
|
||||
RESTClient() rest.Interface
|
||||
IPAddressesGetter
|
||||
ServiceCIDRsGetter
|
||||
}
|
||||
|
||||
// NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group.
|
||||
@@ -40,6 +41,10 @@ func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface {
|
||||
return newIPAddresses(c)
|
||||
}
|
||||
|
||||
func (c *NetworkingV1alpha1Client) ServiceCIDRs() ServiceCIDRInterface {
|
||||
return newServiceCIDRs(c)
|
||||
}
|
||||
|
||||
// NewForConfig creates a new NetworkingV1alpha1Client for the given config.
|
||||
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
|
||||
// where httpClient was generated with rest.HTTPClientFor(c).
|
||||
|
||||
@@ -0,0 +1,243 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
"context"
|
||||
json "encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
|
||||
scheme "k8s.io/client-go/kubernetes/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// ServiceCIDRsGetter has a method to return a ServiceCIDRInterface.
|
||||
// A group's client should implement this interface.
|
||||
type ServiceCIDRsGetter interface {
|
||||
ServiceCIDRs() ServiceCIDRInterface
|
||||
}
|
||||
|
||||
// ServiceCIDRInterface has methods to work with ServiceCIDR resources.
|
||||
type ServiceCIDRInterface interface {
|
||||
Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (*v1alpha1.ServiceCIDR, error)
|
||||
Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error)
|
||||
UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (*v1alpha1.ServiceCIDR, error)
|
||||
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
|
||||
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
|
||||
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ServiceCIDR, error)
|
||||
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ServiceCIDRList, error)
|
||||
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
|
||||
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error)
|
||||
Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error)
|
||||
ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error)
|
||||
ServiceCIDRExpansion
|
||||
}
|
||||
|
||||
// serviceCIDRs implements ServiceCIDRInterface
|
||||
type serviceCIDRs struct {
|
||||
client rest.Interface
|
||||
}
|
||||
|
||||
// newServiceCIDRs returns a ServiceCIDRs
|
||||
func newServiceCIDRs(c *NetworkingV1alpha1Client) *serviceCIDRs {
|
||||
return &serviceCIDRs{
|
||||
client: c.RESTClient(),
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the serviceCIDR, and returns the corresponding serviceCIDR object, and an error if there is any.
|
||||
func (c *serviceCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Get().
|
||||
Resource("servicecidrs").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of ServiceCIDRs that match those selectors.
|
||||
func (c *serviceCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ServiceCIDRList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1alpha1.ServiceCIDRList{}
|
||||
err = c.client.Get().
|
||||
Resource("servicecidrs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested serviceCIDRs.
|
||||
func (c *serviceCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Resource("servicecidrs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch(ctx)
|
||||
}
|
||||
|
||||
// Create takes the representation of a serviceCIDR and creates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
|
||||
func (c *serviceCIDRs) Create(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.CreateOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Post().
|
||||
Resource("servicecidrs").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(serviceCIDR).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a serviceCIDR and updates it. Returns the server's representation of the serviceCIDR, and an error, if there is any.
|
||||
func (c *serviceCIDRs) Update(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Put().
|
||||
Resource("servicecidrs").
|
||||
Name(serviceCIDR.Name).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(serviceCIDR).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *serviceCIDRs) UpdateStatus(ctx context.Context, serviceCIDR *v1alpha1.ServiceCIDR, opts v1.UpdateOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Put().
|
||||
Resource("servicecidrs").
|
||||
Name(serviceCIDR.Name).
|
||||
SubResource("status").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(serviceCIDR).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the serviceCIDR and deletes it. Returns an error if one occurs.
|
||||
func (c *serviceCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Resource("servicecidrs").
|
||||
Name(name).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *serviceCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOpts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Resource("servicecidrs").
|
||||
VersionedParams(&listOpts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(&opts).
|
||||
Do(ctx).
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched serviceCIDR.
|
||||
func (c *serviceCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Patch(pt).
|
||||
Resource("servicecidrs").
|
||||
Name(name).
|
||||
SubResource(subresources...).
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Apply takes the given apply declarative configuration, applies it and returns the applied serviceCIDR.
|
||||
func (c *serviceCIDRs) Apply(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
if serviceCIDR == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
|
||||
}
|
||||
patchOpts := opts.ToPatchOptions()
|
||||
data, err := json.Marshal(serviceCIDR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := serviceCIDR.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
|
||||
}
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Patch(types.ApplyPatchType).
|
||||
Resource("servicecidrs").
|
||||
Name(*name).
|
||||
VersionedParams(&patchOpts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// ApplyStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating ApplyStatus().
|
||||
func (c *serviceCIDRs) ApplyStatus(ctx context.Context, serviceCIDR *networkingv1alpha1.ServiceCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ServiceCIDR, err error) {
|
||||
if serviceCIDR == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR provided to Apply must not be nil")
|
||||
}
|
||||
patchOpts := opts.ToPatchOptions()
|
||||
data, err := json.Marshal(serviceCIDR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
name := serviceCIDR.Name
|
||||
if name == nil {
|
||||
return nil, fmt.Errorf("serviceCIDR.Name must be provided to Apply")
|
||||
}
|
||||
|
||||
result = &v1alpha1.ServiceCIDR{}
|
||||
err = c.client.Patch(types.ApplyPatchType).
|
||||
Resource("servicecidrs").
|
||||
Name(*name).
|
||||
SubResource("status").
|
||||
VersionedParams(&patchOpts, scheme.ParameterCodec).
|
||||
Body(data).
|
||||
Do(ctx).
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
@@ -21,3 +21,7 @@ package v1alpha1
|
||||
// IPAddressListerExpansion allows custom methods to be added to
|
||||
// IPAddressLister.
|
||||
type IPAddressListerExpansion interface{}
|
||||
|
||||
// ServiceCIDRListerExpansion allows custom methods to be added to
|
||||
// ServiceCIDRLister.
|
||||
type ServiceCIDRListerExpansion interface{}
|
||||
|
||||
@@ -0,0 +1,68 @@
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
v1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// ServiceCIDRLister helps list ServiceCIDRs.
|
||||
// All objects returned here must be treated as read-only.
|
||||
type ServiceCIDRLister interface {
|
||||
// List lists all ServiceCIDRs in the indexer.
|
||||
// Objects returned here must be treated as read-only.
|
||||
List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error)
|
||||
// Get retrieves the ServiceCIDR from the index for a given name.
|
||||
// Objects returned here must be treated as read-only.
|
||||
Get(name string) (*v1alpha1.ServiceCIDR, error)
|
||||
ServiceCIDRListerExpansion
|
||||
}
|
||||
|
||||
// serviceCIDRLister implements the ServiceCIDRLister interface.
type serviceCIDRLister struct {
	// indexer is the shared cache index the lister reads from.
	indexer cache.Indexer
}
|
||||
|
||||
// NewServiceCIDRLister returns a new ServiceCIDRLister backed by the given indexer.
func NewServiceCIDRLister(indexer cache.Indexer) ServiceCIDRLister {
	return &serviceCIDRLister{indexer: indexer}
}
|
||||
|
||||
// List lists all ServiceCIDRs in the indexer matching the given selector.
// Objects returned here must be treated as read-only.
func (s *serviceCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ServiceCIDR, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha1.ServiceCIDR))
	})
	return ret, err
}
|
||||
|
||||
// Get retrieves the ServiceCIDR from the index for a given name.
// Returns a NotFound API error when no object with that key is cached.
// Objects returned here must be treated as read-only.
func (s *serviceCIDRLister) Get(name string) (*v1alpha1.ServiceCIDR, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1alpha1.Resource("servicecidr"), name)
	}
	return obj.(*v1alpha1.ServiceCIDR), nil
}
|
||||
@@ -215,6 +215,7 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescr
|
||||
{Group: networkingv1beta1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c},
|
||||
{Group: networkingv1.GroupName, Kind: "Ingress"}: &IngressDescriber{c},
|
||||
{Group: networkingv1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c},
|
||||
{Group: networkingv1alpha1.GroupName, Kind: "ServiceCIDR"}: &ServiceCIDRDescriber{c},
|
||||
{Group: networkingv1alpha1.GroupName, Kind: "IPAddress"}: &IPAddressDescriber{c},
|
||||
{Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c},
|
||||
{Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c},
|
||||
@@ -2844,6 +2845,55 @@ func (i *IngressClassDescriber) describeIngressClassV1(ic *networkingv1.IngressC
|
||||
})
|
||||
}
|
||||
|
||||
// ServiceCIDRDescriber generates information about a ServiceCIDR.
|
||||
type ServiceCIDRDescriber struct {
|
||||
client clientset.Interface
|
||||
}
|
||||
|
||||
func (c *ServiceCIDRDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) {
|
||||
var events *corev1.EventList
|
||||
|
||||
svcV1alpha1, err := c.client.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), name, metav1.GetOptions{})
|
||||
if err == nil {
|
||||
if describerSettings.ShowEvents {
|
||||
events, _ = searchEvents(c.client.CoreV1(), svcV1alpha1, describerSettings.ChunkSize)
|
||||
}
|
||||
return c.describeServiceCIDRV1alpha1(svcV1alpha1, events)
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
|
||||
// describeServiceCIDRV1alpha1 renders the human-readable, tab-aligned
// description of a v1alpha1 ServiceCIDR: metadata, the configured CIDRs,
// status conditions (when present) and any related events.
func (c *ServiceCIDRDescriber) describeServiceCIDRV1alpha1(svc *networkingv1alpha1.ServiceCIDR, events *corev1.EventList) (string, error) {
	return tabbedString(func(out io.Writer) error {
		w := NewPrefixWriter(out)
		w.Write(LEVEL_0, "Name:\t%v\n", svc.Name)
		printLabelsMultiline(w, "Labels", svc.Labels)
		printAnnotationsMultiline(w, "Annotations", svc.Annotations)

		w.Write(LEVEL_0, "CIDRs:\t%v\n", strings.Join(svc.Spec.CIDRs, ", "))

		// The status section is printed only when there is at least one condition.
		if len(svc.Status.Conditions) > 0 {
			w.Write(LEVEL_0, "Status:\n")
			w.Write(LEVEL_0, "Conditions:\n")
			w.Write(LEVEL_1, "Type\tStatus\tLastTransitionTime\tReason\tMessage\n")
			w.Write(LEVEL_1, "----\t------\t------------------\t------\t-------\n")
			for _, c := range svc.Status.Conditions {
				w.Write(LEVEL_1, "%v\t%v\t%s\t%v\t%v\n",
					c.Type,
					c.Status,
					c.LastTransitionTime.Time.Format(time.RFC1123Z),
					c.Reason,
					c.Message)
			}
		}

		if events != nil {
			DescribeEvents(events, w)
		}
		return nil
	})
}
|
||||
|
||||
// IPAddressDescriber generates information about an IPAddress.
|
||||
type IPAddressDescriber struct {
|
||||
client clientset.Interface
|
||||
|
||||
@@ -5932,6 +5932,77 @@ Events: <none>` + "\n",
|
||||
}
|
||||
}
|
||||
|
||||
// TestDescribeServiceCIDR verifies the text rendered by ServiceCIDRDescriber
// for dual-stack, IPv4-only and IPv6-only ServiceCIDR objects against golden
// output strings.
func TestDescribeServiceCIDR(t *testing.T) {

	testcases := map[string]struct {
		input  *fake.Clientset
		output string
	}{
		"ServiceCIDR v1alpha1": {
			input: fake.NewSimpleClientset(&networkingv1alpha1.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo.123",
				},
				Spec: networkingv1alpha1.ServiceCIDRSpec{
					CIDRs: []string{"10.1.0.0/16", "fd00:1:1::/64"},
				},
			}),

			output: `Name: foo.123
Labels: <none>
Annotations: <none>
CIDRs: 10.1.0.0/16, fd00:1:1::/64
Events: <none>` + "\n",
		},
		"ServiceCIDR v1alpha1 IPv4": {
			input: fake.NewSimpleClientset(&networkingv1alpha1.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo.123",
				},
				Spec: networkingv1alpha1.ServiceCIDRSpec{
					CIDRs: []string{"10.1.0.0/16"},
				},
			}),

			output: `Name: foo.123
Labels: <none>
Annotations: <none>
CIDRs: 10.1.0.0/16
Events: <none>` + "\n",
		},
		"ServiceCIDR v1alpha1 IPv6": {
			input: fake.NewSimpleClientset(&networkingv1alpha1.ServiceCIDR{
				ObjectMeta: metav1.ObjectMeta{
					Name: "foo.123",
				},
				Spec: networkingv1alpha1.ServiceCIDRSpec{
					CIDRs: []string{"fd00:1:1::/64"},
				},
			}),

			output: `Name: foo.123
Labels: <none>
Annotations: <none>
CIDRs: fd00:1:1::/64
Events: <none>` + "\n",
		},
	}

	for name, tc := range testcases {
		t.Run(name, func(t *testing.T) {
			c := &describeClient{T: t, Namespace: "foo", Interface: tc.input}
			d := ServiceCIDRDescriber{c}
			// ServiceCIDRs are cluster-scoped; the namespace argument is ignored.
			out, err := d.Describe("bar", "foo.123", DescriberSettings{ShowEvents: true})
			if err != nil {
				t.Errorf("unexpected error: %v", err)
			}
			if out != tc.output {
				t.Errorf("expected :\n%s\nbut got output:\n%s diff:\n%s", tc.output, out, cmp.Diff(tc.output, out))
			}
		})
	}
}
|
||||
|
||||
func TestDescribeIPAddress(t *testing.T) {
|
||||
|
||||
testcases := map[string]struct {
|
||||
|
||||
116
test/e2e/network/service_cidrs.go
Normal file
116
test/e2e/network/service_cidrs.go
Normal file
@@ -0,0 +1,116 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package network
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/onsi/ginkgo/v2"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
"k8s.io/kubernetes/test/e2e/framework"
|
||||
e2enode "k8s.io/kubernetes/test/e2e/framework/node"
|
||||
e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
|
||||
e2eservice "k8s.io/kubernetes/test/e2e/framework/service"
|
||||
e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
|
||||
"k8s.io/kubernetes/test/e2e/network/common"
|
||||
admissionapi "k8s.io/pod-security-admission/api"
|
||||
)
|
||||
|
||||
var _ = common.SIGDescribe("[Feature:ServiceCIDRs]", func() {
|
||||
|
||||
fr := framework.NewDefaultFramework("servicecidrs")
|
||||
fr.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
|
||||
|
||||
var (
|
||||
cs clientset.Interface
|
||||
ns string
|
||||
)
|
||||
|
||||
ginkgo.BeforeEach(func(ctx context.Context) {
|
||||
cs = fr.ClientSet
|
||||
ns = fr.Namespace.Name
|
||||
|
||||
nodes, err := e2enode.GetBoundedReadySchedulableNodes(ctx, cs, 2)
|
||||
framework.ExpectNoError(err)
|
||||
if len(nodes.Items) < 2 {
|
||||
e2eskipper.Skipf(
|
||||
"Test requires >= 2 Ready nodes, but there are only %v nodes",
|
||||
len(nodes.Items))
|
||||
}
|
||||
|
||||
})
|
||||
|
||||
ginkgo.It("should create Services and servce on different Service CIDRs", func(ctx context.Context) {
|
||||
// create a new service CIDR
|
||||
svcCIDR := &networkingv1alpha1.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test-svc-cidr",
|
||||
},
|
||||
Spec: networkingv1alpha1.ServiceCIDRSpec{
|
||||
CIDRs: []string{"10.196.196.0/24"},
|
||||
},
|
||||
}
|
||||
_, err := cs.NetworkingV1alpha1().ServiceCIDRs().Create(context.TODO(), svcCIDR, metav1.CreateOptions{})
|
||||
framework.ExpectNoError(err, "error creating ServiceCIDR")
|
||||
if pollErr := wait.PollUntilContextTimeout(ctx, framework.Poll, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
|
||||
svcCIDR, err := cs.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, svcCIDR.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return isReady(svcCIDR), nil
|
||||
}); pollErr != nil {
|
||||
framework.Failf("Failed to wait for serviceCIDR to be ready: %v", pollErr)
|
||||
}
|
||||
|
||||
serviceName := "cidr1-test"
|
||||
jig := e2eservice.NewTestJig(cs, ns, serviceName)
|
||||
|
||||
ginkgo.By("creating service " + serviceName + " with type=NodePort in namespace " + ns)
|
||||
nodePortService, err := jig.CreateTCPService(ctx, func(svc *v1.Service) {
|
||||
svc.Spec.ClusterIP = "10.196.196.77"
|
||||
svc.Spec.Type = v1.ServiceTypeNodePort
|
||||
svc.Spec.Ports = []v1.ServicePort{
|
||||
{Port: 80, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(9376)},
|
||||
}
|
||||
})
|
||||
framework.ExpectNoError(err)
|
||||
err = jig.CreateServicePods(ctx, 2)
|
||||
framework.ExpectNoError(err)
|
||||
execPod := e2epod.CreateExecPodOrFail(ctx, cs, ns, "execpod", nil)
|
||||
err = jig.CheckServiceReachability(ctx, nodePortService, execPod)
|
||||
framework.ExpectNoError(err)
|
||||
})
|
||||
|
||||
})
|
||||
|
||||
func isReady(serviceCIDR *networkingv1alpha1.ServiceCIDR) bool {
|
||||
if serviceCIDR == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, condition := range serviceCIDR.Status.Conditions {
|
||||
if condition.Type == string(networkingv1alpha1.ServiceCIDRConditionReady) {
|
||||
return condition.Status == metav1.ConditionStatus(metav1.ConditionTrue)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
@@ -65,6 +65,7 @@ var resetFieldsStatusData = map[schema.GroupVersionResource]string{
|
||||
// standard for []metav1.Condition
|
||||
gvr("admissionregistration.k8s.io", "v1alpha1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
|
||||
gvr("admissionregistration.k8s.io", "v1beta1", "validatingadmissionpolicies"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
|
||||
gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): `{"status": {"conditions":[{"type":"Accepted","status":"True","lastTransitionTime":"2020-01-01T00:00:00Z","reason":"RuleApplied","message":"Rule was applied"}]}}`,
|
||||
}
|
||||
|
||||
// resetFieldsStatusDefault conflicts with statusDefault
|
||||
@@ -81,6 +82,9 @@ var noConflicts = map[string]struct{}{
|
||||
// storageVersions are skipped because their spec is empty
|
||||
// and thus they can never have a conflict.
|
||||
"storageversions": {},
|
||||
// servicecidrs are skipped because their spec is inmutable
|
||||
// and thus they can never have a conflict.
|
||||
"servicecidrs": {},
|
||||
// namespaces only have a spec.finalizers field which is also skipped,
|
||||
// thus it will never have a conflict.
|
||||
"namespaces": {},
|
||||
@@ -134,6 +138,7 @@ var resetFieldsSpecData = map[schema.GroupVersionResource]string{
|
||||
gvr("extensions", "v1beta1", "ingresses"): `{"spec": {"backend": {"serviceName": "service2"}}}`,
|
||||
gvr("networking.k8s.io", "v1beta1", "ingresses"): `{"spec": {"backend": {"serviceName": "service2"}}}`,
|
||||
gvr("networking.k8s.io", "v1", "ingresses"): `{"spec": {"defaultBackend": {"service": {"name": "service2"}}}}`,
|
||||
gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): `{}`,
|
||||
gvr("policy", "v1", "poddisruptionbudgets"): `{"spec": {"selector": {"matchLabels": {"anokkey2": "anokvalue"}}}}`,
|
||||
gvr("policy", "v1beta1", "poddisruptionbudgets"): `{"spec": {"selector": {"matchLabels": {"anokkey2": "anokvalue"}}}}`,
|
||||
gvr("storage.k8s.io", "v1alpha1", "volumeattachments"): `{"metadata": {"name": "va3"}, "spec": {"nodeName": "localhost2"}}`,
|
||||
|
||||
@@ -210,6 +210,13 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/networking/v1alpha1
|
||||
gvr("networking.k8s.io", "v1alpha1", "servicecidrs"): {
|
||||
Stub: `{"metadata": {"name": "range1"}, "spec": {"cidrs": ["192.168.0.0/16","fd00:1::/120"]}}`,
|
||||
ExpectedEtcdPath: "/registry/servicecidrs/range1",
|
||||
},
|
||||
// --
|
||||
|
||||
// k8s.io/kubernetes/pkg/apis/policy/v1
|
||||
gvr("policy", "v1", "poddisruptionbudgets"): {
|
||||
Stub: `{"metadata": {"name": "pdbv1"}, "spec": {"selector": {"matchLabels": {"anokkey": "anokvalue"}}}}`,
|
||||
|
||||
285
test/integration/servicecidr/migration_test.go
Normal file
285
test/integration/servicecidr/migration_test.go
Normal file
@@ -0,0 +1,285 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/informers"
|
||||
clientset "k8s.io/client-go/kubernetes"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
|
||||
"k8s.io/kubernetes/pkg/controller/servicecidrs"
|
||||
"k8s.io/kubernetes/pkg/controlplane/controller/defaultservicecidr"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
// TestMigrateServiceCIDR validates the steps necessary to migrate a cluster default ServiceCIDR
|
||||
// including the existing kubernetes.default service.
|
||||
// 1. start apiserver with --service-cluster-ip-range 192.168.0.0/29"
|
||||
// 2. create services to use some addresses on the cidr
|
||||
// 3. create a temporary new ServiceCIDR 10.168.0.0/24 to migrate the cluster to it
|
||||
// 4. delete the default service CIDR so the allocators ignore it (it will be pending because of the finalizer and having still IPs)
|
||||
// 5. recreate the services, the allocator should pick the temporary ServiceCIDR
|
||||
// 6. start the new apiserver with the new ServiceCIDRs on the flags and shutdown the old one
|
||||
// 7. delete the kubernetes.default service, the new apiserver will recreate it within the new ServiceCIDR
|
||||
func TestMigrateServiceCIDR(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)()
|
||||
ctx, cancelFn := context.WithCancel(context.Background())
|
||||
defer cancelFn()
|
||||
|
||||
cidr1 := "192.168.0.0/29"
|
||||
cidr2 := "10.168.0.0/24"
|
||||
|
||||
etcdOptions := framework.SharedEtcd()
|
||||
apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
|
||||
s1 := kubeapiservertesting.StartTestServerOrDie(t,
|
||||
apiServerOptions,
|
||||
[]string{
|
||||
"--runtime-config=networking.k8s.io/v1alpha1=true",
|
||||
"--service-cluster-ip-range=" + cidr1,
|
||||
"--advertise-address=10.1.1.1",
|
||||
"--disable-admission-plugins=ServiceAccount",
|
||||
},
|
||||
etcdOptions)
|
||||
|
||||
client1, err := clientset.NewForConfig(s1.ClientConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
ns := framework.CreateNamespaceOrDie(client1, "test-migrate-service-cidr", t)
|
||||
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informers1 := informers.NewSharedInformerFactory(client1, resyncPeriod)
|
||||
// ServiceCIDR controller
|
||||
go servicecidrs.NewController(
|
||||
informers1.Networking().V1alpha1().ServiceCIDRs(),
|
||||
informers1.Networking().V1alpha1().IPAddresses(),
|
||||
client1,
|
||||
).Run(ctx, 5)
|
||||
informers1.Start(ctx.Done())
|
||||
|
||||
// the default serviceCIDR should have a finalizer and ready condition set to true
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client1.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
if len(cidr.Finalizers) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return isServiceCIDRReady(cidr), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for default service cidr ready condition set to false: %v", err)
|
||||
}
|
||||
|
||||
svc := func(i int) *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("svc-%v", i),
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeClusterIP,
|
||||
Ports: []v1.ServicePort{
|
||||
{Port: 80},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// make 2 services , there will be still 3 free addresses
|
||||
for i := 0; i < 2; i++ {
|
||||
if _, err := client1.CoreV1().Services(ns.Name).Create(context.TODO(), svc(i), metav1.CreateOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
// Add a new service CIDR to be able to migrate the apiserver
|
||||
if _, err := client1.NetworkingV1alpha1().ServiceCIDRs().Create(context.Background(), makeServiceCIDR("migration-cidr", cidr2, ""), metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// wait ServiceCIDR is ready
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client1.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), "migration-cidr", metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
return isServiceCIDRReady(cidr), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for default service cidr ready condition set to false: %v", err)
|
||||
}
|
||||
|
||||
// delete the default ServiceCIDR so is no longer used for allocating IPs
|
||||
if err := client1.NetworkingV1alpha1().ServiceCIDRs().Delete(context.Background(), defaultservicecidr.DefaultServiceCIDRName, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// the default serviceCIDR should be pending deletion with Ready condition set to false
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client1.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
for _, condition := range cidr.Status.Conditions {
|
||||
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
|
||||
return condition.Status == metav1.ConditionFalse, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for default service cidr ready condition set to false: %v", err)
|
||||
}
|
||||
|
||||
// Migrate the services, delete the existing ones and recreate without specifying the ClusterIP
|
||||
services, err := client1.CoreV1().Services("").List(context.TODO(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
for _, svc := range services.Items {
|
||||
// skip the default service since is managed by the apiserver
|
||||
// and we want the new apiserver with the new service cidr to take over
|
||||
if svc.Name == "kubernetes" {
|
||||
continue
|
||||
}
|
||||
// wipe the necessary fields so we can recreate the Service
|
||||
svc.ResourceVersion = ""
|
||||
svc.Spec.ClusterIP = ""
|
||||
svc.Spec.ClusterIPs = nil
|
||||
svc.Status = v1.ServiceStatus{}
|
||||
if err := client1.CoreV1().Services(svc.Namespace).Delete(context.Background(), svc.Name, metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
svc, err := client1.CoreV1().Services(svc.Namespace).Create(context.Background(), &svc, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
if !cidrContainsIP(cidr2, svc.Spec.ClusterIP) {
|
||||
t.Fatalf("Service expected to have an ip in range 10.168.0.0/24, got %s", svc.Spec.ClusterIP)
|
||||
}
|
||||
}
|
||||
|
||||
// start second apiserver with the new range and new service cidr controller
|
||||
s2 := kubeapiservertesting.StartTestServerOrDie(t,
|
||||
apiServerOptions,
|
||||
[]string{
|
||||
"--runtime-config=networking.k8s.io/v1alpha1=true",
|
||||
"--service-cluster-ip-range=" + cidr2,
|
||||
"--advertise-address=10.1.1.1",
|
||||
"--disable-admission-plugins=ServiceAccount",
|
||||
},
|
||||
etcdOptions)
|
||||
defer s2.TearDownFn()
|
||||
|
||||
client2, err := clientset.NewForConfig(s2.ClientConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
defer framework.DeleteNamespaceOrDie(client2, ns, t)
|
||||
|
||||
// switch the controller to the new apiserver
|
||||
cancelFn()
|
||||
s1.TearDownFn()
|
||||
|
||||
// ServiceCIDR controller
|
||||
ctx2, cancelFn2 := context.WithCancel(context.Background())
|
||||
defer cancelFn2()
|
||||
informers2 := informers.NewSharedInformerFactory(client2, resyncPeriod)
|
||||
go servicecidrs.NewController(
|
||||
informers2.Networking().V1alpha1().ServiceCIDRs(),
|
||||
informers2.Networking().V1alpha1().IPAddresses(),
|
||||
client2,
|
||||
).Run(ctx2, 5)
|
||||
informers2.Start(ctx2.Done())
|
||||
|
||||
// delete the kubernetes.default service so the old DefaultServiceCIDR can be deleted
|
||||
// and the new apiserver can take over
|
||||
if err := client2.CoreV1().Services(metav1.NamespaceDefault).Delete(context.Background(), "kubernetes", metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// the default serviceCIDR should be the new one
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client2.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), defaultservicecidr.DefaultServiceCIDRName, metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if len(cidr.Spec.CIDRs) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if cidr.Spec.CIDRs[0] != cidr2 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if len(cidr.Finalizers) == 0 {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
for _, condition := range cidr.Status.Conditions {
|
||||
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
|
||||
return condition.Status == metav1.ConditionTrue, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for default service cidr ready condition set to true: %v", err)
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
svc, err := client2.CoreV1().Services(metav1.NamespaceDefault).Get(context.TODO(), "kubernetes", metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if svc.Spec.ClusterIP != "10.168.0.1" {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for default service kubernetes.default to be migrated: %v", err)
|
||||
}
|
||||
|
||||
// The temporary ServiceCIDR can be deleted now since the Default ServiceCIDR will cover it
|
||||
if err := client2.NetworkingV1alpha1().ServiceCIDRs().Delete(context.Background(), "migration-cidr", metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
|
||||
// wait ServiceCIDR no longer exist
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
_, err := client2.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), "migration-cidr", metav1.GetOptions{})
|
||||
if err != nil && !apierrors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for the migration service cidr to be deleted: %v", err)
|
||||
}
|
||||
|
||||
}
|
||||
354
test/integration/servicecidr/servicecidr_test.go
Normal file
354
test/integration/servicecidr/servicecidr_test.go
Normal file
@@ -0,0 +1,354 @@
|
||||
/*
|
||||
Copyright 2023 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package servicecidr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/netip"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
utilfeature "k8s.io/apiserver/pkg/util/feature"
|
||||
"k8s.io/client-go/informers"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
featuregatetesting "k8s.io/component-base/featuregate/testing"
|
||||
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
|
||||
"k8s.io/kubernetes/pkg/controller/servicecidrs"
|
||||
"k8s.io/kubernetes/pkg/features"
|
||||
"k8s.io/kubernetes/test/integration/framework"
|
||||
)
|
||||
|
||||
func TestServiceAllocNewServiceCIDR(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)()
|
||||
|
||||
etcdOptions := framework.SharedEtcd()
|
||||
apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
|
||||
s := kubeapiservertesting.StartTestServerOrDie(t,
|
||||
apiServerOptions,
|
||||
[]string{
|
||||
"--runtime-config=networking.k8s.io/v1alpha1=true",
|
||||
"--service-cluster-ip-range=192.168.0.0/29",
|
||||
"--advertise-address=10.1.1.1",
|
||||
"--disable-admission-plugins=ServiceAccount",
|
||||
},
|
||||
etcdOptions)
|
||||
defer s.TearDownFn()
|
||||
|
||||
client, err := kubernetes.NewForConfig(s.ClientConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
// ServiceCIDR controller
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
|
||||
go servicecidrs.NewController(
|
||||
informerFactory.Networking().V1alpha1().ServiceCIDRs(),
|
||||
informerFactory.Networking().V1alpha1().IPAddresses(),
|
||||
client,
|
||||
).Run(ctx, 5)
|
||||
informerFactory.Start(ctx.Done())
|
||||
|
||||
// /29 = 6 services, kubernetes.default takes the first address
|
||||
// make 5 more services to take up all IPs
|
||||
for i := 0; i < 5; i++ {
|
||||
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), makeService(fmt.Sprintf("service-%d", i)), metav1.CreateOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// Make another service. It will fail because we're out of cluster IPs
|
||||
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), makeService("fail"), metav1.CreateOptions{}); err != nil {
|
||||
if !strings.Contains(err.Error(), "range is full") {
|
||||
t.Fatalf("unexpected error text: %v", err)
|
||||
}
|
||||
} else {
|
||||
svcs, err := client.CoreV1().Services(metav1.NamespaceAll).List(context.Background(), metav1.ListOptions{})
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error getting the services: %v", err)
|
||||
}
|
||||
allIPs := []string{}
|
||||
for _, s := range svcs.Items {
|
||||
allIPs = append(allIPs, s.Spec.ClusterIP)
|
||||
}
|
||||
t.Fatalf("unexpected creation success. The following IPs exist: %#v. It should only be possible to allocate 6 IP addresses in this cluster.\n\n%#v", allIPs, svcs)
|
||||
}
|
||||
|
||||
// Add a new service CIDR to be able to create new IPs.
|
||||
cidr := makeServiceCIDR("test2", "10.168.0.0/24", "")
|
||||
if _, err := client.NetworkingV1alpha1().ServiceCIDRs().Create(context.Background(), cidr, metav1.CreateOptions{}); err != nil {
|
||||
t.Fatalf("got unexpected error: %v", err)
|
||||
}
|
||||
// wait ServiceCIDR is ready
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 1*time.Second, time.Minute, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(context.TODO(), cidr.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return isServiceCIDRReady(cidr), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("waiting for default service cidr ready condition set to false: %v", err)
|
||||
}
|
||||
// This time creating more Services should work
|
||||
for i := 10; i < 150; i++ {
|
||||
if _, err := client.CoreV1().Services(metav1.NamespaceDefault).Create(context.Background(), makeService(fmt.Sprintf("service-%d", i)), metav1.CreateOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A ServiceCIDR can be deleted if there are no orphan IPs or if the existing IPs are contained in other ServiceCIDR
|
||||
// that is not being deleted.
|
||||
// The test starts the apiserver with the range "192.168.0.0/29"
|
||||
// Create Services to fill the range
|
||||
// Creates a new ServiceCIDR cidr1 with the same range as the one defined in the apiserver
|
||||
// Deletes cidr1 object will work since its range is covered by the default ServiceCIDR created by the apiserver flags
|
||||
// Creates a new cidr2 with a different range than cidr1
|
||||
// Creates a new service so it picks an IPAddress on this range because "192.168.0.0/29" is full at this point
|
||||
// Creates a new cidr3 that contains cidr2
|
||||
// Deletes cidr2 since it is covered by cidr3
|
||||
// Tries to delete cidr3 but is blocked since there is an IPAddress
|
||||
// Deletes the Service with the IPAddress blocking the deletion
|
||||
// cidr3 must not exist at this point
|
||||
func TestServiceCIDRDeletion(t *testing.T) {
|
||||
defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRServiceAllocator, true)()
|
||||
cidr1 := "192.168.0.0/29" // same as the default
|
||||
cidr2 := "10.0.0.0/24" // new range
|
||||
cidr3 := "10.0.0.0/16" // contains cidr2
|
||||
|
||||
etcdOptions := framework.SharedEtcd()
|
||||
apiServerOptions := kubeapiservertesting.NewDefaultTestServerOptions()
|
||||
s := kubeapiservertesting.StartTestServerOrDie(t,
|
||||
apiServerOptions,
|
||||
[]string{
|
||||
"--runtime-config=networking.k8s.io/v1alpha1=true",
|
||||
"--service-cluster-ip-range=" + cidr1,
|
||||
"--advertise-address=172.16.1.1",
|
||||
"--disable-admission-plugins=ServiceAccount",
|
||||
},
|
||||
etcdOptions)
|
||||
defer s.TearDownFn()
|
||||
|
||||
client, err := kubernetes.NewForConfig(s.ClientConfig)
|
||||
if err != nil {
|
||||
t.Fatalf("Unexpected error: %v", err)
|
||||
}
|
||||
|
||||
ns := framework.CreateNamespaceOrDie(client, "test-service-cidr-deletion", t)
|
||||
defer framework.DeleteNamespaceOrDie(client, ns, t)
|
||||
|
||||
// ServiceCIDR controller
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
resyncPeriod := 12 * time.Hour
|
||||
informerFactory := informers.NewSharedInformerFactory(client, resyncPeriod)
|
||||
go servicecidrs.NewController(
|
||||
informerFactory.Networking().V1alpha1().ServiceCIDRs(),
|
||||
informerFactory.Networking().V1alpha1().IPAddresses(),
|
||||
client,
|
||||
).Run(ctx, 5)
|
||||
informerFactory.Start(ctx.Done())
|
||||
|
||||
// /29 = 6 services, kubernetes.default takes the first address
|
||||
// make 5 more services to take up all IPs
|
||||
for i := 0; i < 5; i++ {
|
||||
if _, err := client.CoreV1().Services(ns.Name).Create(context.Background(), makeService(fmt.Sprintf("service-%d", i)), metav1.CreateOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
// create a new ServiceCIDRs that overlaps the default one
|
||||
_, err = client.NetworkingV1alpha1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr1", cidr1, ""), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatal((err))
|
||||
}
|
||||
// Wait until is ready.
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr1", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return isServiceCIDRReady(cidr), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr1 is not ready")
|
||||
}
|
||||
// we should be able to delete the ServiceCIDR despite it contains IP addresses as it overlaps with the default ServiceCIDR
|
||||
err = client.NetworkingV1alpha1().ServiceCIDRs().Delete(ctx, "cidr1", metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatal((err))
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
_, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr1", metav1.GetOptions{})
|
||||
if err != nil && apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr1 has not been deleted")
|
||||
}
|
||||
|
||||
// add a new ServiceCIDR with a new range
|
||||
_, err = client.NetworkingV1alpha1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr2", cidr2, ""), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatal((err))
|
||||
}
|
||||
// wait the allocator process the new ServiceCIDR
|
||||
// Wait until is ready.
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr2", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return isServiceCIDRReady(cidr), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr2 is not ready")
|
||||
}
|
||||
// create a new Service so it will take a new IP address from the new range
|
||||
svc, err := client.CoreV1().Services(ns.Name).Create(context.Background(), makeService("new-cidr-service"), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !cidrContainsIP(cidr2, svc.Spec.ClusterIP) {
|
||||
t.Fatalf("Service %s expected to have an IP on range %s, got %s", svc.Name, cidr2, svc.Spec.ClusterIP)
|
||||
}
|
||||
|
||||
// add a new ServiceCIDR that overlaps the existing one
|
||||
_, err = client.NetworkingV1alpha1().ServiceCIDRs().Create(ctx, makeServiceCIDR("cidr3", cidr3, ""), metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
t.Fatal((err))
|
||||
}
|
||||
// Wait until is ready.
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
return isServiceCIDRReady(cidr), nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr3 is not ready")
|
||||
}
|
||||
// we should be able to delete the ServiceCIDR2 despite it contains IP addresses as it is contained on ServiceCIDR3
|
||||
err = client.NetworkingV1alpha1().ServiceCIDRs().Delete(ctx, "cidr2", metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatal((err))
|
||||
}
|
||||
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
_, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr2", metav1.GetOptions{})
|
||||
if err != nil && apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr2 has not been deleted")
|
||||
}
|
||||
|
||||
// serviceCIDR3 will not be able to be deleted until the IPAddress is removed
|
||||
err = client.NetworkingV1alpha1().ServiceCIDRs().Delete(ctx, "cidr3", metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
t.Fatal((err))
|
||||
}
|
||||
// Wait until is not ready.
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
cidr, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return false, nil
|
||||
}
|
||||
for _, condition := range cidr.Status.Conditions {
|
||||
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
|
||||
return condition.Status == metav1.ConditionStatus(metav1.ConditionFalse), nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr3 is ready")
|
||||
}
|
||||
|
||||
// delete the service blocking the deletion
|
||||
if err := client.CoreV1().Services(ns.Name).Delete(context.Background(), "new-cidr-service", metav1.DeleteOptions{}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// cidr3 must not exist
|
||||
if err := wait.PollUntilContextTimeout(context.Background(), 250*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
|
||||
_, err := client.NetworkingV1alpha1().ServiceCIDRs().Get(ctx, "cidr3", metav1.GetOptions{})
|
||||
if err != nil && apierrors.IsNotFound(err) {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}); err != nil {
|
||||
t.Fatalf("cidr3 has not been deleted")
|
||||
}
|
||||
}
|
||||
|
||||
func makeServiceCIDR(name, primary, secondary string) *networkingv1alpha1.ServiceCIDR {
|
||||
serviceCIDR := &networkingv1alpha1.ServiceCIDR{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: networkingv1alpha1.ServiceCIDRSpec{},
|
||||
}
|
||||
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, primary)
|
||||
if secondary != "" {
|
||||
serviceCIDR.Spec.CIDRs = append(serviceCIDR.Spec.CIDRs, secondary)
|
||||
}
|
||||
return serviceCIDR
|
||||
}
|
||||
|
||||
func makeService(name string) *v1.Service {
|
||||
return &v1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: name,
|
||||
},
|
||||
Spec: v1.ServiceSpec{
|
||||
Type: v1.ServiceTypeClusterIP,
|
||||
Ports: []v1.ServicePort{
|
||||
{Port: 80},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// returns true of the ServiceCIDRConditionReady is true
|
||||
func isServiceCIDRReady(serviceCIDR *networkingv1alpha1.ServiceCIDR) bool {
|
||||
if serviceCIDR == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, condition := range serviceCIDR.Status.Conditions {
|
||||
if condition.Type == networkingv1alpha1.ServiceCIDRConditionReady {
|
||||
return condition.Status == metav1.ConditionStatus(metav1.ConditionTrue)
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// cidrContainsIP reports whether ip falls inside the cidr prefix.
// Both arguments must be syntactically valid; invalid input panics
// (MustParsePrefix/MustParseAddr), which is acceptable in a test helper.
func cidrContainsIP(cidr, ip string) bool {
	return netip.MustParsePrefix(cidr).Contains(netip.MustParseAddr(ip))
}
|
||||
Reference in New Issue
Block a user