diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index 4f7068c401e..c666da22f85 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -355,8 +355,6 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS -API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv4 -API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv6 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,XEmbeddedResource diff --git a/api/discovery/aggregated_v2beta1.json b/api/discovery/aggregated_v2beta1.json index 69f29ef5f93..dd7c3f2649b 100644 --- a/api/discovery/aggregated_v2beta1.json +++ b/api/discovery/aggregated_v2beta1.json @@ -853,29 +853,6 @@ { "freshness": "Current", "resources": [ - { - "resource": "clustercidrs", - "responseKind": { - "group": "", - "kind": "ClusterCIDR", - "version": "" - }, - "scope": "Cluster", - "shortNames": [ - "cc" - ], - "singularResource": "clustercidr", - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] - }, { "resource": "ipaddresses", "responseKind": { diff --git a/api/discovery/apis__networking.k8s.io__v1alpha1.json b/api/discovery/apis__networking.k8s.io__v1alpha1.json index 15eeefa973f..aac5d63a218 100644 --- a/api/discovery/apis__networking.k8s.io__v1alpha1.json +++ b/api/discovery/apis__networking.k8s.io__v1alpha1.json @@ -3,26 +3,6 @@ "groupVersion": "networking.k8s.io/v1alpha1", "kind": "APIResourceList", "resources": [ - { - "kind": "ClusterCIDR", - "name": "clustercidrs", - "namespaced": false, - "shortNames": [ - "cc" - ], - "singularName": "clustercidr", - "storageVersionHash": "iC0u25BTSsc=", - "verbs": [ - "create", - "delete", - "deletecollection", - "get", - "list", - "patch", - "update", - "watch" - ] - }, { "kind": "IPAddress", "name": "ipaddresses", diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 7972bdb94df..eac2e6e1e8f 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -13095,96 +13095,6 @@ }, "type": "object" }, - "io.k8s.api.networking.v1alpha1.ClusterCIDR": { - "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec", - "description": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { - "description": "ClusterCIDRList contains a list of ClusterCIDR.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "items is the list of ClusterCIDRs.", - "items": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDRList", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { - "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "properties": { - "ipv4": { - "description": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "ipv6": { - "description": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "nodeSelector": { - "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", - "description": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable." 
- }, - "perNodeHostBits": { - "description": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "format": "int32", - "type": "integer" - } - }, - "required": [ - "perNodeHostBits" - ], - "type": "object" - }, "io.k8s.api.networking.v1alpha1.IPAddress": { "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "properties": { @@ -60993,485 +60903,6 @@ ] } }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs": { - "delete": { - "consumes": [ - "*/*" - ], - "description": "delete collection of ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/body-2Y1dVQaQ" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/gracePeriodSeconds--K5HaBOS" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/orphanDependents-uRB25kX5" - }, - { - "$ref": "#/parameters/propagationPolicy-6jk3prlO" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "consumes": [ - "*/*" - ], - "description": "list or watch objects of kind ClusterCIDR", - "operationId": "listNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "$ref": "#/parameters/pretty-nN7o5FEq" - } - ], - "post": { - "consumes": [ - "*/*" - ], - "description": "create a ClusterCIDR", - "operationId": "createNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "202": { - "description": "Accepted", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { - "delete": { - "consumes": [ - "*/*" - ], - "description": "delete a ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/body-2Y1dVQaQ" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/gracePeriodSeconds--K5HaBOS" - }, - { - "$ref": "#/parameters/orphanDependents-uRB25kX5" - }, - { - "$ref": "#/parameters/propagationPolicy-6jk3prlO" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "202": { - "description": "Accepted", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "consumes": [ - "*/*" - ], - "description": "read the specified ClusterCIDR", - "operationId": "readNetworkingV1alpha1ClusterCIDR", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/pretty-nN7o5FEq" - } - ], - "patch": { - "consumes": [ - "application/json-patch+json", - "application/merge-patch+json", - "application/strategic-merge-patch+json", - "application/apply-patch+yaml" - ], - "description": "partially update the specified ClusterCIDR", - "operationId": "patchNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "$ref": "#/parameters/body-78PwaGsr" - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-7c6nTn1T" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/force-tOGGb0Yi" - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "put": { - "consumes": [ - "*/*" - ], - "description": "replace the specified ClusterCIDR", - "operationId": "replaceNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "in": "body", - "name": "body", - "required": true, - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/fieldManager-Qy4HdaTW" - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "type": "string", - "uniqueItems": true - } - ], - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "201": { - "description": "Created", - "schema": { - "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, "/apis/networking.k8s.io/v1alpha1/ipaddresses": { "delete": { "consumes": [ @@ -61951,162 +61382,6 @@ } } }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchNetworkingV1alpha1ClusterCIDRList", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "$ref": "#/parameters/pretty-nN7o5FEq" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { - "get": { - "consumes": [ - "*/*" - ], - "description": "watch changes to an object of kind ClusterCIDR. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchNetworkingV1alpha1ClusterCIDR", - "produces": [ - "application/json", - "application/yaml", - "application/vnd.kubernetes.protobuf", - "application/json;stream=watch", - "application/vnd.kubernetes.protobuf;stream=watch" - ], - "responses": { - "200": { - "description": "OK", - "schema": { - "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "401": { - "description": "Unauthorized" - } - }, - "schemes": [ - "https" - ], - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "$ref": "#/parameters/allowWatchBookmarks-HC2hJt-J" - }, - { - "$ref": "#/parameters/continue-QfD61s0i" - }, - { - "$ref": "#/parameters/fieldSelector-xIcQKXFG" - }, - { - "$ref": "#/parameters/labelSelector-5Zw57w4C" - }, - { - "$ref": "#/parameters/limit-1NfNmdNH" - }, - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "type": "string", - "uniqueItems": true - }, - { - "$ref": "#/parameters/pretty-nN7o5FEq" - }, - { - "$ref": "#/parameters/resourceVersion-5WAnf1kx" - }, - { - "$ref": "#/parameters/resourceVersionMatch-t8XhRHeC" - }, - { - "$ref": "#/parameters/sendInitialEvents-rLXlEK_k" - }, - { - "$ref": "#/parameters/timeoutSeconds-yvYezaOC" - }, - { - "$ref": "#/parameters/watch-XNNPZGbK" - } - ] - }, "/apis/networking.k8s.io/v1alpha1/watch/ipaddresses": { "get": { "consumes": [ diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json index e89199164ff..543d2f03651 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json @@ -1,204 +1,6 @@ { "components": { "schemas": { - "io.k8s.api.core.v1.NodeSelector": { - "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm" - } - ], - "default": {} - }, - "type": "array" - } - }, - "required": [ - "nodeSelectorTerms" - ], - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "io.k8s.api.core.v1.NodeSelectorRequirement": { - "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", - "properties": { - "key": { - "default": "", - "description": "The label key that the selector applies to.", - "type": "string" - }, - "operator": { - "default": "", - "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", - "type": "string" - }, - "values": { - "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. 
This array is replaced during a strategic merge patch.", - "items": { - "default": "", - "type": "string" - }, - "type": "array" - } - }, - "required": [ - "key", - "operator" - ], - "type": "object" - }, - "io.k8s.api.core.v1.NodeSelectorTerm": { - "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", - "properties": { - "matchExpressions": { - "description": "A list of node selector requirements by node's labels.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" - } - ], - "default": {} - }, - "type": "array" - }, - "matchFields": { - "description": "A list of node selector requirements by node's fields.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" - } - ], - "default": {} - }, - "type": "array" - } - }, - "type": "object", - "x-kubernetes-map-type": "atomic" - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDR": { - "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" - } - ], - "default": {}, - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - }, - "spec": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec" - } - ], - "default": {}, - "description": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" - } - }, - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { - "description": "ClusterCIDRList contains a list of ClusterCIDR.", - "properties": { - "apiVersion": { - "description": "APIVersion defines the versioned schema of this representation of an object. 
Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - "type": "string" - }, - "items": { - "description": "items is the list of ClusterCIDRs.", - "items": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - ], - "default": {} - }, - "type": "array" - }, - "kind": { - "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "type": "string" - }, - "metadata": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" - } - ], - "default": {}, - "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" - } - }, - "required": [ - "items" - ], - "type": "object", - "x-kubernetes-group-version-kind": [ - { - "group": "networking.k8s.io", - "kind": "ClusterCIDRList", - "version": "v1alpha1" - } - ] - }, - "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { - "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "properties": { - "ipv4": { - "default": "", - "description": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "ipv6": { - "default": "", - "description": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "nodeSelector": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" - } - ], - "description": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable." - }, - "perNodeHostBits": { - "default": 0, - "description": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "format": "int32", - "type": "integer" - } - }, - "required": [ - "perNodeHostBits" - ], - "type": "object" - }, "io.k8s.api.networking.v1alpha1.IPAddress": { "description": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. 
Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "properties": { @@ -1482,817 +1284,6 @@ ] } }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs": { - "delete": { - "description": "delete collection of ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", - "parameters": [ - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. 
Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "description": "list or watch objects of kind ClusterCIDR", - "operationId": "listNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. 
In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "post": { - "description": "create a ClusterCIDR", - "operationId": "createNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { - "delete": { - "description": "delete a ClusterCIDR", - "operationId": 
"deleteNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "description": "read the specified ClusterCIDR", - "operationId": "readNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - 
"application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "patch": { - "description": "partially update the specified ClusterCIDR", - "operationId": "patchNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", - "in": "query", - "name": "force", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "application/apply-patch+yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/json-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/strategic-merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "put": { - "description": "replace the specified ClusterCIDR", - "operationId": "replaceNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
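[Editor's note: illustrative sketch, not part of the patch. A minimal example of a client opting into the fieldValidation behavior described in this hunk: "Strict" makes the server reject unknown or duplicate fields, instead of silently dropping them (Ignore) or warning about them (Warn, the default since v1.23). The ConfigMap is again a stand-in resource.]

    package main

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // createStrict asks the server to fail the request with BadRequest if any
    // unknown or duplicate field is present, per the fieldValidation parameter
    // described in this hunk. Valid values: "Ignore", "Warn", "Strict".
    func createStrict(ctx context.Context, client kubernetes.Interface, cm *corev1.ConfigMap) error {
        _, err := client.CoreV1().ConfigMaps(cm.Namespace).Create(ctx, cm, metav1.CreateOptions{
            FieldValidation: "Strict",
        })
        return err
    }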
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "required": true - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, "/apis/networking.k8s.io/v1alpha1/ipaddresses": { "delete": { "description": "delete collection of IPAddress", @@ -3104,318 +2095,6 @@ } } }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { - "get": { - "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchNetworkingV1alpha1ClusterCIDRList", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
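[Editor's note: illustrative sketch, not part of the patch. The /watch/clustercidrs endpoint removed here was already deprecated in favor of the 'watch' parameter on a list operation, as its own description says. Below is a minimal client-go sketch of that replacement pattern, shown on the core Pods client because the ClusterCIDR client is deleted by this PR.]

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // watchViaList uses the 'watch' parameter on a list operation, the pattern
    // the deprecated /watch/... endpoints in this hunk point clients toward.
    func watchViaList(ctx context.Context, client kubernetes.Interface, ns string) error {
        w, err := client.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{
            AllowWatchBookmarks: true, // opt in to BOOKMARK events, per the parameter above
        })
        if err != nil {
            return err
        }
        defer w.Stop()
        for ev := range w.ResultChan() {
            fmt.Printf("%s %T\n", ev.Type, ev.Object)
        }
        return nil
    }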
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
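[Editor's note: illustrative sketch, not part of the patch. The limit/continue description in this hunk (it continues below) defines a paging contract: request pages with `limit`, and keep passing back the server's `continue` token until it comes back empty. A sketch of the loop it implies; the resource and page size are arbitrary.]

    package main

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // listAllPods pages through a large collection using limit and continue.
    // Per the contract described above, only the presence of the continue
    // token tells the client whether more results are available.
    func listAllPods(ctx context.Context, client kubernetes.Interface, ns string) ([]corev1.Pod, error) {
        var pods []corev1.Pod
        opts := metav1.ListOptions{Limit: 500}
        for {
            list, err := client.CoreV1().Pods(ns).List(ctx, opts)
            if err != nil {
                return nil, err // a 410 ResourceExpired here means the token expired; restart the list
            }
            pods = append(pods, list.Items...)
            if list.Continue == "" {
                return pods, nil
            }
            opts.Continue = list.Continue
        }
    }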
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { - "get": { - "description": "watch changes to an object of kind ClusterCIDR. 
deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. 
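[Editor's note: illustrative sketch, not part of the patch. The per-object watch endpoint removed here directs clients to a list-based watch filtered to a single item with a fieldSelector; a sketch of that pattern, again using the Pods client as a stand-in.]

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/fields"
        "k8s.io/client-go/kubernetes"
    )

    // watchOne follows a single object, replacing the deprecated
    // /watch/.../{name} endpoints: a list-based watch restricted to one item
    // via a metadata.name field selector, as this hunk's description suggests.
    func watchOne(ctx context.Context, client kubernetes.Interface, ns, name string) error {
        w, err := client.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{
            FieldSelector: fields.OneTermEqualSelector("metadata.name", name).String(),
        })
        if err != nil {
            return err
        }
        defer w.Stop()
        for range w.ResultChan() {
            // handle add/update/delete events for the single object here
        }
        return nil
    }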
Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. 
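[Editor's note: illustrative sketch, not part of the patch. A sketch of the sendInitialEvents flow this parameter describes (its semantics continue below): synthetic events for the current state, then a bookmark annotated "k8s.io/initial-events-end", then live changes. Assumes a client-go recent enough to carry ListOptions.SendInitialEvents and a server with the corresponding watch-list feature enabled.]

    package main

    import (
        "context"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // watchFromCurrentState starts a watch that first streams the current state,
    // then live changes. Per the description in this hunk, resourceVersionMatch
    // must be set whenever sendInitialEvents is set.
    func watchFromCurrentState(ctx context.Context, client kubernetes.Interface, ns string) error {
        sendInitial := true
        w, err := client.CoreV1().Pods(ns).Watch(ctx, metav1.ListOptions{
            Watch:                true,
            SendInitialEvents:    &sendInitial,
            ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
            ResourceVersion:      "", // unset: "consistent read" semantics, per the description
            AllowWatchBookmarks:  true,
        })
        if err != nil {
            return err
        }
        defer w.Stop()
        // consume w.ResultChan() as usual
        return nil
    }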
The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.", - "in": "query", - "name": "sendInitialEvents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, "/apis/networking.k8s.io/v1alpha1/watch/ipaddresses": { "get": { "description": "watch individual changes to a list of IPAddress. deprecated: use the 'watch' parameter with a list operation instead.", diff --git a/cmd/cloud-controller-manager/nodeipamcontroller.go b/cmd/cloud-controller-manager/nodeipamcontroller.go index 19b41e4a20a..36c792ab22f 100644 --- a/cmd/cloud-controller-manager/nodeipamcontroller.go +++ b/cmd/cloud-controller-manager/nodeipamcontroller.go @@ -26,8 +26,6 @@ import ( "net" "strings" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers/networking/v1alpha1" cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" cloudcontrollerconfig "k8s.io/cloud-provider/app/config" @@ -38,7 +36,6 @@ import ( nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam" nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" - "k8s.io/kubernetes/pkg/features" netutils "k8s.io/utils/net" ) @@ -128,14 +125,9 @@ func startNodeIpamController(ctx context.Context, initContext app.ControllerInit return nil, false, err } - var clusterCIDRInformer v1alpha1.ClusterCIDRInformer - if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { - clusterCIDRInformer = controllerCtx.InformerFactory.Networking().V1alpha1().ClusterCIDRs() - } nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx, controllerCtx.InformerFactory.Core().V1().Nodes(), - clusterCIDRInformer, cloud, controllerCtx.ClientBuilder.ClientOrDie(initContext.ClientName), clusterCIDRs, diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 1712f8026ab..bbeb7a360cc 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -27,9 +27,7 @@ import ( "strings" "time" - "k8s.io/client-go/informers/networking/v1alpha1" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -150,15 +148,9 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo return nil, false, err } - var 
clusterCIDRInformer v1alpha1.ClusterCIDRInformer - if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { - clusterCIDRInformer = controllerContext.InformerFactory.Networking().V1alpha1().ClusterCIDRs() - } - nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx, controllerContext.InformerFactory.Core().V1().Nodes(), - clusterCIDRInformer, controllerContext.Cloud, controllerContext.ClientBuilder.ClientOrDie("node-controller"), clusterCIDRs, diff --git a/pkg/apis/networking/register.go b/pkg/apis/networking/register.go index 570a6a4db3b..0e1a01af46f 100644 --- a/pkg/apis/networking/register.go +++ b/pkg/apis/networking/register.go @@ -52,8 +52,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, ) diff --git a/pkg/apis/networking/types.go b/pkg/apis/networking/types.go index 33cd297bfd8..83a7a55f38e 100644 --- a/pkg/apis/networking/types.go +++ b/pkg/apis/networking/types.go @@ -598,71 +598,6 @@ type ServiceBackendPort struct { // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta - - metav1.ObjectMeta - - Spec ClusterCIDRSpec -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *api.NodeSelector - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv4 string - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv6 string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterCIDRList contains a list of ClusterCIDRs. -type ClusterCIDRList struct { - metav1.TypeMeta - - // +optional - metav1.ListMeta - - // items is the list of ClusterCIDRs. 
- Items []ClusterCIDR -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, diff --git a/pkg/apis/networking/v1alpha1/zz_generated.conversion.go b/pkg/apis/networking/v1alpha1/zz_generated.conversion.go index 3339c87b502..28a388c7d58 100644 --- a/pkg/apis/networking/v1alpha1/zz_generated.conversion.go +++ b/pkg/apis/networking/v1alpha1/zz_generated.conversion.go @@ -24,11 +24,9 @@ package v1alpha1 import ( unsafe "unsafe" - v1 "k8s.io/api/core/v1" v1alpha1 "k8s.io/api/networking/v1alpha1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" - core "k8s.io/kubernetes/pkg/apis/core" networking "k8s.io/kubernetes/pkg/apis/networking" ) @@ -39,36 +37,6 @@ func init() { // RegisterConversions adds conversion functions to the given scheme. // Public to allow building arbitrary schemes. func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDR)(nil), (*networking.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(a.(*v1alpha1.ClusterCIDR), b.(*networking.ClusterCIDR), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDR)(nil), (*v1alpha1.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(a.(*networking.ClusterCIDR), b.(*v1alpha1.ClusterCIDR), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRList)(nil), (*networking.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(a.(*v1alpha1.ClusterCIDRList), b.(*networking.ClusterCIDRList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRList)(nil), (*v1alpha1.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(a.(*networking.ClusterCIDRList), b.(*v1alpha1.ClusterCIDRList), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRSpec)(nil), (*networking.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(a.(*v1alpha1.ClusterCIDRSpec), b.(*networking.ClusterCIDRSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRSpec)(nil), (*v1alpha1.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(a.(*networking.ClusterCIDRSpec), b.(*v1alpha1.ClusterCIDRSpec), scope) - }); err != nil { - return err - } if err := s.AddGeneratedConversionFunc((*v1alpha1.IPAddress)(nil), (*networking.IPAddress)(nil), func(a, b interface{}, scope conversion.Scope) error { return Convert_v1alpha1_IPAddress_To_networking_IPAddress(a.(*v1alpha1.IPAddress), b.(*networking.IPAddress), scope) }); err 
!= nil { @@ -112,80 +80,6 @@ func RegisterConversions(s *runtime.Scheme) error { return nil } -func autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR is an autogenerated conversion function. -func Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in, out, s) -} - -func autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error { - out.ObjectMeta = in.ObjectMeta - if err := Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil { - return err - } - return nil -} - -// Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR is an autogenerated conversion function. -func Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error { - return autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in, out, s) -} - -func autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]networking.ClusterCIDR)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList is an autogenerated conversion function. -func Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in, out, s) -} - -func autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error { - out.ListMeta = in.ListMeta - out.Items = *(*[]v1alpha1.ClusterCIDR)(unsafe.Pointer(&in.Items)) - return nil -} - -// Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList is an autogenerated conversion function. -func Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error { - return autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in, out, s) -} - -func autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error { - out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector)) - out.PerNodeHostBits = in.PerNodeHostBits - out.IPv4 = in.IPv4 - out.IPv6 = in.IPv6 - return nil -} - -// Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec is an autogenerated conversion function. 
-func Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error { - return autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in, out, s) -} - -func autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error { - out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector)) - out.PerNodeHostBits = in.PerNodeHostBits - out.IPv4 = in.IPv4 - out.IPv6 = in.IPv6 - return nil -} - -// Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec is an autogenerated conversion function. -func Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error { - return autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in, out, s) -} - func autoConvert_v1alpha1_IPAddress_To_networking_IPAddress(in *v1alpha1.IPAddress, out *networking.IPAddress, s conversion.Scope) error { out.ObjectMeta = in.ObjectMeta if err := Convert_v1alpha1_IPAddressSpec_To_networking_IPAddressSpec(&in.Spec, &out.Spec, s); err != nil { diff --git a/pkg/apis/networking/validation/validation.go b/pkg/apis/networking/validation/validation.go index 84928914718..4f577f51c0f 100644 --- a/pkg/apis/networking/validation/validation.go +++ b/pkg/apis/networking/validation/validation.go @@ -21,7 +21,6 @@ import ( "net/netip" "strings" - v1 "k8s.io/api/core/v1" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" pathvalidation "k8s.io/apimachinery/pkg/api/validation/path" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -649,92 +648,6 @@ func allowInvalidWildcardHostRule(oldIngress *networking.Ingress) bool { return false } -// ValidateClusterCIDRName validates that the given name can be used as an -// ClusterCIDR name. -var ValidateClusterCIDRName = apimachineryvalidation.NameIsDNSLabel - -// ValidateClusterCIDR validates a ClusterCIDR. -func ValidateClusterCIDR(cc *networking.ClusterCIDR) field.ErrorList { - allErrs := apivalidation.ValidateObjectMeta(&cc.ObjectMeta, false, ValidateClusterCIDRName, field.NewPath("metadata")) - allErrs = append(allErrs, ValidateClusterCIDRSpec(&cc.Spec, field.NewPath("spec"))...) - return allErrs -} - -// ValidateClusterCIDRSpec validates ClusterCIDR Spec. -func ValidateClusterCIDRSpec(spec *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - if spec.NodeSelector != nil { - allErrs = append(allErrs, apivalidation.ValidateNodeSelector(spec.NodeSelector, fldPath.Child("nodeSelector"))...) - } - - // Validate if CIDR is specified for at least one IP Family(IPv4/IPv6). - if spec.IPv4 == "" && spec.IPv6 == "" { - allErrs = append(allErrs, field.Required(fldPath, "one or both of `ipv4` and `ipv6` must be specified")) - return allErrs - } - - // Validate specified IPv4 CIDR and PerNodeHostBits. - if spec.IPv4 != "" { - allErrs = append(allErrs, validateCIDRConfig(spec.IPv4, spec.PerNodeHostBits, 32, v1.IPv4Protocol, fldPath)...) - } - - // Validate specified IPv6 CIDR and PerNodeHostBits. - if spec.IPv6 != "" { - allErrs = append(allErrs, validateCIDRConfig(spec.IPv6, spec.PerNodeHostBits, 128, v1.IPv6Protocol, fldPath)...) 
- } - - return allErrs -} - -func validateCIDRConfig(configCIDR string, perNodeHostBits, maxMaskSize int32, ipFamily v1.IPFamily, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - minPerNodeHostBits := int32(4) - - ip, ipNet, err := netutils.ParseCIDRSloppy(configCIDR) - if err != nil { - allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, fmt.Sprintf("must be a valid CIDR: %s", configCIDR))) - return allErrs - } - - if ipFamily == v1.IPv4Protocol && !netutils.IsIPv4(ip) { - allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv4 CIDR")) - } - if ipFamily == v1.IPv6Protocol && !netutils.IsIPv6(ip) { - allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv6 CIDR")) - } - - // Validate PerNodeHostBits - maskSize, _ := ipNet.Mask.Size() - maxPerNodeHostBits := maxMaskSize - int32(maskSize) - - if perNodeHostBits < minPerNodeHostBits { - allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be greater than or equal to %d", minPerNodeHostBits))) - } - if perNodeHostBits > maxPerNodeHostBits { - allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be less than or equal to %d", maxPerNodeHostBits))) - } - return allErrs -} - -// ValidateClusterCIDRUpdate tests if an update to a ClusterCIDR is valid. -func ValidateClusterCIDRUpdate(update, old *networking.ClusterCIDR) field.ErrorList { - var allErrs field.ErrorList - allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) - allErrs = append(allErrs, validateClusterCIDRUpdateSpec(&update.Spec, &old.Spec, field.NewPath("spec"))...) - return allErrs -} - -func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList { - var allErrs field.ErrorList - - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.NodeSelector, old.NodeSelector, fldPath.Child("nodeSelector"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.PerNodeHostBits, old.PerNodeHostBits, fldPath.Child("perNodeHostBits"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv4, old.IPv4, fldPath.Child("ipv4"))...) - allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv6, old.IPv6, fldPath.Child("ipv6"))...) - - return allErrs -} - // ValidateIPAddressName validates that the name is the decimal representation of an IP address. // IPAddress does not support generating names, prefix is not considered. 
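[Editor's note: illustrative sketch, not part of the patch. The deleted validateCIDRConfig above bounds perNodeHostBits between 4 and maxMaskSize minus the CIDR's prefix length; the deleted type doc put it as "to allocate 256 IPs, set this field to 8". A self-contained sketch of that arithmetic, using the standard library's net.ParseCIDR rather than the tree's netutils helper.]

    package main

    import (
        "fmt"
        "net"
    )

    // perNodeHostBitsBounds mirrors the arithmetic in the deleted
    // validateCIDRConfig: perNodeHostBits must be at least 4 (16 IPs per node)
    // and at most maxMaskSize (32 for IPv4, 128 for IPv6) minus the config
    // CIDR's prefix length.
    func perNodeHostBitsBounds(cidr string, maxMaskSize int) (lo, hi int, err error) {
        _, ipNet, err := net.ParseCIDR(cidr)
        if err != nil {
            return 0, 0, fmt.Errorf("must be a valid CIDR: %s", cidr)
        }
        prefix, _ := ipNet.Mask.Size()
        return 4, maxMaskSize - prefix, nil
    }

    func main() {
        lo, hi, _ := perNodeHostBitsBounds("10.1.0.0/16", 32)
        fmt.Println(lo, hi) // 4 16, consistent with the deleted test cases that follow
        lo, hi, _ = perNodeHostBitsBounds("fd00:1:1::/64", 128)
        fmt.Println(lo, hi) // 4 64
    }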
func ValidateIPAddressName(name string, prefix bool) []string { diff --git a/pkg/apis/networking/validation/validation_test.go b/pkg/apis/networking/validation/validation_test.go index 94b5ed73879..b73935e44c9 100644 --- a/pkg/apis/networking/validation/validation_test.go +++ b/pkg/apis/networking/validation/validation_test.go @@ -1837,191 +1837,6 @@ func TestValidateIngressStatusUpdate(t *testing.T) { } } -func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector { - return &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{{ - MatchExpressions: []api.NodeSelectorRequirement{{ - Key: key, - Operator: op, - Values: values, - }}, - }}, - } -} - -func makeClusterCIDR(perNodeHostBits int32, ipv4, ipv6 string, nodeSelector *api.NodeSelector) *networking.ClusterCIDR { - return &networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - ResourceVersion: "9", - }, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4, - IPv6: ipv6, - NodeSelector: nodeSelector, - }, - } -} - -func TestValidateClusterCIDR(t *testing.T) { - testCases := []struct { - name string - cc *networking.ClusterCIDR - expectErr bool - }{{ - name: "valid SingleStack IPv4 ClusterCIDR", - cc: makeClusterCIDR(8, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits", - cc: makeClusterCIDR(16, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits > minPerNodeHostBits", - cc: makeClusterCIDR(4, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR", - cc: makeClusterCIDR(8, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = maxPerNodeHostBit", - cc: makeClusterCIDR(64, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits > minPerNodeHostBit", - cc: makeClusterCIDR(4, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid SingleStack IPv6 ClusterCIDR perNodeHostBits=100", - cc: makeClusterCIDR(100, "", "fd00:1:1::/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid DualStack ClusterCIDR", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "valid DualStack ClusterCIDR, no NodeSelector", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", nil), - expectErr: false, - }, - // Failure cases. - { - name: "invalid ClusterCIDR, no IPv4 or IPv6 CIDR", - cc: makeClusterCIDR(8, "", "", nil), - expectErr: true, - }, { - name: "invalid ClusterCIDR, invalid nodeSelector", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("NoUppercaseOrSpecialCharsLike=Equals", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - // IPv4 tests. 
- { - name: "invalid SingleStack IPv4 ClusterCIDR, invalid spec.IPv4", - cc: makeClusterCIDR(8, "test", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid Singlestack IPv4 ClusterCIDR, perNodeHostBits > maxPerNodeHostBits", - cc: makeClusterCIDR(100, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits < minPerNodeHostBits", - cc: makeClusterCIDR(2, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - // IPv6 tests. - { - name: "invalid SingleStack IPv6 ClusterCIDR, invalid spec.IPv6", - cc: makeClusterCIDR(8, "", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv6 ClusterCIDR, valid IPv4 CIDR in spec.IPv6", - cc: makeClusterCIDR(8, "", "10.2.0.0/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits", - cc: makeClusterCIDR(12, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits < minPerNodeHostBits", - cc: makeClusterCIDR(3, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - // DualStack tests - { - name: "invalid DualStack ClusterCIDR, valid spec.IPv4, invalid spec.IPv6", - cc: makeClusterCIDR(8, "10.1.0.0/16", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid DualStack ClusterCIDR, valid spec.IPv6, invalid spec.IPv4", - cc: makeClusterCIDR(8, "testv4", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid DualStack ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits", - cc: makeClusterCIDR(24, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "invalid DualStack ClusterCIDR, valid IPv6 CIDR in spec.IPv4", - cc: makeClusterCIDR(8, "fd00::/120", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, - } - - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - err := ValidateClusterCIDR(testCase.cc) - if !testCase.expectErr && err != nil { - t.Errorf("ValidateClusterCIDR(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err) - } - if testCase.expectErr && err == nil { - t.Errorf("ValidateClusterCIDR(%+v) must return an error for test: %s, but got nil", testCase.cc, testCase.name) - } - }) - } -} - -func TestValidateClusterConfigUpdate(t *testing.T) { - oldCCC := makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})) - - testCases := []struct { - name string - cc *networking.ClusterCIDR - expectErr bool - }{{ - name: "Successful update, no changes to ClusterCIDR.Spec", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: false, - }, { - name: "Failed update, update spec.PerNodeHostBits", - cc: makeClusterCIDR(12, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, 
[]string{"bar"})), - expectErr: true, - }, { - name: "Failed update, update spec.IPv4", - cc: makeClusterCIDR(8, "10.2.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "Failed update, update spec.IPv6", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:2:/112", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), - expectErr: true, - }, { - name: "Failed update, update spec.NodeSelector", - cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar2"})), - expectErr: true, - }} - for _, testCase := range testCases { - t.Run(testCase.name, func(t *testing.T) { - err := ValidateClusterCIDRUpdate(testCase.cc, oldCCC) - if !testCase.expectErr && err != nil { - t.Errorf("ValidateClusterCIDRUpdate(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err) - } - if testCase.expectErr && err == nil { - t.Errorf("ValidateClusterCIDRUpdate(%+v) must return error for test: %s, but got nil", testCase.cc, testCase.name) - } - }) - } -} - func TestValidateIPAddress(t *testing.T) { testCases := map[string]struct { expectedErrors int diff --git a/pkg/apis/networking/zz_generated.deepcopy.go b/pkg/apis/networking/zz_generated.deepcopy.go index 3a39c6cac40..5752aa40ce8 100644 --- a/pkg/apis/networking/zz_generated.deepcopy.go +++ b/pkg/apis/networking/zz_generated.deepcopy.go @@ -28,87 +28,6 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. -func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { - if in == nil { - return nil - } - out := new(ClusterCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { - if in == nil { - return nil - } - out := new(ClusterCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(core.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. -func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { - if in == nil { - return nil - } - out := new(ClusterCIDRSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index 13fc26ddfff..10f0dce5a44 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -22,18 +22,15 @@ import ( "net" "time" - "k8s.io/api/core/v1" + v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" informers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" clientset "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" "k8s.io/klog/v2" - "k8s.io/kubernetes/pkg/features" ) // CIDRAllocatorType is the type of the allocator to use. @@ -43,9 +40,6 @@ const ( // RangeAllocatorType is the allocator that uses an internal CIDR // range allocator to do node CIDR range allocations. RangeAllocatorType CIDRAllocatorType = "RangeAllocator" - // MultiCIDRRangeAllocatorType is the allocator that uses an internal CIDR - // range allocator to do node CIDR range allocations. - MultiCIDRRangeAllocatorType CIDRAllocatorType = "MultiCIDRRangeAllocator" // CloudAllocatorType is the allocator that uses cloud platform // support to do node CIDR range allocations. CloudAllocatorType CIDRAllocatorType = "CloudAllocator" @@ -119,7 +113,7 @@ type nodeReservedCIDRs struct { } // New creates a new CIDR range allocator. 
-func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { +func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { logger := klog.FromContext(ctx) nodeList, err := listNodes(logger, kubeClient) if err != nil { @@ -129,12 +123,6 @@ func New(ctx context.Context, kubeClient clientset.Interface, cloud cloudprovide switch allocatorType { case RangeAllocatorType: return NewCIDRRangeAllocator(logger, kubeClient, nodeInformer, allocatorParams, nodeList) - case MultiCIDRRangeAllocatorType: - if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { - return nil, fmt.Errorf("invalid CIDR allocator type: %v, feature gate %v must be enabled", allocatorType, features.MultiCIDRRangeAllocator) - } - return NewMultiCIDRRangeAllocator(ctx, kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil) - case CloudAllocatorType: return NewCloudCIDRAllocator(logger, kubeClient, cloud, nodeInformer) default: diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go deleted file mode 100644 index 1c3eedc7d17..00000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go +++ /dev/null @@ -1,140 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ipam - -import ( - "math" - - cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" -) - -// A PriorityQueue implementation based on https://pkg.go.dev/container/heap#example-package-PriorityQueue - -// An PriorityQueueItem is something we manage in a priority queue. -type PriorityQueueItem struct { - clusterCIDR *cidrset.ClusterCIDR - // labelMatchCount is the first determinant of priority. - labelMatchCount int - // selectorString is a string representation of the labelSelector associated with the cidrSet. - selectorString string - // index is needed by update and is maintained by the heap.Interface methods. - index int // The index of the item in the heap. -} - -// A PriorityQueue implements heap.Interface and holds PriorityQueueItems. -type PriorityQueue []*PriorityQueueItem - -func (pq PriorityQueue) Len() int { return len(pq) } - -// Less compares the priority queue items, to store in a min heap. -// Less(i,j) == true denotes i has higher priority than j. -func (pq PriorityQueue) Less(i, j int) bool { - if pq[i].labelMatchCount != pq[j].labelMatchCount { - // P0: CidrSet with higher number of matching labels has the highest priority. - return pq[i].labelMatchCount > pq[j].labelMatchCount - } - - // If the count of matching labels is equal, compare the max allocatable pod CIDRs. 
- if pq[i].maxAllocatable() != pq[j].maxAllocatable() { - // P1: CidrSet with fewer allocatable pod CIDRs has higher priority. - return pq[i].maxAllocatable() < pq[j].maxAllocatable() - } - - // If the value of allocatable pod CIDRs is equal, compare the node mask size. - if pq[i].nodeMaskSize() != pq[j].nodeMaskSize() { - // P2: CidrSet with a PerNodeMaskSize having fewer IPs has higher priority. - // For example, `27` (32 IPs) picked before `25` (128 IPs). - return pq[i].nodeMaskSize() > pq[j].nodeMaskSize() - } - - // If the per node mask size are equal compare the CIDR labels. - if pq[i].selectorString != pq[j].selectorString { - // P3: CidrSet having label with lower alphanumeric value has higher priority. - return pq[i].selectorString < pq[j].selectorString - } - - // P4: CidrSet having an alpha-numerically smaller IP address value has a higher priority. - return pq[i].cidrLabel() < pq[j].cidrLabel() -} - -func (pq PriorityQueue) Swap(i, j int) { - pq[i], pq[j] = pq[j], pq[i] - pq[i].index = i - pq[j].index = j -} - -func (pq *PriorityQueue) Push(x interface{}) { - n := len(*pq) - if item, ok := x.(*PriorityQueueItem); ok { - item.index = n - *pq = append(*pq, item) - } -} - -func (pq *PriorityQueue) Pop() interface{} { - old := *pq - n := len(old) - item := old[n-1] - old[n-1] = nil // avoid memory leak. - item.index = -1 // for safety. - *pq = old[0 : n-1] - return item -} - -// maxAllocatable computes the minimum value of the MaxCIDRs for a ClusterCIDR. -// It compares the MaxCIDRs for each CIDR family and returns the minimum. -// e.g. IPv4 - 10.0.0.0/16 PerNodeMaskSize: 24 MaxCIDRs = 256 -// IPv6 - ff:ff::/120 PerNodeMaskSize: 120 MaxCIDRs = 1 -// MaxAllocatable for this ClusterCIDR = 1 -func (pqi *PriorityQueueItem) maxAllocatable() int { - ipv4Allocatable := math.MaxInt - ipv6Allocatable := math.MaxInt - - if pqi.clusterCIDR.IPv4CIDRSet != nil { - ipv4Allocatable = pqi.clusterCIDR.IPv4CIDRSet.MaxCIDRs - } - - if pqi.clusterCIDR.IPv6CIDRSet != nil { - ipv6Allocatable = pqi.clusterCIDR.IPv6CIDRSet.MaxCIDRs - } - - if ipv4Allocatable < ipv6Allocatable { - return ipv4Allocatable - } - - return ipv6Allocatable -} - -// nodeMaskSize returns IPv4 NodeMaskSize if present, else returns IPv6 NodeMaskSize. -// Note the requirement: 32 - IPv4 NodeMaskSize == 128 - IPv6 NodeMaskSize -// Due to the above requirement it does not matter which NodeMaskSize we compare. -func (pqi *PriorityQueueItem) nodeMaskSize() int { - if pqi.clusterCIDR.IPv4CIDRSet != nil { - return pqi.clusterCIDR.IPv4CIDRSet.NodeMaskSize - } - - return pqi.clusterCIDR.IPv6CIDRSet.NodeMaskSize -} - -// cidrLabel returns IPv4 CIDR if present, else returns IPv6 CIDR. -func (pqi *PriorityQueueItem) cidrLabel() string { - if pqi.clusterCIDR.IPv4CIDRSet != nil { - return pqi.clusterCIDR.IPv4CIDRSet.Label - } - - return pqi.clusterCIDR.IPv6CIDRSet.Label -} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go deleted file mode 100644 index 357592f6ba1..00000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ipam - -import ( - "container/heap" - "testing" - - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" - utilnet "k8s.io/utils/net" -) - -func createTestPriorityQueueItem(name, cidr, selectorString string, labelMatchCount, perNodeHostBits int) *PriorityQueueItem { - _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr) - cidrSet, _ := multicidrset.NewMultiCIDRSet(clusterCIDR, perNodeHostBits) - - return &PriorityQueueItem{ - clusterCIDR: &multicidrset.ClusterCIDR{ - Name: name, - IPv4CIDRSet: cidrSet, - }, - labelMatchCount: labelMatchCount, - selectorString: selectorString, - } -} - -func TestPriorityQueue(t *testing.T) { - - pqi1 := createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8) - pqi2 := createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8) - pqi3 := createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8) - pqi4 := createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6) - pqi5 := createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6) - pqi6 := createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6) - - for _, testQueue := range []struct { - name string - items []*PriorityQueueItem - want *PriorityQueueItem - }{ - {"Test queue with single item", []*PriorityQueueItem{pqi1}, pqi1}, - {"Test queue with items having different labelMatchCount", []*PriorityQueueItem{pqi1, pqi2}, pqi2}, - {"Test queue with items having same labelMatchCount, different max Allocatable Pod CIDRs", []*PriorityQueueItem{pqi1, pqi2, pqi3}, pqi2}, - {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, different PerNodeMaskSize", []*PriorityQueueItem{pqi1, pqi2, pqi4}, pqi4}, - {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5}, pqi4}, - {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5, pqi6}, pqi4}, - } { - pq := make(PriorityQueue, 0) - for _, pqi := range testQueue.items { - heap.Push(&pq, pqi) - } - - got := heap.Pop(&pq) - - if got != testQueue.want { - t.Errorf("Error, wanted: %+v, got: %+v", testQueue.want, got) - } - } -} - -func TestLess(t *testing.T) { - - for _, testQueue := range []struct { - name string - items []*PriorityQueueItem - want bool - }{ - { - name: "different labelMatchCount, i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 2, 8), - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 1, 8), - }, - want: true, - }, - { - name: "different labelMatchCount, i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8), - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), - }, - want: false, - }, - { - name: "same labelMatchCount, different max 
allocatable cidrs, i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), - createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8), - }, - want: true, - }, - { - name: "same labelMatchCount, different max allocatable cidrs, i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/16", "foo=bar,name=test2", 2, 8), - createTestPriorityQueueItem("cidr3", "172.16.0.0/24", "foo=bar,name=test3", 2, 8), - }, - want: false, - }, - { - name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/26", "foo=bar,name=test2", 2, 6), - createTestPriorityQueueItem("cidr4", "10.1.1.0/24", "abc=bar,name=test4", 2, 8), - }, - want: true, - }, - { - name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), - }, - want: false, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6), - }, - want: true, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6), - }, - want: false, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i higher priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6), - }, - want: true, - }, - { - name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i lower priority than j", - items: []*PriorityQueueItem{ - createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6), - createTestPriorityQueueItem("cidr6", "10.0.3.0/26", "abc=bar,name=test4", 2, 6), - }, - want: false, - }, - } { - var pq PriorityQueue - pq = testQueue.items - got := pq.Less(0, 1) - if got != testQueue.want { - t.Errorf("Error, wanted: %v, got: %v\nTest %q \npq[0]: %+v \npq[1]: %+v ", testQueue.want, got, testQueue.name, pq[0], pq[1]) - } - } -} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go deleted file mode 100644 index 0f3b6a3ef1b..00000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go +++ /dev/null @@ -1,1322 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package ipam - -import ( - "container/heap" - "context" - "errors" - "fmt" - "math" - "net" - "sync" - "time" - - v1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/selection" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - informers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" - clientset "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - v1core "k8s.io/client-go/kubernetes/typed/core/v1" - corelisters "k8s.io/client-go/listers/core/v1" - networkinglisters "k8s.io/client-go/listers/networking/v1alpha1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" - nodeutil "k8s.io/component-helpers/node/util" - "k8s.io/klog/v2" - cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" - controllerutil "k8s.io/kubernetes/pkg/controller/util/node" - "k8s.io/kubernetes/pkg/util/slice" - netutil "k8s.io/utils/net" -) - -const ( - defaultClusterCIDRKey = "kubernetes.io/clusterCIDR" - defaultClusterCIDRValue = "default" - defaultClusterCIDRName = "default-cluster-cidr" - defaultClusterCIDRAPIVersion = "networking.k8s.io/v1alpha1" - clusterCIDRFinalizer = "networking.k8s.io/cluster-cidr-finalizer" - ipv4MaxCIDRMask = 32 - ipv6MaxCIDRMask = 128 - minPerNodeHostBits = 4 -) - -// CIDRs are reserved, then node resource is patched with them. -// multiCIDRNodeReservedCIDRs holds the reservation info for a node. -type multiCIDRNodeReservedCIDRs struct { - nodeReservedCIDRs - clusterCIDR *cidrset.ClusterCIDR -} - -type multiCIDRRangeAllocator struct { - client clientset.Interface - // nodeLister is able to list/get nodes and is populated by the shared informer passed to controller. - nodeLister corelisters.NodeLister - // nodesSynced returns true if the node shared informer has been synced at least once. - nodesSynced cache.InformerSynced - // clusterCIDRLister is able to list/get clustercidrs and is populated by the shared informer passed to controller. - clusterCIDRLister networkinglisters.ClusterCIDRLister - // clusterCIDRSynced returns true if the clustercidr shared informer has been synced at least once. - clusterCIDRSynced cache.InformerSynced - // Channel that is used to pass updating Nodes and their reserved CIDRs to the background. - // This increases a throughput of CIDR assignment by not blocking on long operations. - nodeCIDRUpdateChannel chan multiCIDRNodeReservedCIDRs - broadcaster record.EventBroadcaster - recorder record.EventRecorder - // queues are where incoming work is placed to de-dup and to allow "easy" - // rate limited requeues on errors - cidrQueue workqueue.RateLimitingInterface - nodeQueue workqueue.RateLimitingInterface - - // lock guards cidrMap to avoid races in CIDR allocation. 
- lock *sync.Mutex - // cidrMap maps ClusterCIDR labels to internal ClusterCIDR objects. - cidrMap map[string][]*cidrset.ClusterCIDR -} - -// NewMultiCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for node (one for each ip family). -// Caller must always pass in a list of existing nodes to the new allocator. -// NodeList is only nil in testing. -func NewMultiCIDRRangeAllocator( - ctx context.Context, - client clientset.Interface, - nodeInformer informers.NodeInformer, - clusterCIDRInformer networkinginformers.ClusterCIDRInformer, - allocatorParams CIDRAllocatorParams, - nodeList *v1.NodeList, - testCIDRMap map[string][]*cidrset.ClusterCIDR, -) (CIDRAllocator, error) { - logger := klog.FromContext(ctx) - if client == nil { - logger.Error(nil, "kubeClient is nil when starting multi CIDRRangeAllocator") - klog.FlushAndExit(klog.ExitFlushTimeout, 1) - } - - eventBroadcaster := record.NewBroadcaster() - eventSource := v1.EventSource{ - Component: "multiCIDRRangeAllocator", - } - recorder := eventBroadcaster.NewRecorder(scheme.Scheme, eventSource) - - ra := &multiCIDRRangeAllocator{ - client: client, - nodeLister: nodeInformer.Lister(), - nodesSynced: nodeInformer.Informer().HasSynced, - clusterCIDRLister: clusterCIDRInformer.Lister(), - clusterCIDRSynced: clusterCIDRInformer.Informer().HasSynced, - nodeCIDRUpdateChannel: make(chan multiCIDRNodeReservedCIDRs, cidrUpdateQueueSize), - broadcaster: eventBroadcaster, - recorder: recorder, - cidrQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "multi_cidr_range_allocator_cidr"), - nodeQueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "multi_cidr_range_allocator_node"), - lock: &sync.Mutex{}, - cidrMap: make(map[string][]*cidrset.ClusterCIDR, 0), - } - - // testCIDRMap is only set for testing purposes. - if len(testCIDRMap) > 0 { - ra.cidrMap = testCIDRMap - logger.Info("TestCIDRMap should only be set for testing purposes, if this is seen in production logs, it might be a misconfiguration or a bug") - } - - ccList, err := listClusterCIDRs(ctx, client) - if err != nil { - return nil, err - } - - if ccList == nil { - ccList = &networkingv1alpha1.ClusterCIDRList{} - } - createDefaultClusterCIDR(logger, ccList, allocatorParams) - - // Regenerate the cidrMaps from the existing ClusterCIDRs. - for _, clusterCIDR := range ccList.Items { - logger.Info("Regenerating existing ClusterCIDR", "clusterCIDR", clusterCIDR) - // Create an event for invalid ClusterCIDRs, do not crash on failures. - if err := ra.reconcileBootstrap(ctx, &clusterCIDR); err != nil { - logger.Error(err, "Error while regenerating existing ClusterCIDR") - ra.recorder.Event(&clusterCIDR, "Warning", "InvalidClusterCIDR encountered while regenerating ClusterCIDR during bootstrap.", err.Error()) - } - } - - clusterCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err == nil { - ra.cidrQueue.Add(key) - } - }, - UpdateFunc: func(old, new interface{}) { - key, err := cache.MetaNamespaceKeyFunc(new) - if err == nil { - ra.cidrQueue.Add(key) - } - }, - DeleteFunc: func(obj interface{}) { - // IndexerInformer uses a delta nodeQueue, therefore for deletes we have to use this - // key function. 
- key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err == nil { - ra.cidrQueue.Add(key) - } - }, - }) - - if allocatorParams.ServiceCIDR != nil { - ra.filterOutServiceRange(logger, allocatorParams.ServiceCIDR) - } else { - logger.Info("No Service CIDR provided. Skipping filtering out service addresses") - } - - if allocatorParams.SecondaryServiceCIDR != nil { - ra.filterOutServiceRange(logger, allocatorParams.SecondaryServiceCIDR) - } else { - logger.Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses") - } - - if nodeList != nil { - for _, node := range nodeList.Items { - if len(node.Spec.PodCIDRs) == 0 { - logger.V(4).Info("Node has no CIDR, ignoring", "node", klog.KObj(&node)) - continue - } - logger.Info("Node has CIDR, occupying it in CIDR map", "node", klog.KObj(&node), "podCIDRs", node.Spec.PodCIDRs) - if err := ra.occupyCIDRs(logger, &node); err != nil { - // This will happen if: - // 1. We find garbage in the podCIDRs field. Retrying is useless. - // 2. CIDR out of range: This means ClusterCIDR is not yet created - // This error will keep crashing controller-manager until the - // appropriate ClusterCIDR has been created - return nil, err - } - } - } - - nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: func(obj interface{}) { - key, err := cache.MetaNamespaceKeyFunc(obj) - if err == nil { - ra.nodeQueue.Add(key) - } - }, - UpdateFunc: func(old, new interface{}) { - key, err := cache.MetaNamespaceKeyFunc(new) - if err == nil { - ra.nodeQueue.Add(key) - } - }, - DeleteFunc: func(obj interface{}) { - // The informer cache no longer has the object, and since Node doesn't have a finalizer, - // we don't see the Update with DeletionTimestamp != 0. - // TODO: instead of executing the operation directly in the handler, build a small cache with key node.Name - // and value PodCIDRs use ReleaseCIDR on the reconcile loop so we can retry on `ReleaseCIDR` failures. - ra.ReleaseCIDR(logger, obj.(*v1.Node)) - // IndexerInformer uses a delta nodeQueue, therefore for deletes we have to use this - // key function. - key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj) - if err == nil { - ra.nodeQueue.Add(key) - } - }, - }) - - return ra, nil -} - -func (r *multiCIDRRangeAllocator) Run(ctx context.Context) { - defer utilruntime.HandleCrash() - - // Start event processing pipeline. - logger := klog.FromContext(ctx) - r.broadcaster.StartStructuredLogging(0) - logger.Info("Started sending events to API Server") - r.broadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: r.client.CoreV1().Events("")}) - defer r.broadcaster.Shutdown() - - defer r.cidrQueue.ShutDown() - defer r.nodeQueue.ShutDown() - - logger.Info("Starting Multi CIDR Range allocator") - defer logger.Info("Shutting down Multi CIDR Range allocator") - - if !cache.WaitForNamedCacheSync("multi_cidr_range_allocator", ctx.Done(), r.nodesSynced, r.clusterCIDRSynced) { - return - } - - for i := 0; i < cidrUpdateWorkers; i++ { - go wait.UntilWithContext(ctx, r.runCIDRWorker, time.Second) - go wait.UntilWithContext(ctx, r.runNodeWorker, time.Second) - } - - <-ctx.Done() -} - -// runWorker is a long-running function that will continually call the -// processNextWorkItem function in order to read and process a message on the -// cidrQueue. 
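The two worker loops that follow are instances of the canonical client-go workqueue pattern. Stripped of the allocator specifics, the contract they implement looks like the condensed sketch below; `processNext` and `syncFn` are illustrative names, not part of the original code.

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// processNext drains one item, mirroring the Get/Done/Forget/AddRateLimited
// contract used by processNextCIDRWorkItem and processNextNodeWorkItem below.
func processNext(q workqueue.RateLimitingInterface, syncFn func(string) error) bool {
	obj, shutdown := q.Get()
	if shutdown {
		return false // queue is shutting down; stop the worker loop
	}
	defer q.Done(obj) // always tell the queue we finished this item

	key, ok := obj.(string)
	if !ok {
		q.Forget(obj) // malformed item: drop it so it is never retried
		return true
	}
	if err := syncFn(key); err != nil {
		q.AddRateLimited(key) // transient failure: retry with back-off
		return true
	}
	q.Forget(key) // success: reset the per-key rate limiter
	return true
}

func main() {
	q := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "sketch")
	q.Add("node0")
	processNext(q, func(key string) error { fmt.Println("synced", key); return nil })
}
```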
-func (r *multiCIDRRangeAllocator) runCIDRWorker(ctx context.Context) {
-	for r.processNextCIDRWorkItem(ctx) {
-	}
-}
-
-// processNextCIDRWorkItem will read a single work item off the cidrQueue and
-// attempt to process it by calling the syncHandler.
-func (r *multiCIDRRangeAllocator) processNextCIDRWorkItem(ctx context.Context) bool {
-	logger := klog.FromContext(ctx)
-	obj, shutdown := r.cidrQueue.Get()
-	if shutdown {
-		return false
-	}
-
-	// We wrap this block in a func so we can defer r.cidrQueue.Done.
-	err := func(ctx context.Context, obj interface{}) error {
-		// We call Done here so the cidrQueue knows we have finished
-		// processing this item. We also must remember to call Forget if we
-		// do not want this work item being re-queued. For example, we do
-		// not call Forget if a transient error occurs, instead the item is
-		// put back on the cidrQueue and attempted again after a back-off
-		// period.
-		defer r.cidrQueue.Done(obj)
-		var key string
-		var ok bool
-		// We expect strings to come off the cidrQueue. These are of the
-		// form namespace/name. We do this as the delayed nature of the
-		// cidrQueue means the items in the informer cache may actually be
-		// more up to date than when the item was initially put onto the
-		// cidrQueue.
-		if key, ok = obj.(string); !ok {
-			// As the item in the cidrQueue is actually invalid, we call
-			// Forget here else we'd go into a loop of attempting to
-			// process a work item that is invalid.
-			r.cidrQueue.Forget(obj)
-			utilruntime.HandleError(fmt.Errorf("expected string in cidrQueue but got %#v", obj))
-			return nil
-		}
-		// Run the syncHandler, passing it the key of the
-		// ClusterCIDR resource to be synced.
-		if err := r.syncClusterCIDR(ctx, key); err != nil {
-			// Put the item back on the cidrQueue to handle any transient errors.
-			r.cidrQueue.AddRateLimited(key)
-			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
-		}
-		// Finally, if no error occurs we Forget this item so it does not
-		// get queued again until another change happens.
-		r.cidrQueue.Forget(obj)
-		logger.Info("Successfully synced", "key", key)
-		return nil
-	}(ctx, obj)
-
-	if err != nil {
-		utilruntime.HandleError(err)
-		return true
-	}
-
-	return true
-}
-
-func (r *multiCIDRRangeAllocator) runNodeWorker(ctx context.Context) {
-	for r.processNextNodeWorkItem(ctx) {
-	}
-}
-
-// processNextNodeWorkItem will read a single work item off the nodeQueue and
-// attempt to process it by calling the syncHandler.
-func (r *multiCIDRRangeAllocator) processNextNodeWorkItem(ctx context.Context) bool {
-	obj, shutdown := r.nodeQueue.Get()
-	if shutdown {
-		return false
-	}
-
-	// We wrap this block in a func so we can defer r.nodeQueue.Done.
-	err := func(logger klog.Logger, obj interface{}) error {
-		// We call Done here so the workNodeQueue knows we have finished
-		// processing this item. We also must remember to call Forget if we
-		// do not want this work item being re-queued. For example, we do
-		// not call Forget if a transient error occurs, instead the item is
-		// put back on the nodeQueue and attempted again after a back-off
-		// period.
-		defer r.nodeQueue.Done(obj)
-		var key string
-		var ok bool
-		// We expect strings to come off the workNodeQueue. These are of the
-		// form namespace/name. We do this as the delayed nature of the
-		// workNodeQueue means the items in the informer cache may actually be
-		// more up to date than when the item was initially put onto the
-		// workNodeQueue.
-		if key, ok = obj.(string); !ok {
-			// As the item in the workNodeQueue is actually invalid, we call
-			// Forget here else we'd go into a loop of attempting to
-			// process a work item that is invalid.
-			r.nodeQueue.Forget(obj)
-			utilruntime.HandleError(fmt.Errorf("expected string in workNodeQueue but got %#v", obj))
-			return nil
-		}
-		// Run the syncHandler, passing it the key of the
-		// Node resource to be synced.
-		if err := r.syncNode(logger, key); err != nil {
-			// Put the item back on the nodeQueue to handle any transient errors.
-			r.nodeQueue.AddRateLimited(key)
-			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
-		}
-		// Finally, if no error occurs we Forget this item so it does not
-		// get queued again until another change happens.
-		r.nodeQueue.Forget(obj)
-		logger.Info("Successfully synced", "key", key)
-		return nil
-	}(klog.FromContext(ctx), obj)
-
-	if err != nil {
-		utilruntime.HandleError(err)
-		return true
-	}
-
-	return true
-}
-
-func (r *multiCIDRRangeAllocator) syncNode(logger klog.Logger, key string) error {
-	startTime := time.Now()
-	defer func() {
-		logger.V(4).Info("Finished syncing Node request", "node", key, "elapsed", time.Since(startTime))
-	}()
-
-	node, err := r.nodeLister.Get(key)
-	if apierrors.IsNotFound(err) {
-		logger.V(3).Info("node has been deleted", "node", key)
-		// TODO: obtain the node object information to call ReleaseCIDR from here
-		// and retry if there is an error.
-		return nil
-	}
-	if err != nil {
-		return err
-	}
-	// Check the DeletionTimestamp to determine if the object is under deletion.
-	if !node.DeletionTimestamp.IsZero() {
-		logger.V(3).Info("node is being deleted", "node", key)
-		return r.ReleaseCIDR(logger, node)
-	}
-	return r.AllocateOrOccupyCIDR(logger, node)
-}
-
-// needToAddFinalizer checks if a finalizer should be added to the object.
-func needToAddFinalizer(obj metav1.Object, finalizer string) bool {
-	return obj.GetDeletionTimestamp() == nil && !slice.ContainsString(obj.GetFinalizers(),
-		finalizer, nil)
-}
-
-func (r *multiCIDRRangeAllocator) syncClusterCIDR(ctx context.Context, key string) error {
-	startTime := time.Now()
-	logger := klog.FromContext(ctx)
-	defer func() {
-		logger.V(4).Info("Finished syncing clusterCIDR request", "key", key, "latency", time.Since(startTime))
-	}()
-
-	clusterCIDR, err := r.clusterCIDRLister.Get(key)
-	if apierrors.IsNotFound(err) {
-		logger.V(3).Info("clusterCIDR has been deleted", "key", key)
-		return nil
-	}
-
-	if err != nil {
-		return err
-	}
-
-	// Check the DeletionTimestamp to determine if the object is under deletion.
-	if !clusterCIDR.DeletionTimestamp.IsZero() {
-		return r.reconcileDelete(ctx, clusterCIDR)
-	}
-	return r.reconcileCreate(ctx, clusterCIDR)
-}
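Both sync handlers above reduce to the same dispatch on the object's DeletionTimestamp: a set timestamp routes to the release/delete path, anything else to the create path. A minimal sketch of that dispatch; `reconcile`, `onCreate`, and `onDelete` are illustrative stand-ins for the real handlers.

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// reconcile dispatches the way syncNode and syncClusterCIDR do: a set
// DeletionTimestamp means the object is being torn down.
func reconcile(obj metav1.Object, onCreate, onDelete func() error) error {
	if ts := obj.GetDeletionTimestamp(); ts != nil && !ts.IsZero() {
		return onDelete()
	}
	return onCreate()
}

func main() {
	now := metav1.Now()
	deleted := &metav1.ObjectMeta{DeletionTimestamp: &now}
	_ = reconcile(deleted,
		func() error { return fmt.Errorf("unexpected create") },
		func() error { fmt.Println("release path"); return nil })
}
```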
-
-// occupyCIDRs marks node.PodCIDRs[...] as used in the allocator's tracked cidrSet.
-func (r *multiCIDRRangeAllocator) occupyCIDRs(logger klog.Logger, node *v1.Node) error {
-
-	err := func(node *v1.Node) error {
-
-		if len(node.Spec.PodCIDRs) == 0 {
-			return nil
-		}
-		clusterCIDRList, err := r.orderedMatchingClusterCIDRs(logger, node, true)
-		if err != nil {
-			return err
-		}
-
-		for _, clusterCIDR := range clusterCIDRList {
-			occupiedCount := 0
-
-			for _, cidr := range node.Spec.PodCIDRs {
-				_, podCIDR, err := netutil.ParseCIDRSloppy(cidr)
-				if err != nil {
-					return fmt.Errorf("failed to parse CIDR %s on Node %v: %w", cidr, node.Name, err)
-				}
-
-				logger.Info("occupy CIDR for node", "CIDR", cidr, "node", klog.KObj(node))
-
-				if err := r.Occupy(clusterCIDR, podCIDR); err != nil {
-					logger.V(3).Info("Could not occupy cidr, trying next range", "podCIDRs", node.Spec.PodCIDRs, "err", err)
-					break
-				}
-
-				occupiedCount++
-			}
-
-			// Mark CIDRs as occupied only if the ClusterCIDR is able to occupy all the node CIDRs.
-			if occupiedCount == len(node.Spec.PodCIDRs) {
-				clusterCIDR.AssociatedNodes[node.Name] = true
-				return nil
-			}
-		}
-
-		return fmt.Errorf("could not occupy cidrs: %v, no matching ClusterCIDRs found", node.Spec.PodCIDRs)
-	}(node)
-
-	return err
-}
-
-// associatedCIDRSet returns the CIDRSet, based on the IP family of the CIDR.
-func (r *multiCIDRRangeAllocator) associatedCIDRSet(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) (*cidrset.MultiCIDRSet, error) {
-	switch {
-	case netutil.IsIPv4CIDR(cidr):
-		return clusterCIDR.IPv4CIDRSet, nil
-	case netutil.IsIPv6CIDR(cidr):
-		return clusterCIDR.IPv6CIDRSet, nil
-	default:
-		return nil, fmt.Errorf("invalid cidr: %v", cidr)
-	}
-}
-
-// Occupy marks the CIDR as occupied in the allocatedCIDRMap of the cidrSet.
-func (r *multiCIDRRangeAllocator) Occupy(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) error {
-	currCIDRSet, err := r.associatedCIDRSet(clusterCIDR, cidr)
-	if err != nil {
-		return err
-	}
-
-	if err := currCIDRSet.Occupy(cidr); err != nil {
-		return fmt.Errorf("unable to occupy cidr %v in cidrSet", cidr)
-	}
-
-	return nil
-}
-
-// Release marks the CIDR as free in the cidrSet used bitmap,
-// and removes the CIDR from the allocatedCIDRSet.
-func (r *multiCIDRRangeAllocator) Release(logger klog.Logger, clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) error {
-	currCIDRSet, err := r.associatedCIDRSet(clusterCIDR, cidr)
-	if err != nil {
-		return err
-	}
-
-	if err := currCIDRSet.Release(cidr); err != nil {
-		logger.Info("Unable to release cidr in cidrSet", "CIDR", cidr)
-		return err
-	}
-
-	return nil
-}
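associatedCIDRSet above is a plain address-family dispatch on the CIDR. A runnable sketch of the same test, using the `k8s.io/utils/net` helpers the allocator itself relies on; `familyOf` is an illustrative name.

```go
package main

import (
	"fmt"
	"net"

	netutil "k8s.io/utils/net"
)

// familyOf reports which per-family CIDR set a prefix would be tracked in,
// mirroring associatedCIDRSet's IsIPv4CIDR/IsIPv6CIDR dispatch.
func familyOf(cidr *net.IPNet) (string, error) {
	switch {
	case netutil.IsIPv4CIDR(cidr):
		return "IPv4", nil
	case netutil.IsIPv6CIDR(cidr):
		return "IPv6", nil
	default:
		return "", fmt.Errorf("invalid cidr: %v", cidr)
	}
}

func main() {
	_, v4, _ := net.ParseCIDR("10.0.0.0/8")
	_, v6, _ := net.ParseCIDR("2001:db8::/64")
	fmt.Println(familyOf(v4)) // IPv4 <nil>
	fmt.Println(familyOf(v6)) // IPv6 <nil>
}
```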
-
-// AllocateOrOccupyCIDR allocates a CIDR to the node if the node doesn't have a
-// CIDR already allocated, occupies the CIDR and marks it as used if the node
-// already has a PodCIDR assigned.
-// WARNING: If you're adding any return calls or defer any more work from this
-// function, you have to make sure to update nodesInProcessing properly with the
-// disposition of the node when the work is done.
-func (r *multiCIDRRangeAllocator) AllocateOrOccupyCIDR(logger klog.Logger, node *v1.Node) error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	if node == nil {
-		return nil
-	}
-
-	if len(node.Spec.PodCIDRs) > 0 {
-		return r.occupyCIDRs(logger, node)
-	}
-
-	cidrs, clusterCIDR, err := r.prioritizedCIDRs(logger, node)
-	if err != nil {
-		controllerutil.RecordNodeStatusChange(logger, r.recorder, node, "CIDRNotAvailable")
-		return fmt.Errorf("failed to get cidrs for node %s", node.Name)
-	}
-
-	if len(cidrs) == 0 {
-		controllerutil.RecordNodeStatusChange(logger, r.recorder, node, "CIDRNotAvailable")
-		return fmt.Errorf("no cidrSets with matching labels found for node %s", node.Name)
-	}
-
-	// allocate and queue the assignment.
-	allocated := multiCIDRNodeReservedCIDRs{
-		nodeReservedCIDRs: nodeReservedCIDRs{
-			nodeName:       node.Name,
-			allocatedCIDRs: cidrs,
-		},
-		clusterCIDR: clusterCIDR,
-	}
-
-	return r.updateCIDRsAllocation(logger, allocated)
-}
-
-// ReleaseCIDR marks node.podCIDRs[...] as unused in our tracked cidrSets.
-func (r *multiCIDRRangeAllocator) ReleaseCIDR(logger klog.Logger, node *v1.Node) error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	if node == nil || len(node.Spec.PodCIDRs) == 0 {
-		return nil
-	}
-
-	clusterCIDR, err := r.allocatedClusterCIDR(logger, node)
-	if err != nil {
-		return err
-	}
-
-	for _, cidr := range node.Spec.PodCIDRs {
-		_, podCIDR, err := netutil.ParseCIDRSloppy(cidr)
-		if err != nil {
-			return fmt.Errorf("failed to parse CIDR %q on Node %q: %w", cidr, node.Name, err)
-		}
-
-		logger.Info("release CIDR for node", "CIDR", cidr, "node", klog.KObj(node))
-		if err := r.Release(logger, clusterCIDR, podCIDR); err != nil {
-			return fmt.Errorf("failed to release cidr %q from clusterCIDR %q for node %q: %w", cidr, clusterCIDR.Name, node.Name, err)
-		}
-	}
-
-	// Remove the node from the ClusterCIDR AssociatedNodes.
-	delete(clusterCIDR.AssociatedNodes, node.Name)
-
-	return nil
-}
-
-// filterOutServiceRange marks all CIDRs that overlap with the serviceCIDR as used
-// across all cidrSets, so that they won't be assignable.
-func (r *multiCIDRRangeAllocator) filterOutServiceRange(logger klog.Logger, serviceCIDR *net.IPNet) {
-	// Checks if the service CIDR has a nonempty intersection with the cluster
-	// CIDR. It is the case if either clusterCIDR contains serviceCIDR with
-	// clusterCIDR's Mask applied (this means that clusterCIDR contains
-	// serviceCIDR) or vice versa (which means that serviceCIDR contains
-	// clusterCIDR).
-	for _, clusterCIDRList := range r.cidrMap {
-		for _, clusterCIDR := range clusterCIDRList {
-			if err := r.occupyServiceCIDR(clusterCIDR, serviceCIDR); err != nil {
-				logger.Error(err, "Unable to occupy service CIDR")
-			}
-		}
-	}
-}
-
-func (r *multiCIDRRangeAllocator) occupyServiceCIDR(clusterCIDR *cidrset.ClusterCIDR, serviceCIDR *net.IPNet) error {
-
-	cidrSet, err := r.associatedCIDRSet(clusterCIDR, serviceCIDR)
-	if err != nil {
-		return err
-	}
-
-	cidr := cidrSet.ClusterCIDR
-
-	// No need to occupy as the Service CIDR doesn't intersect with the current ClusterCIDR.
-	if !cidr.Contains(serviceCIDR.IP.Mask(cidr.Mask)) && !serviceCIDR.Contains(cidr.IP.Mask(serviceCIDR.Mask)) {
-		return nil
-	}
-
-	if err := r.Occupy(clusterCIDR, serviceCIDR); err != nil {
-		return fmt.Errorf("error filtering out service cidr %v from cluster cidr %v: %w", serviceCIDR, cidr, err)
-	}
-
-	return nil
-}
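The mutual-containment condition in occupyServiceCIDR above is the standard constant-time overlap test for two CIDRs: two prefixes intersect exactly when one contains the other's network address. A self-contained sketch, written in the positive (logically equivalent) form:

```go
package main

import (
	"fmt"
	"net"
)

// overlaps reports whether two CIDRs intersect: a contains b's start
// (masked to a's width), or b contains a's start.
func overlaps(a, b *net.IPNet) bool {
	return a.Contains(b.IP.Mask(a.Mask)) || b.Contains(a.IP.Mask(b.Mask))
}

func main() {
	_, cluster, _ := net.ParseCIDR("10.0.0.0/8")
	_, service, _ := net.ParseCIDR("10.96.0.0/12")
	_, other, _ := net.ParseCIDR("192.168.0.0/16")
	fmt.Println(overlaps(cluster, service)) // true: the service range nests inside the cluster range
	fmt.Println(overlaps(cluster, other))   // false: disjoint ranges
}
```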
-
-// updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server.
-func (r *multiCIDRRangeAllocator) updateCIDRsAllocation(logger klog.Logger, data multiCIDRNodeReservedCIDRs) error {
-	err := func(data multiCIDRNodeReservedCIDRs) error {
-		cidrsString := ipnetToStringList(data.allocatedCIDRs)
-		node, err := r.nodeLister.Get(data.nodeName)
-		if err != nil {
-			logger.Error(err, "Failed while getting node for updating Node.Spec.PodCIDRs", "node", klog.KRef("", data.nodeName))
-			return err
-		}
-
-		// If the cidr list matches the proposed one,
-		// then we possibly updated this node
-		// and just failed to ack the success.
-		if len(node.Spec.PodCIDRs) == len(data.allocatedCIDRs) {
-			match := true
-			for idx, cidr := range cidrsString {
-				if node.Spec.PodCIDRs[idx] != cidr {
-					match = false
-					break
-				}
-			}
-			if match {
-				logger.V(4).Info("Node already has allocated CIDR. It matches the proposed one.", "node", klog.KObj(node), "CIDRs", data.allocatedCIDRs)
-				return nil
-			}
-		}
-
-		// The node already has CIDRs allocated; release the reserved ones.
-		if len(node.Spec.PodCIDRs) != 0 {
-			logger.Error(nil, "Node already has a CIDR allocated. Releasing the new one", "node", klog.KObj(node), "podCIDRs", node.Spec.PodCIDRs)
-			for _, cidr := range data.allocatedCIDRs {
-				if err := r.Release(logger, data.clusterCIDR, cidr); err != nil {
-					return fmt.Errorf("failed to release cidr %s from clusterCIDR %s for node: %s: %w", cidr, data.clusterCIDR.Name, node.Name, err)
-				}
-			}
-			return nil
-		}
-
-		// If we reached here, it means that the node has no CIDR currently assigned. So we set it.
-		for i := 0; i < cidrUpdateRetries; i++ {
-			if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil {
-				data.clusterCIDR.AssociatedNodes[node.Name] = true
-				logger.Info("Set node PodCIDR", "node", klog.KObj(node), "podCIDR", cidrsString)
-				return nil
-			}
-		}
-		// The patch failed; release the CIDRs back to the pool.
-		logger.Error(err, "Failed to update node PodCIDR after attempts", "node", klog.KObj(node), "podCIDR", cidrsString, "retries", cidrUpdateRetries)
-		controllerutil.RecordNodeStatusChange(logger, r.recorder, node, "CIDRAssignmentFailed")
-		// We accept the fact that we may leak CIDRs here. This is safer than releasing
-		// them in case when we don't know if the request went through.
-		// NodeController restart will return all falsely allocated CIDRs to the pool.
-		if !apierrors.IsServerTimeout(err) {
-			logger.Error(err, "CIDR assignment for node failed. Releasing allocated CIDR", "node", klog.KObj(node))
-			for _, cidr := range data.allocatedCIDRs {
-				if err := r.Release(logger, data.clusterCIDR, cidr); err != nil {
-					return fmt.Errorf("failed to release cidr %q from clusterCIDR %q for node: %q: %w", cidr, data.clusterCIDR.Name, node.Name, err)
-				}
-			}
-		}
-		return err
-	}(data)
-
-	return err
-}
-
-// defaultNodeSelector generates a label with defaultClusterCIDRKey as the key and
-// defaultClusterCIDRValue as the value; it is an internal nodeSelector matching all
-// nodes. Only used if no ClusterCIDR selects the node.
-func defaultNodeSelector() *v1.NodeSelector {
-	return &v1.NodeSelector{
-		NodeSelectorTerms: []v1.NodeSelectorTerm{
-			{
-				MatchExpressions: []v1.NodeSelectorRequirement{
-					{
-						Key:      defaultClusterCIDRKey,
-						Operator: v1.NodeSelectorOpIn,
-						Values:   []string{defaultClusterCIDRValue},
-					},
-				},
-			},
-		},
-	}
-}
-
-// prioritizedCIDRs returns a list of CIDRs to be allocated to the node.
-// Returns 1 CIDR if single stack.
-// Returns 2 CIDRs, 1 from each IP family, if dual stack.
-func (r *multiCIDRRangeAllocator) prioritizedCIDRs(logger klog.Logger, node *v1.Node) ([]*net.IPNet, *cidrset.ClusterCIDR, error) { - clusterCIDRList, err := r.orderedMatchingClusterCIDRs(logger, node, true) - if err != nil { - return nil, nil, fmt.Errorf("unable to get a clusterCIDR for node %s: %w", node.Name, err) - } - - for _, clusterCIDR := range clusterCIDRList { - cidrs := make([]*net.IPNet, 0) - if clusterCIDR.IPv4CIDRSet != nil { - cidr, err := r.allocateCIDR(clusterCIDR, clusterCIDR.IPv4CIDRSet) - if err != nil { - logger.V(3).Info("Unable to allocate IPv4 CIDR, trying next range", "err", err) - continue - } - cidrs = append(cidrs, cidr) - } - - if clusterCIDR.IPv6CIDRSet != nil { - cidr, err := r.allocateCIDR(clusterCIDR, clusterCIDR.IPv6CIDRSet) - if err != nil { - logger.V(3).Info("Unable to allocate IPv6 CIDR, trying next range", "err", err) - continue - } - cidrs = append(cidrs, cidr) - } - - return cidrs, clusterCIDR, nil - } - return nil, nil, fmt.Errorf("unable to get a clusterCIDR for node %s, no available CIDRs", node.Name) -} - -func (r *multiCIDRRangeAllocator) allocateCIDR(clusterCIDR *cidrset.ClusterCIDR, cidrSet *cidrset.MultiCIDRSet) (*net.IPNet, error) { - - for evaluated := 0; evaluated < cidrSet.MaxCIDRs; evaluated++ { - candidate, lastEvaluated, err := cidrSet.NextCandidate() - if err != nil { - return nil, err - } - - evaluated += lastEvaluated - - if r.cidrInAllocatedList(candidate) { - continue - } - - // Deep Check. - if r.cidrOverlapWithAllocatedList(candidate) { - continue - } - - // Mark the CIDR as occupied in the map. - if err := r.Occupy(clusterCIDR, candidate); err != nil { - return nil, err - } - // Increment the evaluated count metric. - cidrSet.UpdateEvaluatedCount(evaluated) - return candidate, nil - } - return nil, &cidrset.CIDRRangeNoCIDRsRemainingErr{ - CIDR: cidrSet.Label, - } -} - -func (r *multiCIDRRangeAllocator) cidrInAllocatedList(cidr *net.IPNet) bool { - for _, clusterCIDRList := range r.cidrMap { - for _, clusterCIDR := range clusterCIDRList { - cidrSet, _ := r.associatedCIDRSet(clusterCIDR, cidr) - if cidrSet != nil { - if ok := cidrSet.AllocatedCIDRMap[cidr.String()]; ok { - return true - } - } - } - } - return false -} - -func (r *multiCIDRRangeAllocator) cidrOverlapWithAllocatedList(cidr *net.IPNet) bool { - for _, clusterCIDRList := range r.cidrMap { - for _, clusterCIDR := range clusterCIDRList { - cidrSet, _ := r.associatedCIDRSet(clusterCIDR, cidr) - if cidrSet != nil { - for allocated := range cidrSet.AllocatedCIDRMap { - _, allocatedCIDR, _ := netutil.ParseCIDRSloppy(allocated) - if cidr.Contains(allocatedCIDR.IP.Mask(cidr.Mask)) || allocatedCIDR.Contains(cidr.IP.Mask(allocatedCIDR.Mask)) { - return true - } - } - } - } - } - return false -} - -// allocatedClusterCIDR returns the ClusterCIDR from which the node CIDRs were allocated. -func (r *multiCIDRRangeAllocator) allocatedClusterCIDR(logger klog.Logger, node *v1.Node) (*cidrset.ClusterCIDR, error) { - clusterCIDRList, err := r.orderedMatchingClusterCIDRs(logger, node, false) - if err != nil { - return nil, fmt.Errorf("unable to get a clusterCIDR for node %s: %w", node.Name, err) - } - - for _, clusterCIDR := range clusterCIDRList { - if ok := clusterCIDR.AssociatedNodes[node.Name]; ok { - return clusterCIDR, nil - } - } - return nil, fmt.Errorf("no clusterCIDR found associated with node: %s", node.Name) -} - -// orderedMatchingClusterCIDRs returns a list of all the ClusterCIDRs matching the node labels. 
-// The list is ordered with the following priority, which act as tie-breakers. -// P0: ClusterCIDR with higher number of matching labels has the highest priority. -// P1: ClusterCIDR having cidrSet with fewer allocatable Pod CIDRs has higher priority. -// P2: ClusterCIDR with a PerNodeMaskSize having fewer IPs has higher priority. -// P3: ClusterCIDR having label with lower alphanumeric value has higher priority. -// P4: ClusterCIDR with a cidrSet having a smaller IP address value has a higher priority. -// -// orderedMatchingClusterCIDRs takes `occupy` as an argument, it determines whether the function -// is called during an occupy or a release operation. For a release operation, a ClusterCIDR must -// be added to the matching ClusterCIDRs list, irrespective of whether the ClusterCIDR is terminating. -func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(logger klog.Logger, node *v1.Node, occupy bool) ([]*cidrset.ClusterCIDR, error) { - matchingCIDRs := make([]*cidrset.ClusterCIDR, 0) - pq := make(PriorityQueue, 0) - - for label, clusterCIDRList := range r.cidrMap { - labelsMatch, matchCnt, err := r.matchCIDRLabels(logger, node, label) - if err != nil { - return nil, err - } - - if !labelsMatch { - continue - } - - for _, clusterCIDR := range clusterCIDRList { - pqItem := &PriorityQueueItem{ - clusterCIDR: clusterCIDR, - labelMatchCount: matchCnt, - selectorString: label, - } - - // Only push the CIDRsets which are not marked for termination. - // Always push the CIDRsets when marked for release. - if !occupy || !clusterCIDR.Terminating { - heap.Push(&pq, pqItem) - } - } - } - - // Remove the ClusterCIDRs from the PriorityQueue. - // They arrive in descending order of matchCnt, - // if matchCnt is equal it is ordered in ascending order of labels. - for pq.Len() > 0 { - pqItem := heap.Pop(&pq).(*PriorityQueueItem) - matchingCIDRs = append(matchingCIDRs, pqItem.clusterCIDR) - } - - // Append the catch all CIDR config. - defaultSelector, err := nodeSelectorAsSelector(defaultNodeSelector()) - if err != nil { - return nil, err - } - if clusterCIDRList, ok := r.cidrMap[defaultSelector.String()]; ok { - matchingCIDRs = append(matchingCIDRs, clusterCIDRList...) - } - return matchingCIDRs, nil -} - -// matchCIDRLabels Matches the Node labels to CIDR Configs. -// Returns true only if all the labels match, also returns the count of matching labels. -func (r *multiCIDRRangeAllocator) matchCIDRLabels(logger klog.Logger, node *v1.Node, label string) (bool, int, error) { - var labelSet labels.Set - var matchCnt int - labelsMatch := false - - ls, err := labels.Parse(label) - if err != nil { - logger.Error(err, "Unable to parse label to labels.Selector", "label", label) - return labelsMatch, 0, err - } - reqs, selectable := ls.Requirements() - - labelSet = node.ObjectMeta.Labels - if selectable { - matchCnt = 0 - for _, req := range reqs { - if req.Matches(labelSet) { - matchCnt += 1 - } - } - if matchCnt == len(reqs) { - labelsMatch = true - } - } - return labelsMatch, matchCnt, nil -} - -// Methods for handling ClusterCIDRs. - -// createDefaultClusterCIDR creates a default ClusterCIDR if --cluster-cidr has -// been configured. It converts the --cluster-cidr and --per-node-mask-size* flags -// to appropriate ClusterCIDR fields. 
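The conversion performed by createDefaultClusterCIDR below is plain mask arithmetic: PerNodeHostBits is the family's maximum mask length minus the configured per-node mask size. A sketch of that arithmetic; `perNodeHostBits` is an illustrative helper, not part of the original code.

```go
package main

import "fmt"

// perNodeHostBits converts a per-node CIDR mask size into host bits, as
// createDefaultClusterCIDR does with ipv4MaxCIDRMask/ipv6MaxCIDRMask.
// maxMask is 32 for IPv4 and 128 for IPv6.
func perNodeHostBits(maxMask, nodeCIDRMaskSize int32) int32 {
	return maxMask - nodeCIDRMaskSize
}

func main() {
	// --node-cidr-mask-size=24 on IPv4: 32-24 = 8 host bits (256 addresses per node).
	fmt.Println(perNodeHostBits(32, 24))
	// --node-cidr-mask-size-ipv6=120: 128-120 = 8 host bits.
	fmt.Println(perNodeHostBits(128, 120))
}
```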
-func createDefaultClusterCIDR(logger klog.Logger, existingConfigList *networkingv1alpha1.ClusterCIDRList,
-	allocatorParams CIDRAllocatorParams) {
-	// Create the default ClusterCIDR only if --cluster-cidr has been configured.
-	if len(allocatorParams.ClusterCIDRs) == 0 {
-		return
-	}
-
-	for _, clusterCIDR := range existingConfigList.Items {
-		if clusterCIDR.Name == defaultClusterCIDRName {
-			// Default ClusterCIDR already exists, no further action required.
-			logger.V(3).Info("Default ClusterCIDR already exists", "defaultClusterCIDRName", defaultClusterCIDRName)
-			return
-		}
-	}
-
-	// Create a default ClusterCIDR, as one does not already exist.
-	defaultCIDRConfig := &networkingv1alpha1.ClusterCIDR{
-		TypeMeta: metav1.TypeMeta{
-			APIVersion: defaultClusterCIDRAPIVersion,
-			Kind:       "ClusterCIDR",
-		},
-		ObjectMeta: metav1.ObjectMeta{
-			Name: defaultClusterCIDRName,
-		},
-		Spec: networkingv1alpha1.ClusterCIDRSpec{
-			PerNodeHostBits: minPerNodeHostBits,
-		},
-	}
-
-	ipv4PerNodeHostBits := int32(math.MinInt32)
-	ipv6PerNodeHostBits := int32(math.MinInt32)
-	isDualstack := false
-	if len(allocatorParams.ClusterCIDRs) == 2 {
-		isDualstack = true
-	}
-
-	for i, cidr := range allocatorParams.ClusterCIDRs {
-		if netutil.IsIPv4CIDR(cidr) {
-			defaultCIDRConfig.Spec.IPv4 = cidr.String()
-			ipv4PerNodeHostBits = ipv4MaxCIDRMask - int32(allocatorParams.NodeCIDRMaskSizes[i])
-			if !isDualstack && ipv4PerNodeHostBits > minPerNodeHostBits {
-				defaultCIDRConfig.Spec.PerNodeHostBits = ipv4PerNodeHostBits
-			}
-		} else if netutil.IsIPv6CIDR(cidr) {
-			defaultCIDRConfig.Spec.IPv6 = cidr.String()
-			ipv6PerNodeHostBits = ipv6MaxCIDRMask - int32(allocatorParams.NodeCIDRMaskSizes[i])
-			if !isDualstack && ipv6PerNodeHostBits > minPerNodeHostBits {
-				defaultCIDRConfig.Spec.PerNodeHostBits = ipv6PerNodeHostBits
-			}
-		}
-	}
-
-	if isDualstack {
-		// For dual-stack CIDRs, the current default PerNodeMaskSize values are
-		// 24 for IPv4 (PerNodeHostBits=8) and 64 for IPv6 (PerNodeHostBits=64).
-		// There is no requirement for the PerNodeHostBits to be equal for IPv4
-		// and IPv6. However, ClusterCIDR enforces a single PerNodeHostBits
-		// field, so we choose the minimum PerNodeHostBits value to avoid
-		// overflow for IPv4 CIDRs.
-		if ipv4PerNodeHostBits >= minPerNodeHostBits && ipv4PerNodeHostBits <= ipv6PerNodeHostBits {
-			defaultCIDRConfig.Spec.PerNodeHostBits = ipv4PerNodeHostBits
-		} else if ipv6PerNodeHostBits >= minPerNodeHostBits && ipv6PerNodeHostBits <= ipv4MaxCIDRMask {
-			defaultCIDRConfig.Spec.PerNodeHostBits = ipv6PerNodeHostBits
-		}
-	}
-
-	existingConfigList.Items = append(existingConfigList.Items, *defaultCIDRConfig)
-
-	return
-}
-
-// reconcileCreate handles create ClusterCIDR events.
-func (r *multiCIDRRangeAllocator) reconcileCreate(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR) error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	logger := klog.FromContext(ctx)
-	if needToAddFinalizer(clusterCIDR, clusterCIDRFinalizer) {
-		logger.V(3).Info("Creating ClusterCIDR", "clusterCIDR", clusterCIDR.Name)
-		if err := r.createClusterCIDR(ctx, clusterCIDR, false); err != nil {
-			logger.Error(err, "Unable to create ClusterCIDR", "clusterCIDR", clusterCIDR.Name)
-			return err
-		}
-	}
-	return nil
-}
-
-// reconcileBootstrap handles creation of existing ClusterCIDRs,
-// adding a finalizer if not already present.
-func (r *multiCIDRRangeAllocator) reconcileBootstrap(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR) error {
-	r.lock.Lock()
-	defer r.lock.Unlock()
-
-	logger := klog.FromContext(ctx)
-	terminating := false
-	// Mark the ClusterCIDR as terminating if its spec has been modified
-	// (Generation > 1); its CIDRs are then excluded from new allocations.
-	if clusterCIDR.Generation > 1 {
-		terminating = true
-		err := fmt.Errorf("CIDRs from ClusterCIDR %s will not be used for allocation as it was modified", clusterCIDR.Name)
-		logger.Error(err, "ClusterCIDR Modified")
-	}
-
-	logger.V(2).Info("Creating ClusterCIDR during bootstrap", "clusterCIDR", clusterCIDR.Name)
-	if err := r.createClusterCIDR(ctx, clusterCIDR, terminating); err != nil {
-		logger.Error(err, "Unable to create ClusterCIDR", "clusterCIDR", clusterCIDR.Name)
-		return err
-	}
-
-	return nil
-}
-
-// createClusterCIDR creates and maps the cidrSets in the cidrMap.
-func (r *multiCIDRRangeAllocator) createClusterCIDR(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR, terminating bool) error {
-	nodeSelector, err := r.nodeSelectorKey(clusterCIDR)
-	if err != nil {
-		return fmt.Errorf("unable to get labelSelector key: %w", err)
-	}
-
-	clusterCIDRSet, err := r.createClusterCIDRSet(clusterCIDR, terminating)
-	if err != nil {
-		return fmt.Errorf("invalid ClusterCIDR: %w", err)
-	}
-
-	if clusterCIDRSet.IPv4CIDRSet == nil && clusterCIDRSet.IPv6CIDRSet == nil {
-		return errors.New("invalid ClusterCIDR: must provide IPv4 and/or IPv6 config")
-	}
-
-	if err := r.mapClusterCIDRSet(r.cidrMap, nodeSelector, clusterCIDRSet); err != nil {
-		return fmt.Errorf("unable to map clusterCIDRSet: %w", err)
-	}
-
-	// Make a copy so we don't mutate the shared informer cache.
-	updatedClusterCIDR := clusterCIDR.DeepCopy()
-	if needToAddFinalizer(clusterCIDR, clusterCIDRFinalizer) {
-		updatedClusterCIDR.ObjectMeta.Finalizers = append(clusterCIDR.ObjectMeta.Finalizers, clusterCIDRFinalizer)
-	}
-
-	logger := klog.FromContext(ctx)
-	if updatedClusterCIDR.ResourceVersion == "" {
-		// Create is only used for creating the default ClusterCIDR.
-		if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Create(ctx, updatedClusterCIDR, metav1.CreateOptions{}); err != nil {
-			logger.V(2).Info("Error creating ClusterCIDR", "clusterCIDR", klog.KObj(clusterCIDR), "err", err)
-			return err
-		}
-	} else {
-		// Update the ClusterCIDR object when called from reconcileCreate.
-		if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Update(ctx, updatedClusterCIDR, metav1.UpdateOptions{}); err != nil {
-			logger.V(2).Info("Error updating ClusterCIDR", "clusterCIDR", clusterCIDR.Name, "err", err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-// createClusterCIDRSet creates and returns a new cidrset.ClusterCIDR based on the ClusterCIDR API object.
-func (r *multiCIDRRangeAllocator) createClusterCIDRSet(clusterCIDR *networkingv1alpha1.ClusterCIDR, terminating bool) (*cidrset.ClusterCIDR, error) { - - clusterCIDRSet := &cidrset.ClusterCIDR{ - Name: clusterCIDR.Name, - AssociatedNodes: make(map[string]bool, 0), - Terminating: terminating, - } - - if clusterCIDR.Spec.IPv4 != "" { - _, ipv4CIDR, err := netutil.ParseCIDRSloppy(clusterCIDR.Spec.IPv4) - if err != nil { - return nil, fmt.Errorf("unable to parse provided IPv4 CIDR: %w", err) - } - clusterCIDRSet.IPv4CIDRSet, err = cidrset.NewMultiCIDRSet(ipv4CIDR, int(clusterCIDR.Spec.PerNodeHostBits)) - if err != nil { - return nil, fmt.Errorf("unable to create IPv4 cidrSet: %w", err) - } - } - - if clusterCIDR.Spec.IPv6 != "" { - _, ipv6CIDR, err := netutil.ParseCIDRSloppy(clusterCIDR.Spec.IPv6) - if err != nil { - return nil, fmt.Errorf("unable to parse provided IPv6 CIDR: %w", err) - } - clusterCIDRSet.IPv6CIDRSet, err = cidrset.NewMultiCIDRSet(ipv6CIDR, int(clusterCIDR.Spec.PerNodeHostBits)) - if err != nil { - return nil, fmt.Errorf("unable to create IPv6 cidrSet: %w", err) - } - } - - return clusterCIDRSet, nil -} - -// mapClusterCIDRSet maps the ClusterCIDRSet to the provided labelSelector in the cidrMap. -func (r *multiCIDRRangeAllocator) mapClusterCIDRSet(cidrMap map[string][]*cidrset.ClusterCIDR, nodeSelector string, clusterCIDRSet *cidrset.ClusterCIDR) error { - if clusterCIDRSet == nil { - return errors.New("invalid clusterCIDRSet, clusterCIDRSet cannot be nil") - } - - if clusterCIDRSetList, ok := cidrMap[nodeSelector]; ok { - cidrMap[nodeSelector] = append(clusterCIDRSetList, clusterCIDRSet) - } else { - cidrMap[nodeSelector] = []*cidrset.ClusterCIDR{clusterCIDRSet} - } - return nil -} - -// reconcileDelete releases the assigned ClusterCIDR and removes the finalizer -// if the deletion timestamp is set. -func (r *multiCIDRRangeAllocator) reconcileDelete(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDR) error { - r.lock.Lock() - defer r.lock.Unlock() - - logger := klog.FromContext(ctx) - if slice.ContainsString(clusterCIDR.GetFinalizers(), clusterCIDRFinalizer, nil) { - logger.V(2).Info("Releasing ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - if err := r.deleteClusterCIDR(logger, clusterCIDR); err != nil { - logger.V(2).Info("Error while deleting ClusterCIDR", "err", err) - return err - } - // Remove the finalizer as delete is successful. - cccCopy := clusterCIDR.DeepCopy() - cccCopy.ObjectMeta.Finalizers = slice.RemoveString(cccCopy.ObjectMeta.Finalizers, clusterCIDRFinalizer, nil) - if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Update(ctx, cccCopy, metav1.UpdateOptions{}); err != nil { - logger.V(2).Info("Error removing finalizer for ClusterCIDR", "clusterCIDR", clusterCIDR.Name, "err", err) - return err - } - logger.V(2).Info("Removed finalizer for ClusterCIDR", "clusterCIDR", clusterCIDR.Name) - } - return nil -} - -// deleteClusterCIDR Deletes and unmaps the ClusterCIDRs from the cidrMap. 
-func (r *multiCIDRRangeAllocator) deleteClusterCIDR(logger klog.Logger, clusterCIDR *networkingv1alpha1.ClusterCIDR) error { - - labelSelector, err := r.nodeSelectorKey(clusterCIDR) - if err != nil { - return fmt.Errorf("unable to delete cidr: %w", err) - } - - clusterCIDRSetList, ok := r.cidrMap[labelSelector] - if !ok { - logger.Info("Label not found in CIDRMap, proceeding with delete", "labelSelector", labelSelector) - return nil - } - - for i, clusterCIDRSet := range clusterCIDRSetList { - if clusterCIDRSet.Name != clusterCIDR.Name { - continue - } - - // Mark clusterCIDRSet as terminating. - clusterCIDRSet.Terminating = true - - // Allow deletion only if no nodes are associated with the ClusterCIDR. - if len(clusterCIDRSet.AssociatedNodes) > 0 { - return fmt.Errorf("ClusterCIDRSet %s marked as terminating, won't be deleted until all associated nodes are deleted", clusterCIDR.Name) - } - - // Remove the label from the map if this was the only clusterCIDR associated - // with it. - if len(clusterCIDRSetList) == 1 { - delete(r.cidrMap, labelSelector) - return nil - } - - clusterCIDRSetList = append(clusterCIDRSetList[:i], clusterCIDRSetList[i+1:]...) - r.cidrMap[labelSelector] = clusterCIDRSetList - return nil - } - logger.V(2).Info("clusterCIDR not found, proceeding with delete", "clusterCIDR", clusterCIDR.Name, "label", labelSelector) - return nil -} - -func (r *multiCIDRRangeAllocator) nodeSelectorKey(clusterCIDR *networkingv1alpha1.ClusterCIDR) (string, error) { - var nodeSelector labels.Selector - var err error - - if clusterCIDR.Spec.NodeSelector != nil { - nodeSelector, err = nodeSelectorAsSelector(clusterCIDR.Spec.NodeSelector) - } else { - nodeSelector, err = nodeSelectorAsSelector(defaultNodeSelector()) - } - - if err != nil { - return "", err - } - - return nodeSelector.String(), nil -} - -func listClusterCIDRs(ctx context.Context, kubeClient clientset.Interface) (*networkingv1alpha1.ClusterCIDRList, error) { - var clusterCIDRList *networkingv1alpha1.ClusterCIDRList - // We must poll because apiserver might not be up. This error causes - // controller manager to restart. - startTimestamp := time.Now() - - // start with 2s, multiply the duration by 1.6 each step, 11 steps = 9.7 minutes - backoff := wait.Backoff{ - Duration: 2 * time.Second, - Factor: 1.6, - Steps: 11, - } - - logger := klog.FromContext(ctx) - if pollErr := wait.ExponentialBackoff(backoff, func() (bool, error) { - var err error - clusterCIDRList, err = kubeClient.NetworkingV1alpha1().ClusterCIDRs().List(ctx, metav1.ListOptions{ - FieldSelector: fields.Everything().String(), - LabelSelector: labels.Everything().String(), - }) - if err != nil { - logger.Error(err, "Failed to list all clusterCIDRs") - return false, nil - } - return true, nil - }); pollErr != nil { - logger.Error(nil, "Failed to list clusterCIDRs", "latency", time.Now().Sub(startTimestamp)) - return nil, fmt.Errorf("failed to list all clusterCIDRs in %v, cannot proceed without updating CIDR map", - apiserverStartupGracePeriod) - } - return clusterCIDRList, nil -} - -// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement -// type to a labels.Requirement type. 
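The backoff used by listClusterCIDRs above (initial 2s, factor 1.6, 11 steps) sleeps roughly 9.7 minutes in total, which is where the figure in its comment comes from. Jitter and early exit aside, the series can be checked directly:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Sum of 2s * 1.6^k for k = 0..10, matching
	// wait.Backoff{Duration: 2 * time.Second, Factor: 1.6, Steps: 11}.
	d, total := 2*time.Second, time.Duration(0)
	for step := 0; step < 11; step++ {
		total += d
		d = time.Duration(float64(d) * 1.6)
	}
	fmt.Println(total.Round(time.Second)) // 9m43s ≈ the "9.7 minutes" in the comment
}
```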
-func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) { - var op selection.Operator - switch nsr.Operator { - case v1.NodeSelectorOpIn: - op = selection.In - case v1.NodeSelectorOpNotIn: - op = selection.NotIn - case v1.NodeSelectorOpExists: - op = selection.Exists - case v1.NodeSelectorOpDoesNotExist: - op = selection.DoesNotExist - case v1.NodeSelectorOpGt: - op = selection.GreaterThan - case v1.NodeSelectorOpLt: - op = selection.LessThan - default: - return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator) - } - return labels.NewRequirement(nsr.Key, op, nsr.Values) -} - -// TODO: nodeSelect and labelSelector semantics are different and the function -// doesn't translate them correctly, this has to be fixed before Beta -// xref: https://issues.k8s.io/116419 -// nodeSelectorAsSelector converts the NodeSelector api type into a struct that -// implements labels.Selector -// Note: This function should be kept in sync with the selector methods in -// pkg/labels/selector.go -func nodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) { - if ns == nil { - return labels.Nothing(), nil - } - if len(ns.NodeSelectorTerms) == 0 { - return labels.Everything(), nil - } - var requirements []labels.Requirement - - for _, nsTerm := range ns.NodeSelectorTerms { - for _, expr := range nsTerm.MatchExpressions { - req, err := nodeSelectorRequirementsAsLabelRequirements(expr) - if err != nil { - return nil, err - } - requirements = append(requirements, *req) - } - - for _, field := range nsTerm.MatchFields { - req, err := nodeSelectorRequirementsAsLabelRequirements(field) - if err != nil { - return nil, err - } - requirements = append(requirements, *req) - } - } - - selector := labels.NewSelector() - selector = selector.Add(requirements...) - return selector, nil -} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go deleted file mode 100644 index df680a575d2..00000000000 --- a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go +++ /dev/null @@ -1,1876 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package ipam - -import ( - "context" - "fmt" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - v1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/rand" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - "k8s.io/client-go/kubernetes/fake" - k8stesting "k8s.io/client-go/testing" - "k8s.io/client-go/tools/cache" - "k8s.io/klog/v2/ktesting" - "k8s.io/kubernetes/pkg/controller" - cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" - "k8s.io/kubernetes/pkg/controller/testutil" - utilnet "k8s.io/utils/net" -) - -type testCaseMultiCIDR struct { - description string - fakeNodeHandler *testutil.FakeNodeHandler - allocatorParams CIDRAllocatorParams - testCIDRMap map[string][]*cidrset.ClusterCIDR - // key is index of the cidr allocated. - expectedAllocatedCIDR map[int]string - allocatedCIDRs map[int][]string - // should controller creation fail? - ctrlCreateFail bool -} - -type testClusterCIDR struct { - perNodeHostBits int32 - ipv4CIDR string - ipv6CIDR string - name string -} - -type testNodeSelectorRequirement struct { - key string - operator v1.NodeSelectorOperator - values []string -} - -func getTestNodeSelector(requirements []testNodeSelectorRequirement) string { - testNodeSelector := &v1.NodeSelector{} - - for _, nsr := range requirements { - nst := v1.NodeSelectorTerm{ - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: nsr.key, - Operator: nsr.operator, - Values: nsr.values, - }, - }, - } - testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst) - } - - selector, _ := nodeSelectorAsSelector(testNodeSelector) - return selector.String() -} - -func getTestCidrMap(testClusterCIDRMap map[string][]*testClusterCIDR) map[string][]*cidrset.ClusterCIDR { - cidrMap := make(map[string][]*cidrset.ClusterCIDR, 0) - for labels, testClusterCIDRList := range testClusterCIDRMap { - clusterCIDRList := make([]*cidrset.ClusterCIDR, 0) - for _, testClusterCIDR := range testClusterCIDRList { - clusterCIDR := &cidrset.ClusterCIDR{ - Name: testClusterCIDR.name, - AssociatedNodes: make(map[string]bool, 0), - } - - if testClusterCIDR.ipv4CIDR != "" { - _, testCIDR, _ := utilnet.ParseCIDRSloppy(testClusterCIDR.ipv4CIDR) - testCIDRSet, _ := cidrset.NewMultiCIDRSet(testCIDR, int(testClusterCIDR.perNodeHostBits)) - clusterCIDR.IPv4CIDRSet = testCIDRSet - } - if testClusterCIDR.ipv6CIDR != "" { - _, testCIDR, _ := utilnet.ParseCIDRSloppy(testClusterCIDR.ipv6CIDR) - testCIDRSet, _ := cidrset.NewMultiCIDRSet(testCIDR, int(testClusterCIDR.perNodeHostBits)) - clusterCIDR.IPv6CIDRSet = testCIDRSet - } - clusterCIDRList = append(clusterCIDRList, clusterCIDR) - } - cidrMap[labels] = clusterCIDRList - } - return cidrMap -} - -func getClusterCIDRList(nodeName string, cidrMap map[string][]*cidrset.ClusterCIDR) ([]*cidrset.ClusterCIDR, error) { - labelSelector := getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{nodeName}, - }, - }) - if clusterCIDRList, ok := cidrMap[labelSelector]; ok { - return clusterCIDRList, nil - } - return nil, fmt.Errorf("unable to get clusterCIDR for node: %s", nodeName) -} - -func TestMultiCIDROccupyPreExistingCIDR(t *testing.T) { - // all tests operate on a single node. 
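The test helpers above key cidrMap by the serialized selector string that getTestNodeSelector produces via nodeSelectorAsSelector. A minimal standalone sketch of that serialization, using the same apimachinery packages the removed code imports; the test table follows below:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// One NodeSelectorRequirement (testLabel-0 In [node0]) becomes one
	// labels.Requirement; the selector's String() form is the map key.
	req, err := labels.NewRequirement("testLabel-0", selection.In, []string{"node0"})
	if err != nil {
		panic(err)
	}
	sel := labels.NewSelector().Add(*req)
	fmt.Println(sel.String()) // "testLabel-0 in (node0)"
}
```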
- testCaseMultiCIDRs := []testCaseMultiCIDR{ - { - description: "success, single stack no node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - { - description: "success, dual stack no node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - { - description: "success, single stack correct node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr-allocated", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - { - description: "success, dual stack both allocated correctly", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24", "ace:cab:deca::1/120"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-allocated", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: false, - }, - // failure cases. 
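The failure cases that follow all hinge on a pre-existing PodCIDR that no configured ClusterCIDR can contain (172.10.0.1/24 against 10.10.0.0/16, and so on). A rough stdlib-only illustration of that failing precondition; the real occupation logic lives in the multicidrset package and does more than this:

```go
package main

import (
	"fmt"
	"net"
)

// contains reports whether podCIDR's base address falls inside clusterCIDR.
// Error handling is elided; this is only a sketch.
func contains(clusterCIDR, podCIDR string) bool {
	_, cluster, _ := net.ParseCIDR(clusterCIDR)
	ip, _, _ := net.ParseCIDR(podCIDR)
	return cluster.Contains(ip)
}

func main() {
	fmt.Println(contains("10.10.0.0/16", "10.10.0.1/24"))  // true: occupation can succeed
	fmt.Println(contains("10.10.0.0/16", "172.10.0.1/24")) // false: allocator creation fails
}
```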
- { - description: "fail, single stack incorrect node allocation", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"172.10.0.1/24"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr-allocate-fail", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - { - description: "fail, dualstack node allocating from non existing cidr", - - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24", "a00::/86"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-allocate-fail", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - { - description: "fail, dualstack node allocating bad v4", - - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"172.10.0.1/24", "ace:cab:deca::1/120"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-bad-v4", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - { - description: "fail, dualstack node allocating bad v6", - - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"}, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-bad-v6", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/16", - ipv6CIDR: 
"ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: nil, - expectedAllocatedCIDR: nil, - ctrlCreateFail: true, - }, - } - - // test function - _, ctx := ktesting.NewTestContext(t) - for _, tc := range testCaseMultiCIDRs { - t.Run(tc.description, func(t *testing.T) { - // Initialize the range allocator. - fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) - - _, err := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, fakeNodeInformer, fakeClusterCIDRInformer, tc.allocatorParams, nodeList, tc.testCIDRMap) - if err == nil && tc.ctrlCreateFail { - t.Fatalf("creating range allocator was expected to fail, but it did not") - } - if err != nil && !tc.ctrlCreateFail { - t.Fatalf("creating range allocator was expected to succeed, but it did not") - } - }) - } -} - -func TestMultiCIDRAllocateOrOccupyCIDRSuccess(t *testing.T) { - // Non-parallel test (overrides global var). - oldNodePollInterval := nodePollInterval - nodePollInterval = test.NodePollInterval - defer func() { - nodePollInterval = oldNodePollInterval - }() - - // all tests operate on a single node. - testCaseMultiCIDRs := []testCaseMultiCIDR{ - { - description: "When there's no ServiceCIDR return first CIDR in range", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/24", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "127.123.234.0/30", - }, - }, - { - description: "Correctly filter out ServiceCIDR", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - NodeCIDRMaskSizes: []int{30}, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/24", - }, - }, - }), - // it should return first /30 CIDR after service range. 
- expectedAllocatedCIDR: map[int]string{ - 0: "127.123.234.64/30", - }, - }, - { - description: "Correctly ignore already allocated CIDRs", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "single-stack-cidr", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/24", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"}, - }, - expectedAllocatedCIDR: map[int]string{ - 0: "127.123.234.76/30", - }, - }, - { - description: "Dualstack CIDRs, prioritize clusterCIDR with higher label match count", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/8", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/8", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "127.0.0.0/24", - 1: "abc:def:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, prioritize clusterCIDR with higher label match count, overlapping CIDRs", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/8", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - 
key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24", "10.0.4.0/24"}, - 1: {"ace:cab:deca::/120"}, - }, - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.3.0/24", - 1: "ace:cab:deca::100/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label match count, prioritize clusterCIDR with fewer allocatable pod CIDRs", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/8", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/24", - ipv6CIDR: "ace:cab:deca::/120", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/24", - 1: "ace:cab:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count, non comparable allocatable pod CIDRs, prioritize clusterCIDR with lower perNodeMaskSize", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/23", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: 
"ace:cab:deca::/120", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/24", - 1: "ace:cab:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count and allocatable pod CIDRs, prioritize clusterCIDR with lower perNodeMaskSize", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/24", - ipv6CIDR: "abc:def:deca::/120", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 0, - ipv4CIDR: "10.0.0.0/32", - ipv6CIDR: "ace:cab:deca::/128", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/32", - 1: "ace:cab:deca::/128", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count, allocatable pod CIDRs and allocatable IPs, prioritize clusterCIDR with lower alphanumeric label", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/16", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-2", - operator: v1.NodeSelectorOpIn, - values: []string{"label2"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "127.123.0.0/24", - 1: "abc:def:deca::/120", - }, - }, - { - description: "Dualstack CIDRs, clusterCIDR with equal label count, allocatable pod CIDRs, allocatable IPs and labels, prioritize clusterCIDR with smaller IP", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - 
ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - "testLabel-1": "label1", - "testLabel-2": "label2", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: func() *net.IPNet { - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") - return serviceCIDR - }(), - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-1", - perNodeHostBits: 8, - ipv4CIDR: "127.123.234.0/16", - ipv6CIDR: "abc:def:deca::/112", - }, - }, - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - { - key: "testLabel-1", - operator: v1.NodeSelectorOpIn, - values: []string{"label1"}, - }, - }): { - { - name: "dual-stack-cidr-2", - perNodeHostBits: 8, - ipv4CIDR: "10.0.0.0/16", - ipv6CIDR: "ace:cab:deca::/112", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.0.0.0/24", - 1: "ace:cab:deca::/120", - }, - }, - { - description: "no double counting", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "nodepool1", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.0.0/24"}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node1", - Labels: map[string]string{ - "testLabel-0": "nodepool1", - }, - }, - Spec: v1.NodeSpec{ - PodCIDRs: []string{"10.10.2.0/24"}, - }, - }, - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node2", - Labels: map[string]string{ - "testLabel-0": "nodepool1", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"nodepool1"}, - }, - }): { - { - name: "no-double-counting", - perNodeHostBits: 8, - ipv4CIDR: "10.10.0.0/22", - }, - }, - }), - expectedAllocatedCIDR: map[int]string{ - 0: "10.10.1.0/24", - }, - }, - } - - logger, ctx := ktesting.NewTestContext(t) - - // test function - testFunc := func(tc testCaseMultiCIDR) { - nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) - // Initialize the range allocator. 
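Taken together, the case descriptions above spell out the tie-breaking order among matching ClusterCIDRs: more selector label matches win, then fewer allocatable per-node CIDRs, then a lower per-node mask size, then the lexically smaller selector, then the smaller IP (the "allocatable IPs" criterion is elided here). A comparator sketch with hypothetical field names, not the multicidrset implementation:

```go
package main

import (
	"fmt"
	"sort"
)

// candidate is a hypothetical flattening of the attributes the tests rank on.
type candidate struct {
	labelMatches     int
	allocatableCIDRs int
	perNodeMaskSize  int
	selectorString   string
	ipString         string
}

func less(a, b candidate) bool {
	if a.labelMatches != b.labelMatches {
		return a.labelMatches > b.labelMatches // more matches preferred
	}
	if a.allocatableCIDRs != b.allocatableCIDRs {
		return a.allocatableCIDRs < b.allocatableCIDRs // fewer allocatable preferred
	}
	if a.perNodeMaskSize != b.perNodeMaskSize {
		return a.perNodeMaskSize < b.perNodeMaskSize
	}
	if a.selectorString != b.selectorString {
		return a.selectorString < b.selectorString
	}
	return a.ipString < b.ipString
}

func main() {
	cs := []candidate{
		{1, 256, 24, "testLabel-0 in (node0)", "10.0.0.0"},
		{2, 256, 24, "testLabel-0 in (node0),testLabel-1 in (label1)", "127.123.0.0"},
	}
	sort.Slice(cs, func(i, j int) bool { return less(cs[i], cs[j]) })
	fmt.Println(cs[0].selectorString) // the two-label match is preferred
}
```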
- - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - allocator, err := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nodeList, tc.testCIDRMap) - if err != nil { - t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) - return - } - rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) - if !ok { - t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) - return - } - rangeAllocator.nodesSynced = test.AlwaysReady - rangeAllocator.recorder = testutil.NewFakeRecorder() - - // this is a bit of white box testing - // pre allocate the CIDRs as per the test - for _, allocatedList := range tc.allocatedCIDRs { - for _, allocated := range allocatedList { - _, cidr, err := utilnet.ParseCIDRSloppy(allocated) - if err != nil { - t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) - } - - clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) - if err != nil { - t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) - } - - occupied := false - for _, clusterCIDR := range clusterCIDRList { - if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { - occupied = true - break - } - } - if !occupied { - t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) - } - } - } - - updateCount := 0 - for _, node := range tc.fakeNodeHandler.Existing { - if node.Spec.PodCIDRs == nil { - updateCount++ - } - if err := allocator.AllocateOrOccupyCIDR(logger, node); err != nil { - t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) - } - } - if updateCount != 1 { - t.Fatalf("test error: all tests must update exactly one node") - } - if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { - t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) - } - - if len(tc.expectedAllocatedCIDR) == 0 { - // nothing further expected - return - } - for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { - if len(updatedNode.Spec.PodCIDRs) == 0 { - continue // not assigned yet - } - //match - for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { - if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { - t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) - break - } - } - } - } - - // run the test cases - for _, tc := range testCaseMultiCIDRs { - testFunc(tc) - } -} - -func TestMultiCIDRAllocateOrOccupyCIDRFailure(t *testing.T) { - testCaseMultiCIDRs := []testCaseMultiCIDR{ - { - description: "When there's no ServiceCIDR return first CIDR in range", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: 
"testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "allocate-fail", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/28", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, - }, - }, - } - - logger, ctx := ktesting.NewTestContext(t) - - testFunc := func(tc testCaseMultiCIDR) { - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - - // Initialize the range allocator. - allocator, err := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nil, tc.testCIDRMap) - if err != nil { - t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) - } - rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) - if !ok { - t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) - return - } - rangeAllocator.nodesSynced = test.AlwaysReady - rangeAllocator.recorder = testutil.NewFakeRecorder() - - // this is a bit of white box testing - // pre allocate the CIDRs as per the test - for _, allocatedList := range tc.allocatedCIDRs { - for _, allocated := range allocatedList { - _, cidr, err := utilnet.ParseCIDRSloppy(allocated) - if err != nil { - t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) - } - - clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) - if err != nil { - t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) - } - - occupied := false - for _, clusterCIDR := range clusterCIDRList { - if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { - occupied = true - break - } - } - if !occupied { - t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) - } - } - } - - if err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err == nil { - t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) - } - // We don't expect any updates, so just sleep for some time - time.Sleep(time.Second) - if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { - t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) - } - if len(tc.expectedAllocatedCIDR) == 0 { - // nothing further expected - return - } - for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { - if len(updatedNode.Spec.PodCIDRs) == 0 { - continue // not assigned yet - } - //match - for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { - if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR { - t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) - break - } - } - } - } - for _, tc := range testCaseMultiCIDRs { - testFunc(tc) - } -} - -type releasetestCaseMultiCIDR struct { - description string - fakeNodeHandler *testutil.FakeNodeHandler - testCIDRMap map[string][]*cidrset.ClusterCIDR - allocatorParams CIDRAllocatorParams - expectedAllocatedCIDRFirstRound map[int]string - expectedAllocatedCIDRSecondRound map[int]string - allocatedCIDRs map[int][]string - cidrsToRelease [][]string -} - -func 
TestMultiCIDRReleaseCIDRSuccess(t *testing.T) { - // Non-parallel test (overrides global var) - oldNodePollInterval := nodePollInterval - nodePollInterval = test.NodePollInterval - defer func() { - nodePollInterval = oldNodePollInterval - }() - - testCaseMultiCIDRs := []releasetestCaseMultiCIDR{ - { - description: "Correctly release preallocated CIDR", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "cidr-release", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/28", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, - }, - expectedAllocatedCIDRFirstRound: nil, - cidrsToRelease: [][]string{ - {"127.123.234.4/30"}, - }, - expectedAllocatedCIDRSecondRound: map[int]string{ - 0: "127.123.234.4/30", - }, - }, - { - description: "Correctly recycle CIDR", - fakeNodeHandler: &testutil.FakeNodeHandler{ - Existing: []*v1.Node{ - { - ObjectMeta: metav1.ObjectMeta{ - Name: "node0", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - }, - }, - Clientset: fake.NewSimpleClientset(), - }, - allocatorParams: CIDRAllocatorParams{ - ServiceCIDR: nil, - SecondaryServiceCIDR: nil, - }, - testCIDRMap: getTestCidrMap( - map[string][]*testClusterCIDR{ - getTestNodeSelector([]testNodeSelectorRequirement{ - { - key: "testLabel-0", - operator: v1.NodeSelectorOpIn, - values: []string{"node0"}, - }, - }): { - { - name: "cidr-release", - perNodeHostBits: 2, - ipv4CIDR: "127.123.234.0/28", - }, - }, - }), - allocatedCIDRs: map[int][]string{ - 0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, - }, - expectedAllocatedCIDRFirstRound: map[int]string{ - 0: "127.123.234.0/30", - }, - cidrsToRelease: [][]string{ - {"127.123.234.0/30"}, - }, - expectedAllocatedCIDRSecondRound: map[int]string{ - 0: "127.123.234.0/30", - }, - }, - } - logger, ctx := ktesting.NewTestContext(t) - testFunc := func(tc releasetestCaseMultiCIDR) { - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() - // Initialize the range allocator. 
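The two release cases above assert a simple invariant: with every /30 occupied, allocation fails, and releasing a block makes exactly that block allocatable again. A toy free-list model of that round trip (the error text is the toy's own, not the allocator's):

```go
package main

import "fmt"

type pool struct{ free map[string]bool }

func (p *pool) allocate() (string, error) {
	for _, c := range []string{"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"} {
		if p.free[c] {
			p.free[c] = false
			return c, nil
		}
	}
	return "", fmt.Errorf("no remaining CIDRs to allocate in the range")
}

func (p *pool) release(c string) { p.free[c] = true }

func main() {
	p := &pool{free: map[string]bool{}} // everything pre-occupied
	if _, err := p.allocate(); err != nil {
		fmt.Println(err) // first round: allocation fails
	}
	p.release("127.123.234.4/30")
	got, _ := p.allocate()
	fmt.Println(got) // second round: "127.123.234.4/30", as the test expects
}
```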
- allocator, _ := NewMultiCIDRRangeAllocator(ctx, tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nil, tc.testCIDRMap) - rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) - if !ok { - t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) - return - } - rangeAllocator.nodesSynced = test.AlwaysReady - rangeAllocator.recorder = testutil.NewFakeRecorder() - - // this is a bit of white box testing - for _, allocatedList := range tc.allocatedCIDRs { - for _, allocated := range allocatedList { - _, cidr, err := utilnet.ParseCIDRSloppy(allocated) - if err != nil { - t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) - } - - clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) - if err != nil { - t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) - } - - occupied := false - for _, clusterCIDR := range clusterCIDRList { - if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { - occupied = true - clusterCIDR.AssociatedNodes["fakeNode"] = true - break - } - } - if !occupied { - t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) - } - } - } - - err := allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]) - if len(tc.expectedAllocatedCIDRFirstRound) != 0 { - if err != nil { - t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) - } - if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { - t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) - } - } else { - if err == nil { - t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) - } - // We don't expect any updates here - time.Sleep(time.Second) - if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { - t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) - } - } - - for _, cidrToRelease := range tc.cidrsToRelease { - - nodeToRelease := v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: "fakeNode", - Labels: map[string]string{ - "testLabel-0": "node0", - }, - }, - } - nodeToRelease.Spec.PodCIDRs = cidrToRelease - err = allocator.ReleaseCIDR(logger, &nodeToRelease) - if err != nil { - t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err) - } - } - if err = allocator.AllocateOrOccupyCIDR(logger, tc.fakeNodeHandler.Existing[0]); err != nil { - t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) - } - if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { - t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) - } - - if len(tc.expectedAllocatedCIDRSecondRound) == 0 { - // nothing further expected - return - } - for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { - if len(updatedNode.Spec.PodCIDRs) == 0 { - continue // not assigned yet - } - //match - for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound { - if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { - t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) - break - } - } - } - } - - for _, tc := range testCaseMultiCIDRs { - testFunc(tc) - } -} - -// ClusterCIDR 
tests. - -var alwaysReady = func() bool { return true } - -type clusterCIDRController struct { - *multiCIDRRangeAllocator - clusterCIDRStore cache.Store -} - -func newController(ctx context.Context) (*fake.Clientset, *clusterCIDRController) { - client := fake.NewSimpleClientset() - - informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) - cccInformer := informerFactory.Networking().V1alpha1().ClusterCIDRs() - cccIndexer := cccInformer.Informer().GetIndexer() - - nodeInformer := informerFactory.Core().V1().Nodes() - - // These reactors are required to mock functionality that would be covered - // automatically if we weren't using the fake client. - client.PrependReactor("create", "clustercidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) { - clusterCIDR := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ClusterCIDR) - - if clusterCIDR.ObjectMeta.GenerateName != "" { - clusterCIDR.ObjectMeta.Name = fmt.Sprintf("%s-%s", clusterCIDR.ObjectMeta.GenerateName, rand.String(8)) - clusterCIDR.ObjectMeta.GenerateName = "" - } - clusterCIDR.Generation = 1 - cccIndexer.Add(clusterCIDR) - - return false, clusterCIDR, nil - })) - client.PrependReactor("update", "clustercidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) { - clusterCIDR := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ClusterCIDR) - clusterCIDR.Generation++ - cccIndexer.Update(clusterCIDR) - - return false, clusterCIDR, nil - })) - - _, clusterCIDR, _ := utilnet.ParseCIDRSloppy("192.168.0.0/16") - _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("10.1.0.0/16") - - allocatorParams := CIDRAllocatorParams{ - ClusterCIDRs: []*net.IPNet{clusterCIDR}, - ServiceCIDR: serviceCIDR, - SecondaryServiceCIDR: nil, - NodeCIDRMaskSizes: []int{24}, - } - testCIDRMap := make(map[string][]*cidrset.ClusterCIDR, 0) - - // Initialize the range allocator. - ra, _ := NewMultiCIDRRangeAllocator(ctx, client, nodeInformer, cccInformer, allocatorParams, nil, testCIDRMap) - cccController := ra.(*multiCIDRRangeAllocator) - - cccController.clusterCIDRSynced = alwaysReady - - return client, &clusterCIDRController{ - cccController, - informerFactory.Networking().V1alpha1().ClusterCIDRs().Informer().GetStore(), - } -} - -// Ensure default ClusterCIDR is created during bootstrap. -func TestClusterCIDRDefault(t *testing.T) { - defaultCCC := makeClusterCIDR(defaultClusterCIDRName, "192.168.0.0/16", "", 8, nil) - _, ctx := ktesting.NewTestContext(t) - client, _ := newController(ctx) - createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), defaultClusterCIDRName, metav1.GetOptions{}) - assert.Nil(t, err, "Expected no error getting clustercidr objects") - assert.Equal(t, defaultCCC.Spec, createdCCC.Spec) -} - -// Ensure SyncClusterCIDR creates a new valid ClusterCIDR. 
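TestClusterCIDRDefault above expects perNodeHostBits of 8 on the bootstrapped default ClusterCIDR, which lines up with NodeCIDRMaskSizes: []int{24} in newController if, as the API description suggests, host bits are simply address length minus the node mask size. A sketch of that assumed conversion:

```go
package main

import "fmt"

// perNodeHostBits derives host bits from an address length and a node mask
// size. Assumed relation, per the ClusterCIDRSpec doc text ("a /24 mask for
// IPv4 or a /120 for IPv6" both mean 8 host bits).
func perNodeHostBits(addrBits, nodeMaskSize int) int32 {
	return int32(addrBits - nodeMaskSize)
}

func main() {
	fmt.Println(perNodeHostBits(32, 24))   // 8 for an IPv4 /24
	fmt.Println(perNodeHostBits(128, 120)) // 8 for an IPv6 /120
}
```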
-func TestSyncClusterCIDRCreate(t *testing.T) { - tests := []struct { - name string - ccc *networkingv1alpha1.ClusterCIDR - wantErr bool - }{ - { - name: "valid IPv4 ClusterCIDR with no NodeSelector", - ccc: makeClusterCIDR("ipv4-ccc", "10.2.0.0/16", "", 8, nil), - wantErr: false, - }, - { - name: "valid IPv4 ClusterCIDR with NodeSelector", - ccc: makeClusterCIDR("ipv4-ccc-label", "10.3.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid IPv4 ClusterCIDR with overlapping CIDRs", - ccc: makeClusterCIDR("ipv4-ccc-overlap", "10.2.0.0/24", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid IPv6 ClusterCIDR with no NodeSelector", - ccc: makeClusterCIDR("ipv6-ccc", "", "fd00:1::/112", 8, nil), - wantErr: false, - }, - { - name: "valid IPv6 ClusterCIDR with NodeSelector", - ccc: makeClusterCIDR("ipv6-ccc-label", "", "fd00:2::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid IPv6 ClusterCIDR with overlapping CIDRs", - ccc: makeClusterCIDR("ipv6-ccc-overlap", "", "fd00:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid Dualstack ClusterCIDR with no NodeSelector", - ccc: makeClusterCIDR("dual-ccc", "10.2.0.0/16", "fd00:1::/112", 8, nil), - wantErr: false, - }, - { - name: "valid DualStack ClusterCIDR with NodeSelector", - ccc: makeClusterCIDR("dual-ccc-label", "10.3.0.0/16", "fd00:2::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - { - name: "valid Dualstack ClusterCIDR with overlapping CIDRs", - ccc: makeClusterCIDR("dual-ccc-overlap", "10.2.0.0/16", "fd00:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: false, - }, - // invalid ClusterCIDRs. - { - name: "invalid ClusterCIDR with both IPv4 and IPv6 CIDRs nil", - ccc: makeClusterCIDR("invalid-ccc", "", "", 0, nil), - wantErr: true, - }, - { - name: "invalid IPv4 ClusterCIDR", - ccc: makeClusterCIDR("invalid-ipv4-ccc", "1000.2.0.0/16", "", 8, nil), - wantErr: true, - }, - { - name: "invalid IPv6 ClusterCIDR", - ccc: makeClusterCIDR("invalid-ipv6-ccc", "", "aaaaa:1:1::/112", 8, nil), - wantErr: true, - }, - { - name: "invalid dualstack ClusterCIDR", - ccc: makeClusterCIDR("invalid-dual-ccc", "10.2.0.0/16", "aaaaa:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), - wantErr: true, - }, - } - _, ctx := ktesting.NewTestContext(t) - client, cccController := newController(ctx) - for _, tc := range tests { - cccController.clusterCIDRStore.Add(tc.ccc) - err := cccController.syncClusterCIDR(ctx, tc.ccc.Name) - if tc.wantErr { - assert.Error(t, err) - continue - } - assert.NoError(t, err) - expectActions(t, client.Actions(), 1, "create", "clustercidrs") - - createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), tc.ccc.Name, metav1.GetOptions{}) - assert.Nil(t, err, "Expected no error getting clustercidr object") - assert.Equal(t, tc.ccc.Spec, createdCCC.Spec) - assert.Equal(t, []string{clusterCIDRFinalizer}, createdCCC.Finalizers) - } -} - -// Ensure syncClusterCIDR for ClusterCIDR delete removes the ClusterCIDR. 
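The delete tests that follow exercise the same gate as deleteClusterCIDR at the top of this hunk: a terminating ClusterCIDR is refused deletion while any node is still associated with it. A stripped-down model of that gate:

```go
package main

import "fmt"

// clusterCIDRSet mirrors just the fields the delete gate consults.
type clusterCIDRSet struct {
	Name            string
	Terminating     bool
	AssociatedNodes map[string]bool
}

// tryDelete marks the set terminating, then refuses deletion while any node
// remains associated, matching the behavior of the removed deleteClusterCIDR.
func tryDelete(c *clusterCIDRSet) error {
	c.Terminating = true
	if len(c.AssociatedNodes) > 0 {
		return fmt.Errorf("ClusterCIDRSet %s marked as terminating, won't be deleted until all associated nodes are deleted", c.Name)
	}
	return nil
}

func main() {
	c := &clusterCIDRSet{Name: "testing-1", AssociatedNodes: map[string]bool{"test-node": true}}
	fmt.Println(tryDelete(c)) // blocked while a node is associated
	delete(c.AssociatedNodes, "test-node")
	fmt.Println(tryDelete(c)) // nil: safe to delete
}
```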
-func TestSyncClusterCIDRDelete(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - _, cccController := newController(ctx) - - testCCC := makeClusterCIDR("testing-1", "10.1.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})) - - cccController.clusterCIDRStore.Add(testCCC) - err := cccController.syncClusterCIDR(ctx, testCCC.Name) - assert.NoError(t, err) - - deletionTimestamp := metav1.Now() - testCCC.DeletionTimestamp = &deletionTimestamp - cccController.clusterCIDRStore.Update(testCCC) - err = cccController.syncClusterCIDR(ctx, testCCC.Name) - assert.NoError(t, err) -} - -// Ensure syncClusterCIDR for ClusterCIDR delete does not remove ClusterCIDR -// if a node is associated with the ClusterCIDR. -func TestSyncClusterCIDRDeleteWithNodesAssociated(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - client, cccController := newController(ctx) - - testCCC := makeClusterCIDR("testing-1", "10.1.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})) - - cccController.clusterCIDRStore.Add(testCCC) - err := cccController.syncClusterCIDR(ctx, testCCC.Name) - assert.NoError(t, err) - - // Mock the IPAM controller behavior associating node with ClusterCIDR. - nodeSelectorKey, _ := cccController.nodeSelectorKey(testCCC) - clusterCIDRs, _ := cccController.cidrMap[nodeSelectorKey] - clusterCIDRs[0].AssociatedNodes["test-node"] = true - - createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), testCCC.Name, metav1.GetOptions{}) - assert.Nil(t, err, "Expected no error getting clustercidr object") - - deletionTimestamp := metav1.Now() - createdCCC.DeletionTimestamp = &deletionTimestamp - cccController.clusterCIDRStore.Update(createdCCC) - err = cccController.syncClusterCIDR(ctx, createdCCC.Name) - assert.Error(t, err, fmt.Sprintf("ClusterCIDR %s marked as terminating, won't be deleted until all associated nodes are deleted", createdCCC.Name)) -} - -func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, resource string) { - t.Helper() - // if actions are less, the below logic will panic. - if num > len(actions) { - t.Fatalf("len of actions %v is unexpected. Expected to be at least %v", len(actions), num+1) - } - - for i := 0; i < num; i++ { - relativePos := len(actions) - i - 1 - assert.Equal(t, verb, actions[relativePos].GetVerb(), "Expected action -%d verb to be %s", i, verb) - assert.Equal(t, resource, actions[relativePos].GetResource().Resource, "Expected action -%d resource to be %s", i, resource) - } -} - -func makeNodeSelector(key string, op v1.NodeSelectorOperator, values []string) *v1.NodeSelector { - return &v1.NodeSelector{ - NodeSelectorTerms: []v1.NodeSelectorTerm{ - { - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: key, - Operator: op, - Values: values, - }, - }, - }, - }, - } -} - -// makeClusterCIDR returns a mock ClusterCIDR object. 
-func makeClusterCIDR(cccName, ipv4CIDR, ipv6CIDR string, perNodeHostBits int32, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR { - testCCC := &networkingv1alpha1.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: cccName}, - Spec: networkingv1alpha1.ClusterCIDRSpec{}, - } - - testCCC.Spec.PerNodeHostBits = perNodeHostBits - - if ipv4CIDR != "" { - testCCC.Spec.IPv4 = ipv4CIDR - } - - if ipv6CIDR != "" { - testCCC.Spec.IPv6 = ipv6CIDR - } - - if nodeSelector != nil { - testCCC.Spec.NodeSelector = nodeSelector - } - - return testCCC -} diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go index cf547f81d75..c98671b1d51 100644 --- a/pkg/controller/nodeipam/node_ipam_controller.go +++ b/pkg/controller/nodeipam/node_ipam_controller.go @@ -24,7 +24,6 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" coreinformers "k8s.io/client-go/informers/core/v1" - networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" corelisters "k8s.io/client-go/listers/core/v1" @@ -83,7 +82,6 @@ type Controller struct { func NewNodeIpamController( ctx context.Context, nodeInformer coreinformers.NodeInformer, - clusterCIDRInformer networkinginformers.ClusterCIDRInformer, cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterCIDRs []*net.IPNet, @@ -139,7 +137,7 @@ func NewNodeIpamController( NodeCIDRMaskSizes: nodeCIDRMaskSizes, } - ic.cidrAllocator, err = ipam.New(ctx, kubeClient, cloud, nodeInformer, clusterCIDRInformer, ic.allocatorType, allocatorParams) + ic.cidrAllocator, err = ipam.New(ctx, kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams) if err != nil { return nil, err } diff --git a/pkg/controller/nodeipam/node_ipam_controller_test.go b/pkg/controller/nodeipam/node_ipam_controller_test.go index f15cf6b7336..0c11c5220af 100644 --- a/pkg/controller/nodeipam/node_ipam_controller_test.go +++ b/pkg/controller/nodeipam/node_ipam_controller_test.go @@ -28,14 +28,11 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilfeature "k8s.io/apiserver/pkg/util/feature" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes/fake" - featuregatetesting "k8s.io/component-base/featuregate/testing" "k8s.io/klog/v2/ktesting" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" "k8s.io/kubernetes/pkg/controller/testutil" - "k8s.io/kubernetes/pkg/features" "k8s.io/legacy-cloud-providers/gce" netutils "k8s.io/utils/net" ) @@ -51,7 +48,6 @@ func newTestNodeIpamController(ctx context.Context, clusterCIDR []*net.IPNet, se fakeClient := &fake.Clientset{} fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, 0) fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes() - fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() for _, node := range fakeNodeHandler.Existing { fakeNodeInformer.Informer().GetStore().Add(node) @@ -60,7 +56,7 @@ func newTestNodeIpamController(ctx context.Context, clusterCIDR []*net.IPNet, se fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) return NewNodeIpamController( ctx, - fakeNodeInformer, fakeClusterCIDRInformer, fakeGCE, clientSet, + fakeNodeInformer, fakeGCE, clientSet, clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType, ) } @@ -120,42 +116,3 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) { }) } } - -// MultiCIDRRangeAllocatorType need enable feature 
gate -func TestNewNodeIpamControllerWithCIDRMasks2(t *testing.T) { - emptyServiceCIDR := "" - for _, tc := range []struct { - desc string - clusterCIDR string - serviceCIDR string - secondaryServiceCIDR string - maskSize []int - allocatorType ipam.CIDRAllocatorType - }{ - {"valid_multi_cidr_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.MultiCIDRRangeAllocatorType}, - {"valid_multi_cidr_range_allocator_dualstack", "10.0.0.0/21,2000::/48", "10.1.0.0/21", emptyServiceCIDR, []int{24, 64}, ipam.MultiCIDRRangeAllocatorType}, - } { - test := tc - _, ctx := ktesting.NewTestContext(t) - t.Run(test.desc, func(t *testing.T) { - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - clusterCidrs, err := netutils.ParseCIDRs(strings.Split(test.clusterCIDR, ",")) - if err != nil { - clusterCidrs = nil - } - _, serviceCIDRIpNet, err := netutils.ParseCIDRSloppy(test.serviceCIDR) - if err != nil { - serviceCIDRIpNet = nil - } - _, secondaryServiceCIDRIpNet, err := netutils.ParseCIDRSloppy(test.secondaryServiceCIDR) - if err != nil { - secondaryServiceCIDRIpNet = nil - } - _, err = newTestNodeIpamController(ctx, clusterCidrs, serviceCIDRIpNet, secondaryServiceCIDRIpNet, test.maskSize, test.allocatorType) - if err != nil { - t.Errorf("Test %s, got error %v", test.desc, err) - } - }) - } -} diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index 09c0b6eab20..1c02615ad5f 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -584,13 +584,6 @@ const ( // Enables new performance-improving code in kube-proxy iptables mode MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore" - // owner: @sarveshr7 - // kep: https://kep.k8s.io/2593 - // alpha: v1.25 - // - // Enables the MultiCIDR Range allocator. 
- MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator" - // owner: @aojea // kep: https://kep.k8s.io/1880 // alpha: v1.27 @@ -1111,8 +1104,6 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MinimizeIPTablesRestore: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.30 - MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha}, - MultiCIDRServiceAllocator: {Default: false, PreRelease: featuregate.Alpha}, NewVolumeManagerReconstruction: {Default: true, PreRelease: featuregate.Beta}, diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 89546cde2d6..b1087e2aad4 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -738,9 +738,6 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/networking/v1.NetworkPolicyPort": schema_k8sio_api_networking_v1_NetworkPolicyPort(ref), "k8s.io/api/networking/v1.NetworkPolicySpec": schema_k8sio_api_networking_v1_NetworkPolicySpec(ref), "k8s.io/api/networking/v1.ServiceBackendPort": schema_k8sio_api_networking_v1_ServiceBackendPort(ref), - "k8s.io/api/networking/v1alpha1.ClusterCIDR": schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref), - "k8s.io/api/networking/v1alpha1.ClusterCIDRList": schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref), - "k8s.io/api/networking/v1alpha1.ClusterCIDRSpec": schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref), "k8s.io/api/networking/v1alpha1.IPAddress": schema_k8sio_api_networking_v1alpha1_IPAddress(ref), "k8s.io/api/networking/v1alpha1.IPAddressList": schema_k8sio_api_networking_v1alpha1_IPAddressList(ref), "k8s.io/api/networking/v1alpha1.IPAddressSpec": schema_k8sio_api_networking_v1alpha1_IPAddressSpec(ref), @@ -37128,146 +37125,6 @@ func schema_k8sio_api_networking_v1_ServiceBackendPort(ref common.ReferenceCallb } } -func schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), - }, - }, - "spec": { - SchemaProps: spec.SchemaProps{ - Description: "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDRSpec"), - }, - }, - }, - }, - }, - Dependencies: []string{ - "k8s.io/api/networking/v1alpha1.ClusterCIDRSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, - } -} - -func schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCIDRList contains a list of ClusterCIDR.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "kind": { - SchemaProps: spec.SchemaProps{ - Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - Type: []string{"string"}, - Format: "", - }, - }, - "apiVersion": { - SchemaProps: spec.SchemaProps{ - Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", - Type: []string{"string"}, - Format: "", - }, - }, - "metadata": { - SchemaProps: spec.SchemaProps{ - Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - Default: map[string]interface{}{}, - Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), - }, - }, - "items": { - SchemaProps: spec.SchemaProps{ - Description: "items is the list of ClusterCIDRs.", - Type: []string{"array"}, - Items: &spec.SchemaOrArray{ - Schema: &spec.Schema{ - SchemaProps: spec.SchemaProps{ - Default: map[string]interface{}{}, - Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDR"), - }, - }, - }, - }, - }, - }, - Required: []string{"items"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/networking/v1alpha1.ClusterCIDR", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, - } -} - -func schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { - return common.OpenAPIDefinition{ - Schema: spec.Schema{ - SchemaProps: spec.SchemaProps{ - Description: "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - Type: []string{"object"}, - Properties: map[string]spec.Schema{ - "nodeSelector": { - SchemaProps: spec.SchemaProps{ - Description: "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. 
This field is immutable.", - Ref: ref("k8s.io/api/core/v1.NodeSelector"), - }, - }, - "perNodeHostBits": { - SchemaProps: spec.SchemaProps{ - Description: "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - Default: 0, - Type: []string{"integer"}, - Format: "int32", - }, - }, - "ipv4": { - SchemaProps: spec.SchemaProps{ - Description: "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - "ipv6": { - SchemaProps: spec.SchemaProps{ - Description: "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - Default: "", - Type: []string{"string"}, - Format: "", - }, - }, - }, - Required: []string{"perNodeHostBits"}, - }, - }, - Dependencies: []string{ - "k8s.io/api/core/v1.NodeSelector"}, - } -} - func schema_k8sio_api_networking_v1alpha1_IPAddress(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/kubeapiserver/default_storage_factory_builder.go b/pkg/kubeapiserver/default_storage_factory_builder.go index f1946415d85..b8cd6413cef 100644 --- a/pkg/kubeapiserver/default_storage_factory_builder.go +++ b/pkg/kubeapiserver/default_storage_factory_builder.go @@ -71,7 +71,6 @@ func NewStorageFactoryConfig() *StorageFactoryConfig { // apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"), admissionregistration.Resource("validatingadmissionpolicies").WithVersion("v1beta1"), admissionregistration.Resource("validatingadmissionpolicybindings").WithVersion("v1beta1"), - networking.Resource("clustercidrs").WithVersion("v1alpha1"), networking.Resource("ipaddresses").WithVersion("v1alpha1"), certificates.Resource("clustertrustbundles").WithVersion("v1alpha1"), } diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 239471c2aeb..916b3cf2941 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -607,18 +607,6 @@ func AddHandlers(h printers.PrintHandler) { } _ = h.TableHandler(scaleColumnDefinitions, printScale) - clusterCIDRColumnDefinitions := []metav1.TableColumnDefinition{ - {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, - {Name: "PerNodeHostBits", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["perNodeHostBits"]}, - {Name: "IPv4", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv4"]}, - {Name: "IPv6", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv6"]}, - {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, - {Name: "NodeSelector", Type: "string", Priority: 1, Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["nodeSelector"]}, - } - - _ = h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDR) - _ = h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDRList) - 
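The clusterCIDR printer wiring deleted here follows the general internal-printers recipe: declare the table columns, write a print function whose signature the reflection-based TableHandler expects, and register both. A compact sketch of that recipe, assuming it lives alongside printers.go (the "Widget" type is hypothetical; PrintHandler, GenerateOptions, the metav1 types, and translateTimestampSince are the real helpers used by the deleted code):

package internalversion // sketch only; would sit next to printers.go

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/printers"
)

// Widget is a hypothetical internal type; a real one would also carry the
// generated DeepCopyObject needed to attach it to TableRow.Object.
type Widget struct {
	metav1.ObjectMeta
	Size int32
}

func addWidgetHandlers(h printers.PrintHandler) {
	columns := []metav1.TableColumnDefinition{
		{Name: "Name", Type: "string", Format: "name", Description: "name of the widget"},
		{Name: "Size", Type: "integer", Description: "declared size"},
		{Name: "Age", Type: "string", Description: "time since creation"},
	}
	// TableHandler verifies by reflection that the print func has the shape
	// func(*T, printers.GenerateOptions) ([]metav1.TableRow, error).
	_ = h.TableHandler(columns, printWidget)
}

func printWidget(obj *Widget, options printers.GenerateOptions) ([]metav1.TableRow, error) {
	row := metav1.TableRow{}
	row.Cells = append(row.Cells, obj.Name, obj.Size, translateTimestampSince(obj.CreationTimestamp))
	return []metav1.TableRow{row}, nil
}

Wide-only columns, like the NodeSelector column being removed here, are declared with Priority: 1 and their cells are appended only when options.Wide is set, which is exactly the branch visible in the deleted printClusterCIDR below.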
resourceClassColumnDefinitions := []metav1.TableColumnDefinition{ {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, {Name: "DriverName", Type: "string", Description: resourcev1alpha2.ResourceClass{}.SwaggerDoc()["driverName"]}, @@ -2800,57 +2788,6 @@ func printPriorityLevelConfigurationList(list *flowcontrol.PriorityLevelConfigur return rows, nil } -func printClusterCIDR(obj *networking.ClusterCIDR, options printers.GenerateOptions) ([]metav1.TableRow, error) { - row := metav1.TableRow{ - Object: runtime.RawExtension{Object: obj}, - } - ipv4 := "" - ipv6 := "" - - if obj.Spec.IPv4 != "" { - ipv4 = obj.Spec.IPv4 - } - if obj.Spec.IPv6 != "" { - ipv6 = obj.Spec.IPv6 - } - - row.Cells = append(row.Cells, obj.Name, fmt.Sprint(obj.Spec.PerNodeHostBits), ipv4, ipv6, translateTimestampSince(obj.CreationTimestamp)) - if options.Wide { - nodeSelector := "" - if obj.Spec.NodeSelector != nil { - allTerms := make([]string, 0) - for _, term := range obj.Spec.NodeSelector.NodeSelectorTerms { - if len(term.MatchExpressions) > 0 { - matchExpressions := fmt.Sprintf("MatchExpressions: %v", term.MatchExpressions) - allTerms = append(allTerms, matchExpressions) - } - - if len(term.MatchFields) > 0 { - matchFields := fmt.Sprintf("MatchFields: %v", term.MatchFields) - allTerms = append(allTerms, matchFields) - } - } - nodeSelector = strings.Join(allTerms, ",") - } - - row.Cells = append(row.Cells, nodeSelector) - } - - return []metav1.TableRow{row}, nil -} - -func printClusterCIDRList(list *networking.ClusterCIDRList, options printers.GenerateOptions) ([]metav1.TableRow, error) { - rows := make([]metav1.TableRow, 0, len(list.Items)) - for i := range list.Items { - r, err := printClusterCIDR(&list.Items[i], options) - if err != nil { - return nil, err - } - rows = append(rows, r...) - } - return rows, nil -} - func printIPAddress(obj *networking.IPAddress, options printers.GenerateOptions) ([]metav1.TableRow, error) { row := metav1.TableRow{ Object: runtime.RawExtension{Object: obj}, diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 6580181c45d..397d66aa141 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -6406,280 +6406,6 @@ func TestTableRowDeepCopyShouldNotPanic(t *testing.T) { } } -func TestPrintClusterCIDR(t *testing.T) { - ipv4CIDR := "10.1.0.0/16" - perNodeHostBits := int32(8) - ipv6CIDR := "fd00:1:1::/64" - - tests := []struct { - ccc networking.ClusterCIDR - options printers.GenerateOptions - expected []metav1.TableRow - }{ - { - // Test name, IPv4 only with no node selector. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test1"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test1", "8", ipv4CIDR, "", ""}}}, - }, - { - // Test name, IPv4 only with node selector, Not wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test2"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - // Does NOT get printed. - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. 
- expected: []metav1.TableRow{{Cells: []interface{}{"test2", "8", ipv4CIDR, "", ""}}}, - }, - { - // Test name, IPv4 only with no node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test3"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test3", "8", ipv4CIDR, "", "", ""}}}, - }, - { - // Test name, IPv4 only with node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test4"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test4", "8", ipv4CIDR, "", "", "MatchExpressions: [{foo In [bar]}]"}}}, - }, - { - // Test name, IPv6 only with no node selector. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test5"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age - expected: []metav1.TableRow{{Cells: []interface{}{"test5", "8", "", ipv6CIDR, ""}}}, - }, - { - // Test name, IPv6 only with node selector, Not wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test6"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - // Does NOT get printed. - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test6", "8", "", ipv6CIDR, ""}}}, - }, - { - // Test name, IPv6 only with no node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test7"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test7", "8", "", ipv6CIDR, "", ""}}}, - }, - { - // Test name, IPv6 only with node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test8"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv6: ipv6CIDR, - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test8", "8", "", ipv6CIDR, "", "MatchExpressions: [{foo In [bar]}]"}}}, - }, - { - // Test name, DualStack with no node selector. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test9"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. 
- expected: []metav1.TableRow{{Cells: []interface{}{"test9", "8", ipv4CIDR, ipv6CIDR, ""}}}, - }, - { - // Test name,DualStack with node selector, Not wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test10"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - // Does NOT get printed. - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - expected: []metav1.TableRow{{Cells: []interface{}{"test10", "8", ipv4CIDR, ipv6CIDR, ""}}}, - }, - { - // Test name, DualStack with no node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test11"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector. - expected: []metav1.TableRow{{Cells: []interface{}{"test11", "8", ipv4CIDR, ipv6CIDR, "", ""}}}, - }, - { - // Test name, DualStack with node selector, wide. - ccc: networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{Name: "test12"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - options: printers.GenerateOptions{Wide: true}, - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . - expected: []metav1.TableRow{{Cells: []interface{}{"test12", "8", ipv4CIDR, ipv6CIDR, "", "MatchExpressions: [{foo In [bar]}]"}}}, - }, - } - - for i, test := range tests { - rows, err := printClusterCIDR(&test.ccc, test.options) - if err != nil { - t.Fatal(err) - } - for i := range rows { - rows[i].Object.Object = nil - } - if !reflect.DeepEqual(test.expected, rows) { - t.Errorf("%d mismatch: %s", i, cmp.Diff(test.expected, rows)) - } - } -} - -func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector { - return &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{ - { - MatchExpressions: []api.NodeSelectorRequirement{ - { - Key: key, - Operator: op, - Values: values, - }, - }, - }, - }, - } -} - -func TestPrintClusterCIDRList(t *testing.T) { - - cccList := networking.ClusterCIDRList{ - Items: []networking.ClusterCIDR{ - { - ObjectMeta: metav1.ObjectMeta{Name: "ccc1"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - { - ObjectMeta: metav1.ObjectMeta{Name: "ccc2"}, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.2.0.0/16", - IPv6: "fd00:2:1::/64", - NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), - }, - }, - }, - } - - tests := []struct { - options printers.GenerateOptions - expected []metav1.TableRow - }{ - { - // Test name, DualStack with node selector, wide. - options: printers.GenerateOptions{Wide: false}, - expected: []metav1.TableRow{ - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. - {Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", ""}}, - {Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", ""}}, - }, - }, - { - // Test name, DualStack with node selector, wide. 
- options: printers.GenerateOptions{Wide: true}, - expected: []metav1.TableRow{ - // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector. - {Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "", "MatchExpressions: [{foo In [bar]}]"}}, - {Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "", "MatchExpressions: [{foo In [bar]}]"}}, - }, - }, - } - - for _, test := range tests { - rows, err := printClusterCIDRList(&cccList, test.options) - if err != nil { - t.Fatalf("Error printing service list: %#v", err) - } - for i := range rows { - rows[i].Object.Object = nil - } - if !reflect.DeepEqual(test.expected, rows) { - t.Errorf("mismatch: %s", cmp.Diff(test.expected, rows)) - } - } -} - func TestPrintIPAddress(t *testing.T) { ip := networking.IPAddress{ ObjectMeta: metav1.ObjectMeta{ diff --git a/pkg/registry/networking/clustercidr/doc.go b/pkg/registry/networking/clustercidr/doc.go deleted file mode 100644 index ebd30f63304..00000000000 --- a/pkg/registry/networking/clustercidr/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clustercidr // import "k8s.io/kubernetes/pkg/registry/networking/clustercidr" diff --git a/pkg/registry/networking/clustercidr/storage/storage.go b/pkg/registry/networking/clustercidr/storage/storage.go deleted file mode 100644 index 3c0f44b9ff8..00000000000 --- a/pkg/registry/networking/clustercidr/storage/storage.go +++ /dev/null @@ -1,64 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package storage - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" - "k8s.io/apiserver/pkg/registry/rest" - networkingapi "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/printers" - printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" - printerstorage "k8s.io/kubernetes/pkg/printers/storage" - "k8s.io/kubernetes/pkg/registry/networking/clustercidr" -) - -// REST implements a RESTStorage for ClusterCIDRs against etcd. -type REST struct { - *genericregistry.Store -} - -// NewREST returns a RESTStorage object that will work against ClusterCIDRs. 
-func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) { - store := &genericregistry.Store{ - NewFunc: func() runtime.Object { return &networkingapi.ClusterCIDR{} }, - NewListFunc: func() runtime.Object { return &networkingapi.ClusterCIDRList{} }, - DefaultQualifiedResource: networkingapi.Resource("clustercidrs"), - SingularQualifiedResource: networkingapi.Resource("clustercidr"), - - CreateStrategy: clustercidr.Strategy, - UpdateStrategy: clustercidr.Strategy, - DeleteStrategy: clustercidr.Strategy, - - TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, - } - options := &generic.StoreOptions{RESTOptions: optsGetter} - if err := store.CompleteWithOptions(options); err != nil { - return nil, err - } - - return &REST{store}, nil -} - -// Implement ShortNamesProvider. -var _ rest.ShortNamesProvider = &REST{} - -// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource. -func (r *REST) ShortNames() []string { - return []string{"cc"} -} diff --git a/pkg/registry/networking/clustercidr/storage/storage_test.go b/pkg/registry/networking/clustercidr/storage/storage_test.go deleted file mode 100644 index 774ec59f54d..00000000000 --- a/pkg/registry/networking/clustercidr/storage/storage_test.go +++ /dev/null @@ -1,196 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package storage - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apiserver/pkg/registry/generic" - genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing" - etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/networking" - _ "k8s.io/kubernetes/pkg/apis/networking/install" - "k8s.io/kubernetes/pkg/registry/registrytest" -) - -func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) { - etcdStorage, server := registrytest.NewEtcdStorageForResource(t, networking.Resource("clustercidrs")) - restOptions := generic.RESTOptions{ - StorageConfig: etcdStorage, - Decorator: generic.UndecoratedStorage, - DeleteCollectionWorkers: 1, - ResourcePrefix: "clustercidrs", - } - clusterCIDRStorage, err := NewREST(restOptions) - if err != nil { - t.Fatalf("unexpected error from REST storage: %v", err) - } - return clusterCIDRStorage, server -} - -var ( - namespace = metav1.NamespaceNone - name = "foo-clustercidr" -) - -func newClusterCIDR() *networking.ClusterCIDR { - return &networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{ - { - MatchExpressions: []api.NodeSelectorRequirement{ - { - Key: "foo", - Operator: api.NodeSelectorOpIn, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - } -} - -func validClusterCIDR() *networking.ClusterCIDR { - return newClusterCIDR() -} - -func TestCreate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - validCC := validClusterCIDR() - noCIDRCC := validClusterCIDR() - noCIDRCC.Spec.IPv4 = "" - noCIDRCC.Spec.IPv6 = "" - invalidCCPerNodeHostBits := validClusterCIDR() - invalidCCPerNodeHostBits.Spec.PerNodeHostBits = 100 - invalidCCCIDR := validClusterCIDR() - invalidCCCIDR.Spec.IPv6 = "10.1.0.0/16" - - test.TestCreate( - // valid - validCC, - //invalid - noCIDRCC, - invalidCCPerNodeHostBits, - invalidCCCIDR, - ) -} - -func TestUpdate(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestUpdate( - // valid - validClusterCIDR(), - // updateFunc - func(obj runtime.Object) runtime.Object { - object := obj.(*networking.ClusterCIDR) - object.Finalizers = []string{"test.k8s.io/test-finalizer"} - return object - }, - // invalid updateFunc: ObjectMeta is not to be tampered with. 
- func(obj runtime.Object) runtime.Object { - object := obj.(*networking.ClusterCIDR) - object.Name = "" - return object - }, - ) -} - -func TestDelete(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestDelete(validClusterCIDR()) -} - -func TestGet(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestGet(validClusterCIDR()) -} - -func TestList(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestList(validClusterCIDR()) -} - -func TestWatch(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - test := genericregistrytest.New(t, storage.Store) - test = test.ClusterScope() - test.TestWatch( - validClusterCIDR(), - // matching labels - []labels.Set{}, - // not matching labels - []labels.Set{ - {"a": "c"}, - {"foo": "bar"}, - }, - // matching fields - []fields.Set{ - {"metadata.name": name}, - }, - // not matching fields - []fields.Set{ - {"metadata.name": "bar"}, - {"name": name}, - }, - ) -} - -func TestShortNames(t *testing.T) { - storage, server := newStorage(t) - defer server.Terminate(t) - defer storage.Store.DestroyFunc() - expected := []string{"cc"} - registrytest.AssertShortNames(t, storage, expected) -} diff --git a/pkg/registry/networking/clustercidr/strategy.go b/pkg/registry/networking/clustercidr/strategy.go deleted file mode 100644 index a69a5f90413..00000000000 --- a/pkg/registry/networking/clustercidr/strategy.go +++ /dev/null @@ -1,82 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clustercidr - -import ( - "context" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/validation/field" - "k8s.io/apiserver/pkg/storage/names" - "k8s.io/kubernetes/pkg/api/legacyscheme" - "k8s.io/kubernetes/pkg/apis/networking" - "k8s.io/kubernetes/pkg/apis/networking/validation" -) - -// clusterCIDRStrategy implements verification logic for ClusterCIDRs. -type clusterCIDRStrategy struct { - runtime.ObjectTyper - names.NameGenerator -} - -// Strategy is the default logic that applies when creating and updating clusterCIDR objects. -var Strategy = clusterCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} - -// NamespaceScoped returns false because all clusterCIDRs do not need to be within a namespace. -func (clusterCIDRStrategy) NamespaceScoped() bool { - return false -} - -func (clusterCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {} - -func (clusterCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {} - -// Validate validates a new ClusterCIDR. 
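Validate below simply delegates to validation.ValidateClusterCIDR. As background, such validation helpers all share one shape: they walk the object, build field paths with field.NewPath/Child, and accumulate a field.ErrorList. A minimal hypothetical sketch of that shape (the spec type and rule are illustrative; only the field package is real):

package validation // sketch only

import (
	"k8s.io/apimachinery/pkg/util/validation/field"
)

// widgetSpec stands in for a real spec type such as ClusterCIDRSpec.
type widgetSpec struct {
	PerNodeHostBits int32
}

// validateWidgetSpec appends one error per violated rule, keyed by field path.
func validateWidgetSpec(spec *widgetSpec, fldPath *field.Path) field.ErrorList {
	allErrs := field.ErrorList{}
	// Mirrors the documented "minimum value is 4 (16 IPs)" rule for perNodeHostBits.
	if spec.PerNodeHostBits < 4 {
		allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), spec.PerNodeHostBits, "must be at least 4"))
	}
	return allErrs
}

ValidateUpdate in the deleted strategy then concatenates the create-time list with update-specific checks (immutability, in ClusterCIDR's case) before returning.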
-func (clusterCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { - clusterCIDR := obj.(*networking.ClusterCIDR) - return validation.ValidateClusterCIDR(clusterCIDR) -} - -// WarningsOnCreate returns warnings for the creation of the given object. -func (clusterCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { - return nil -} - -// Canonicalize normalizes the object after validation. -func (clusterCIDRStrategy) Canonicalize(obj runtime.Object) {} - -// AllowCreateOnUpdate is false for ClusterCIDR; this means POST is needed to create one. -func (clusterCIDRStrategy) AllowCreateOnUpdate() bool { - return false -} - -// ValidateUpdate is the default update validation for an end user. -func (clusterCIDRStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { - validationErrorList := validation.ValidateClusterCIDR(obj.(*networking.ClusterCIDR)) - updateErrorList := validation.ValidateClusterCIDRUpdate(obj.(*networking.ClusterCIDR), old.(*networking.ClusterCIDR)) - return append(validationErrorList, updateErrorList...) -} - -// WarningsOnUpdate returns warnings for the given update. -func (clusterCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { - return nil -} - -// AllowUnconditionalUpdate is the default update policy for ClusterCIDR objects. -func (clusterCIDRStrategy) AllowUnconditionalUpdate() bool { - return true -} diff --git a/pkg/registry/networking/clustercidr/strategy_test.go b/pkg/registry/networking/clustercidr/strategy_test.go deleted file mode 100644 index f3225377666..00000000000 --- a/pkg/registry/networking/clustercidr/strategy_test.go +++ /dev/null @@ -1,86 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package clustercidr - -import ( - "testing" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - genericapirequest "k8s.io/apiserver/pkg/endpoints/request" - api "k8s.io/kubernetes/pkg/apis/core" - "k8s.io/kubernetes/pkg/apis/networking" -) - -func newClusterCIDR() networking.ClusterCIDR { - return networking.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo", - }, - Spec: networking.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: &api.NodeSelector{ - NodeSelectorTerms: []api.NodeSelectorTerm{ - { - MatchExpressions: []api.NodeSelectorRequirement{ - { - Key: "foo", - Operator: api.NodeSelectorOpIn, - Values: []string{"bar"}, - }, - }, - }, - }, - }, - }, - } -} - -func TestClusterCIDRStrategy(t *testing.T) { - ctx := genericapirequest.NewDefaultContext() - apiRequest := genericapirequest.RequestInfo{APIGroup: "networking.k8s.io", - APIVersion: "v1alpha1", - Resource: "clustercidrs", - } - ctx = genericapirequest.WithRequestInfo(ctx, &apiRequest) - if Strategy.NamespaceScoped() { - t.Errorf("ClusterCIDRs must be cluster scoped") - } - if Strategy.AllowCreateOnUpdate() { - t.Errorf("ClusterCIDRs should not allow create on update") - } - - ccc := newClusterCIDR() - Strategy.PrepareForCreate(ctx, &ccc) - - errs := Strategy.Validate(ctx, &ccc) - if len(errs) != 0 { - t.Errorf("Unexpected error validating %v", errs) - } - invalidCCC := newClusterCIDR() - invalidCCC.ResourceVersion = "4" - invalidCCC.Spec = networking.ClusterCIDRSpec{} - Strategy.PrepareForUpdate(ctx, &invalidCCC, &ccc) - errs = Strategy.ValidateUpdate(ctx, &invalidCCC, &ccc) - if len(errs) == 0 { - t.Errorf("Expected a validation error") - } - if invalidCCC.ResourceVersion != "4" { - t.Errorf("Incoming resource version on update should not be mutated") - } -} diff --git a/pkg/registry/networking/rest/storage_settings.go b/pkg/registry/networking/rest/storage_settings.go index 1ca958f9c8c..59482432c11 100644 --- a/pkg/registry/networking/rest/storage_settings.go +++ b/pkg/registry/networking/rest/storage_settings.go @@ -25,7 +25,6 @@ import ( serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" - clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage" ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage" ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage" ipaddressstore "k8s.io/kubernetes/pkg/registry/networking/ipaddress/storage" @@ -90,14 +89,6 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.API func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) { storage := map[string]rest.Storage{} - // clustercidrs - if resource := "clustercidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) { - clusterCIDRCStorage, err := clustercidrstore.NewREST(restOptionsGetter) - if err != nil { - return storage, err - } - storage[resource] = clusterCIDRCStorage - } // ipaddress if resource := "ipaddresses"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) { diff --git a/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go index d73be6ef843..2db37888525 100644 --- 
a/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -25,7 +25,6 @@ import ( io "io" proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" math "math" math_bits "math/bits" @@ -44,94 +43,10 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package -func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } -func (*ClusterCIDR) ProtoMessage() {} -func (*ClusterCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{0} -} -func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDR.Merge(m, src) -} -func (m *ClusterCIDR) XXX_Size() int { - return m.Size() -} -func (m *ClusterCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo - -func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } -func (*ClusterCIDRList) ProtoMessage() {} -func (*ClusterCIDRList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{1} -} -func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRList.Merge(m, src) -} -func (m *ClusterCIDRList) XXX_Size() int { - return m.Size() -} -func (m *ClusterCIDRList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo - -func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } -func (*ClusterCIDRSpec) ProtoMessage() {} -func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{2} -} -func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) -} -func (m *ClusterCIDRSpec) XXX_Size() int { - return m.Size() -} -func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo - func (m *IPAddress) Reset() { *m = IPAddress{} } func (*IPAddress) ProtoMessage() {} func (*IPAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{3} + return fileDescriptor_c1b7ac8d7d97acec, []int{0} } func (m *IPAddress) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -159,7 +74,7 @@ var xxx_messageInfo_IPAddress proto.InternalMessageInfo func (m *IPAddressList) Reset() { *m = IPAddressList{} } func (*IPAddressList) ProtoMessage() {} func (*IPAddressList) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{4} + return fileDescriptor_c1b7ac8d7d97acec, 
[]int{1} } func (m *IPAddressList) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -187,7 +102,7 @@ var xxx_messageInfo_IPAddressList proto.InternalMessageInfo func (m *IPAddressSpec) Reset() { *m = IPAddressSpec{} } func (*IPAddressSpec) ProtoMessage() {} func (*IPAddressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{5} + return fileDescriptor_c1b7ac8d7d97acec, []int{2} } func (m *IPAddressSpec) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -215,7 +130,7 @@ var xxx_messageInfo_IPAddressSpec proto.InternalMessageInfo func (m *ParentReference) Reset() { *m = ParentReference{} } func (*ParentReference) ProtoMessage() {} func (*ParentReference) Descriptor() ([]byte, []int) { - return fileDescriptor_c1b7ac8d7d97acec, []int{6} + return fileDescriptor_c1b7ac8d7d97acec, []int{3} } func (m *ParentReference) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -241,9 +156,6 @@ func (m *ParentReference) XXX_DiscardUnknown() { var xxx_messageInfo_ParentReference proto.InternalMessageInfo func init() { - proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") - proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") - proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") proto.RegisterType((*IPAddress)(nil), "k8s.io.api.networking.v1alpha1.IPAddress") proto.RegisterType((*IPAddressList)(nil), "k8s.io.api.networking.v1alpha1.IPAddressList") proto.RegisterType((*IPAddressSpec)(nil), "k8s.io.api.networking.v1alpha1.IPAddressSpec") @@ -255,187 +167,39 @@ func init() { } var fileDescriptor_c1b7ac8d7d97acec = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x95, 0xcf, 0x4e, 0xdb, 0x4a, - 0x14, 0xc6, 0x63, 0x48, 0x24, 0x3c, 0x80, 0xc2, 0xf5, 0xe6, 0x46, 0x2c, 0x9c, 0x28, 0x77, 0xc3, - 0xd5, 0xbd, 0x78, 0x0a, 0x42, 0x51, 0xb7, 0x18, 0x24, 0x1a, 0xa9, 0x85, 0x74, 0x90, 0xba, 0xa8, - 0x58, 0x74, 0xe2, 0x1c, 0x1c, 0x37, 0xd8, 0x63, 0xcd, 0x8c, 0x53, 0x75, 0xd7, 0x47, 0xe8, 0xdb, - 0x74, 0xd5, 0x4a, 0xed, 0x8a, 0x25, 0x4b, 0x56, 0x51, 0x71, 0x5f, 0xa0, 0x8f, 0x50, 0xcd, 0xc4, - 0x89, 0x9d, 0xa4, 0xfc, 0xe9, 0x86, 0x1d, 0x73, 0xce, 0xef, 0x7c, 0xe7, 0x1c, 0x9f, 0x0f, 0x05, - 0x1d, 0x0d, 0x9e, 0x0a, 0x27, 0x60, 0x78, 0x90, 0x74, 0x81, 0x47, 0x20, 0x41, 0xe0, 0x21, 0x44, - 0x3d, 0xc6, 0x71, 0x96, 0xa0, 0x71, 0x80, 0x23, 0x90, 0xef, 0x18, 0x1f, 0x04, 0x91, 0x8f, 0x87, - 0x3b, 0xf4, 0x22, 0xee, 0xd3, 0x1d, 0xec, 0x43, 0x04, 0x9c, 0x4a, 0xe8, 0x39, 0x31, 0x67, 0x92, - 0x59, 0xf6, 0x98, 0x77, 0x68, 0x1c, 0x38, 0x39, 0xef, 0x4c, 0xf8, 0xcd, 0x6d, 0x3f, 0x90, 0xfd, - 0xa4, 0xeb, 0x78, 0x2c, 0xc4, 0x3e, 0xf3, 0x19, 0xd6, 0x65, 0xdd, 0xe4, 0x5c, 0xbf, 0xf4, 0x43, - 0xff, 0x35, 0x96, 0xdb, 0x6c, 0x16, 0xda, 0x7b, 0x8c, 0x03, 0x1e, 0x2e, 0xb4, 0xdc, 0xdc, 0xcb, - 0x99, 0x90, 0x7a, 0xfd, 0x20, 0x02, 0xfe, 0x1e, 0xc7, 0x03, 0x5f, 0x05, 0x04, 0x0e, 0x41, 0xd2, - 0xdf, 0x55, 0xe1, 0xdb, 0xaa, 0x78, 0x12, 0xc9, 0x20, 0x84, 0x85, 0x82, 0xd6, 0x7d, 0x05, 0xc2, - 0xeb, 0x43, 0x48, 0xe7, 0xeb, 0x9a, 0xdf, 0x0c, 0xb4, 0x7a, 0x70, 0x91, 0x08, 0x09, 0xfc, 0xa0, - 0x7d, 0x48, 0xac, 0x37, 0x68, 0x45, 0xcd, 0xd4, 0xa3, 0x92, 0xd6, 0x8c, 0x86, 0xb1, 0xb5, 0xba, - 0xfb, 0xc4, 0xc9, 0x3f, 0xda, 0x54, 0xda, 0x89, 0x07, 0xbe, 0x0a, 0x08, 0x47, 0xd1, 0xce, 0x70, - 0xc7, 0x39, 0xe9, 0xbe, 0x05, 0x4f, 0xbe, 0x00, 0x49, 0x5d, 0xeb, 0x72, 0x54, 0x2f, 0xa5, 0xa3, - 0x3a, 0xca, 0x63, 0x64, 0xaa, 0x6a, 0xbd, 
0x44, 0x65, 0x11, 0x83, 0x57, 0x5b, 0xd2, 0xea, 0xd8, - 0xb9, 0xfb, 0x24, 0x4e, 0x61, 0xb8, 0xd3, 0x18, 0x3c, 0x77, 0x2d, 0x13, 0x2f, 0xab, 0x17, 0xd1, - 0x52, 0xcd, 0xaf, 0x06, 0xaa, 0x16, 0xb8, 0xe7, 0x81, 0x90, 0xd6, 0xd9, 0xc2, 0x22, 0xce, 0xc3, - 0x16, 0x51, 0xd5, 0x7a, 0x8d, 0x8d, 0xac, 0xd3, 0xca, 0x24, 0x52, 0x58, 0xa2, 0x83, 0x2a, 0x81, - 0x84, 0x50, 0xd4, 0x96, 0x1a, 0xcb, 0x5b, 0xab, 0xbb, 0xff, 0xfd, 0xc1, 0x16, 0xee, 0x7a, 0xa6, - 0x5b, 0x69, 0x2b, 0x05, 0x32, 0x16, 0x6a, 0xfe, 0x9c, 0xdd, 0x41, 0x6d, 0x67, 0xbd, 0x42, 0x6b, - 0x11, 0xeb, 0xc1, 0x29, 0x5c, 0x80, 0x27, 0x19, 0xcf, 0xf6, 0x68, 0x14, 0x9b, 0x29, 0xdb, 0xa9, - 0xa9, 0x8f, 0x0b, 0x9c, 0xbb, 0x91, 0x8e, 0xea, 0x6b, 0xc5, 0x08, 0x99, 0xd1, 0xb1, 0xf6, 0x51, - 0x35, 0x06, 0xae, 0x80, 0x67, 0x4c, 0x48, 0x37, 0x90, 0x42, 0x5f, 0xa3, 0xe2, 0xfe, 0x9d, 0x8d, - 0x56, 0xed, 0xcc, 0xa6, 0xc9, 0x3c, 0x6f, 0x35, 0x50, 0x39, 0x88, 0x87, 0x7b, 0xb5, 0xe5, 0x86, - 0xb1, 0x65, 0xe6, 0x47, 0x69, 0x77, 0x86, 0x7b, 0x44, 0x67, 0x32, 0xa2, 0x55, 0x2b, 0x2f, 0x10, - 0x2d, 0x4d, 0xb4, 0x9a, 0x5f, 0x0c, 0x64, 0xb6, 0x3b, 0xfb, 0xbd, 0x1e, 0x07, 0x21, 0x1e, 0xc1, - 0x79, 0x27, 0x33, 0xce, 0xdb, 0xbe, 0xef, 0x66, 0xd3, 0xd1, 0x6e, 0xf5, 0xdd, 0x67, 0x03, 0xad, - 0x4f, 0xa9, 0x47, 0x70, 0xdd, 0xf1, 0xac, 0xeb, 0xfe, 0x7d, 0xf0, 0x06, 0xb7, 0x78, 0x2e, 0x2c, - 0x8c, 0xaf, 0x0d, 0x77, 0x86, 0xcc, 0x98, 0x72, 0x88, 0x24, 0x81, 0xf3, 0x6c, 0xfe, 0x7b, 0xff, - 0x41, 0x3b, 0x93, 0x02, 0xe0, 0x10, 0x79, 0xe0, 0xae, 0xa7, 0xa3, 0xba, 0x39, 0x0d, 0x92, 0x5c, - 0xb0, 0xf9, 0xc9, 0x40, 0xd5, 0x39, 0xda, 0xfa, 0x07, 0x55, 0x7c, 0xce, 0x92, 0x58, 0x77, 0x33, - 0xf3, 0x39, 0x8f, 0x54, 0x90, 0x8c, 0x73, 0xd6, 0xff, 0x68, 0x85, 0x83, 0x60, 0x09, 0xf7, 0x40, - 0x1f, 0xcf, 0xcc, 0xbf, 0x12, 0xc9, 0xe2, 0x64, 0x4a, 0x58, 0x18, 0x99, 0x11, 0x0d, 0x41, 0xc4, - 0xd4, 0x83, 0xcc, 0x9f, 0x7f, 0x65, 0xb8, 0x79, 0x3c, 0x49, 0x90, 0x9c, 0x51, 0x4e, 0x55, 0x8f, - 0x79, 0xa7, 0x2a, 0x96, 0xe8, 0x8c, 0x7b, 0x78, 0x79, 0x63, 0x97, 0xae, 0x6e, 0xec, 0xd2, 0xf5, - 0x8d, 0x5d, 0xfa, 0x90, 0xda, 0xc6, 0x65, 0x6a, 0x1b, 0x57, 0xa9, 0x6d, 0x5c, 0xa7, 0xb6, 0xf1, - 0x3d, 0xb5, 0x8d, 0x8f, 0x3f, 0xec, 0xd2, 0x6b, 0xfb, 0xee, 0x5f, 0xa3, 0x5f, 0x01, 0x00, 0x00, - 0xff, 0xff, 0x33, 0x03, 0x4c, 0x8e, 0xc7, 0x06, 0x00, 0x00, -} - -func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterCIDRList) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.IPv6) - copy(dAtA[i:], m.IPv6) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) - i-- - dAtA[i] = 0x22 - i -= len(m.IPv4) - copy(dAtA[i:], m.IPv4) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) - i-- - dAtA[i] = 0x10 - if m.NodeSelector != nil { - { - size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + // 509 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0x4d, 0x6f, 0xd3, 0x40, + 0x10, 0x8d, 0xdb, 0x44, 0x8a, 0xb7, 0x44, 0x80, 0x4f, 0x51, 0x0e, 0x9b, 0x28, 0x5c, 0x8a, 0x44, + 0x77, 0x49, 0x85, 0x10, 0x57, 0x22, 0xa4, 0xaa, 0x12, 0xb4, 0x95, 0xb9, 0xa1, 0x1e, 0xd8, 0x38, + 0x53, 0xc7, 0x18, 0xef, 0xae, 0x76, 0xd7, 0x41, 0xdc, 0xf8, 0x09, 0xfc, 0x1b, 0x4e, 0x70, 0xce, + 0xb1, 0xc7, 0x9e, 0x22, 0x62, 0xfe, 0x08, 0xda, 0x8d, 0x63, 0x57, 0x8d, 0xfa, 0x71, 0xf3, 0xbc, + 0x79, 0xef, 0xcd, 0xbc, 0x59, 0x19, 0x1d, 0xa5, 0x6f, 0x34, 0x49, 0x04, 0x4d, 0xf3, 0x09, 0x28, + 0x0e, 0x06, 0x34, 0x9d, 0x03, 0x9f, 0x0a, 0x45, 0xcb, 0x06, 0x93, 0x09, 0xe5, 0x60, 0xbe, 0x09, + 0x95, 0x26, 0x3c, 0xa6, 0xf3, 0x11, 0xfb, 0x2a, 0x67, 0x6c, 0x44, 0x63, 0xe0, 0xa0, 0x98, 0x81, + 0x29, 0x91, 0x4a, 0x18, 0x11, 0xe0, 0x35, 0x9f, 0x30, 0x99, 0x90, 0x9a, 0x4f, 0x36, 0xfc, 0xde, + 0x41, 0x9c, 0x98, 0x59, 0x3e, 0x21, 0x91, 0xc8, 0x68, 0x2c, 0x62, 0x41, 0x9d, 0x6c, 0x92, 0x5f, + 0xb8, 0xca, 0x15, 0xee, 0x6b, 0x6d, 0xd7, 0x7b, 0x55, 0x8f, 0xcf, 0x58, 0x34, 0x4b, 0x38, 0xa8, + 0xef, 0x54, 0xa6, 0xb1, 0x05, 0x34, 0xcd, 0xc0, 0x30, 0x3a, 0xdf, 0x5a, 0xa2, 0x47, 0x6f, 0x53, + 0xa9, 0x9c, 0x9b, 0x24, 0x83, 0x2d, 0xc1, 0xeb, 0xfb, 0x04, 0x3a, 0x9a, 0x41, 0xc6, 0x6e, 0xea, + 0x86, 0x7f, 0x3c, 0xe4, 0x1f, 0x9f, 0xbd, 0x9d, 0x4e, 0x15, 0x68, 0x1d, 0x7c, 0x46, 0x6d, 0xbb, + 0xd1, 0x94, 0x19, 0xd6, 0xf5, 0x06, 0xde, 0xfe, 0xde, 0xe1, 0x4b, 0x52, 0x9f, 0xa3, 0x32, 0x26, + 0x32, 0x8d, 0x2d, 0xa0, 0x89, 0x65, 0x93, 0xf9, 0x88, 0x9c, 0x4e, 0xbe, 0x40, 0x64, 0x3e, 0x80, + 0x61, 0xe3, 0x60, 0xb1, 0xec, 0x37, 0x8a, 0x65, 0x1f, 0xd5, 0x58, 0x58, 0xb9, 0x06, 0xa7, 0xa8, + 0xa9, 0x25, 0x44, 0xdd, 0x1d, 0xe7, 0x7e, 0x40, 0xee, 0x3e, 0x36, 0xa9, 0x56, 0xfb, 0x28, 0x21, + 0x1a, 0x3f, 0x2a, 0xad, 0x9b, 0xb6, 0x0a, 0x9d, 0xd1, 0xf0, 0xb7, 0x87, 
0x3a, 0x15, 0xeb, 0x7d, + 0xa2, 0x4d, 0x70, 0xbe, 0x15, 0x82, 0x3c, 0x2c, 0x84, 0x55, 0xbb, 0x08, 0x4f, 0xca, 0x39, 0xed, + 0x0d, 0x72, 0x2d, 0xc0, 0x09, 0x6a, 0x25, 0x06, 0x32, 0xdd, 0xdd, 0x19, 0xec, 0xee, 0xef, 0x1d, + 0x3e, 0x7f, 0x70, 0x82, 0x71, 0xa7, 0x74, 0x6d, 0x1d, 0x5b, 0x7d, 0xb8, 0xb6, 0x19, 0x66, 0xd7, + 0xd6, 0xb7, 0xb1, 0x82, 0x73, 0xe4, 0x4b, 0xa6, 0x80, 0x9b, 0x10, 0x2e, 0xca, 0xfd, 0xe9, 0x7d, + 0x43, 0xce, 0x36, 0x02, 0x50, 0xc0, 0x23, 0x18, 0x77, 0x8a, 0x65, 0xdf, 0xaf, 0xc0, 0xb0, 0x36, + 0x1c, 0xfe, 0xf2, 0xd0, 0xe3, 0x1b, 0xec, 0xe0, 0x19, 0x6a, 0xc5, 0x4a, 0xe4, 0xd2, 0x4d, 0xf3, + 0xeb, 0x3d, 0x8f, 0x2c, 0x18, 0xae, 0x7b, 0xc1, 0x0b, 0xd4, 0x56, 0xa0, 0x45, 0xae, 0x22, 0x70, + 0x8f, 0xe7, 0xd7, 0x57, 0x0a, 0x4b, 0x3c, 0xac, 0x18, 0x01, 0x45, 0x3e, 0x67, 0x19, 0x68, 0xc9, + 0x22, 0xe8, 0xee, 0x3a, 0xfa, 0xd3, 0x92, 0xee, 0x9f, 0x6c, 0x1a, 0x61, 0xcd, 0x09, 0x06, 0xa8, + 0x69, 0x8b, 0x6e, 0xd3, 0x71, 0xab, 0x87, 0xb6, 0xdc, 0xd0, 0x75, 0xc6, 0xef, 0x16, 0x2b, 0xdc, + 0xb8, 0x5c, 0xe1, 0xc6, 0xd5, 0x0a, 0x37, 0x7e, 0x14, 0xd8, 0x5b, 0x14, 0xd8, 0xbb, 0x2c, 0xb0, + 0x77, 0x55, 0x60, 0xef, 0x6f, 0x81, 0xbd, 0x9f, 0xff, 0x70, 0xe3, 0x13, 0xbe, 0xfb, 0x6f, 0xff, + 0x1f, 0x00, 0x00, 0xff, 0xff, 0xde, 0x6a, 0x6d, 0x5e, 0x27, 0x04, 0x00, 0x00, } func (m *IPAddress) Marshal() (dAtA []byte, err error) { @@ -617,54 +381,6 @@ func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { dAtA[offset] = uint8(v) return base } -func (m *ClusterCIDR) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterCIDRList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterCIDRSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.NodeSelector != nil { - l = m.NodeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) - l = len(m.IPv4) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IPv6) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *IPAddress) Size() (n int) { if m == nil { return 0 @@ -731,46 +447,6 @@ func sovGenerated(x uint64) (n int) { func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *ClusterCIDR) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDR{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ClusterCIDR{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ClusterCIDRList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + 
`,`, - `}`, - }, "") - return s -} -func (this *ClusterCIDRSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterCIDRSpec{`, - `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, - `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, - `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, - `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, - `}`, - }, "") - return s -} func (this *IPAddress) String() string { if this == nil { return "nil" @@ -829,408 +505,6 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterCIDR{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeSelector == nil { - m.NodeSelector = &v11.NodeSelector{} - } - if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) - } - 
m.PerNodeHostBits = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PerNodeHostBits |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IPv4 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IPv6 = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *IPAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/staging/src/k8s.io/api/networking/v1alpha1/generated.proto b/staging/src/k8s.io/api/networking/v1alpha1/generated.proto index 82fda0b9911..d6e8376a380 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/generated.proto +++ b/staging/src/k8s.io/api/networking/v1alpha1/generated.proto @@ -21,7 +21,6 @@ syntax = "proto2"; package k8s.io.api.networking.v1alpha1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,69 +28,6 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "k8s.io/api/networking/v1alpha1"; -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. 
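Before the deleted proto definitions continue below, a minimal sketch of the object this alpha API described may help; it is not from this PR, every field value is illustrative, and it only builds against k8s.io/api releases that still ship the type (roughly v0.25 through v0.28). The comments walk through the perNodeHostBits arithmetic documented in the spec fields that follow.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// With perNodeHostBits=8 each matching node receives 2^8 = 256
	// addresses: a /24 carved from the IPv4 block (32-8) and a /120
	// carved from the IPv6 block (128-8), per the field docs below.
	cc := networkingv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "cidr-set-1"}, // hypothetical name
		Spec: networkingv1alpha1.ClusterCIDRSpec{
			PerNodeHostBits: 8,
			IPv4:            "10.0.0.0/8",
			IPv6:            "2001:db8::/64",
			// Restrict this config to one zone; a nil or empty
			// nodeSelector would make it eligible for every node.
			NodeSelector: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      "topology.kubernetes.io/zone",
						Operator: v1.NodeSelectorOpIn,
						Values:   []string{"zone-a"}, // hypothetical zone
					}},
				}},
			},
		},
	}
	fmt.Printf("per-node masks: /%d (IPv4), /%d (IPv6)\n",
		32-cc.Spec.PerNodeHostBits, 128-cc.Spec.PerNodeHostBits)
}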
-message ClusterCIDR { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional ClusterCIDRSpec spec = 2; -} - -// ClusterCIDRList contains a list of ClusterCIDR. -message ClusterCIDRList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of ClusterCIDRs. - repeated ClusterCIDR items = 2; -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -message ClusterCIDRSpec { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - optional int32 perNodeHostBits = 2; - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv4 = 3; - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - optional string ipv6 = 4; -} - // IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs // that operate on IP addresses. The object is used by the Service core API for allocation of IP addresses. // An IP address can be represented in different formats, to guarantee the uniqueness of the IP, diff --git a/staging/src/k8s.io/api/networking/v1alpha1/register.go b/staging/src/k8s.io/api/networking/v1alpha1/register.go index 8dda6394d47..f45f8ed1ecc 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/register.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/register.go @@ -52,8 +52,6 @@ var ( // Adds the list of known types to the given scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &ClusterCIDR{}, - &ClusterCIDRList{}, &IPAddress{}, &IPAddressList{}, ) diff --git a/staging/src/k8s.io/api/networking/v1alpha1/types.go b/staging/src/k8s.io/api/networking/v1alpha1/types.go index 9bc137bd120..8c431e5b5c6 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/types.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/types.go @@ -17,85 +17,9 @@ limitations under the License. 
package v1alpha1 import ( - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDR represents a single configuration for per-Node Pod CIDR -// allocations when the MultiCIDRRangeAllocator is enabled (see the config for -// kube-controller-manager). A cluster may have any number of ClusterCIDR -// resources, all of which will be considered when allocating a CIDR for a -// Node. A ClusterCIDR is eligible to be used for a given Node when the node -// selector matches the node in question and has free CIDRs to allocate. In -// case of multiple matching ClusterCIDR resources, the allocator will attempt -// to break ties using internal heuristics, but any ClusterCIDR whose node -// selector matches the Node may be used. -type ClusterCIDR struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the desired state of the ClusterCIDR. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// ClusterCIDRSpec defines the desired state of ClusterCIDR. -type ClusterCIDRSpec struct { - // nodeSelector defines which nodes the config is applicable to. - // An empty or nil nodeSelector selects all nodes. - // This field is immutable. - // +optional - NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - - // perNodeHostBits defines the number of host bits to be configured per node. - // A subnet mask determines how much of the address is used for network bits - // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the - // address into 24 bits for the network portion and 8 bits for the host portion. - // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). - // Minimum value is 4 (16 IPs). - // This field is immutable. - // +required - PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` - - // ipv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` - - // ipv6 defines an IPv6 IP block in CIDR notation(e.g. "2001:db8::/64"). - // At least one of ipv4 and ipv6 must be specified. - // This field is immutable. - // +optional - IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +k8s:prerelease-lifecycle-gen:introduced=1.25 - -// ClusterCIDRList contains a list of ClusterCIDR. -type ClusterCIDRList struct { - metav1.TypeMeta `json:",inline"` - - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of ClusterCIDRs. 
- Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` -} - // +genclient // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go index 35105b80bfa..1fca9449844 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -27,38 +27,6 @@ package v1alpha1 // Those methods can be generated by using hack/update-codegen.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ClusterCIDR = map[string]string{ - "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (ClusterCIDR) SwaggerDoc() map[string]string { - return map_ClusterCIDR -} - -var map_ClusterCIDRList = map[string]string{ - "": "ClusterCIDRList contains a list of ClusterCIDR.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of ClusterCIDRs.", -} - -func (ClusterCIDRList) SwaggerDoc() map[string]string { - return map_ClusterCIDRList -} - -var map_ClusterCIDRSpec = map[string]string{ - "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "nodeSelector": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable.", - "perNodeHostBits": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "ipv4": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "ipv6": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", -} - -func (ClusterCIDRSpec) SwaggerDoc() map[string]string { - return map_ClusterCIDRSpec -} - var map_IPAddress = map[string]string{ "": "IPAddress represents a single IP of a single IP Family. The object is designed to be used by APIs that operate on IP addresses. 
The object is used by the Service core API for allocation of IP addresses. An IP address can be represented in different formats, to guarantee the uniqueness of the IP, the name of the object is the IP address in canonical format, four decimal digits separated by dots suppressing leading zeros for IPv4 and the representation defined by RFC 5952 for IPv6. Valid: 192.168.1.5 or 2001:db8::1 or 2001:db8:aaaa:bbbb:cccc:dddd:eeee:1 Invalid: 10.01.2.3 or 2001:db8:0:0:0::1", "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", diff --git a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go index 97db2eacc95..05b66cbfc73 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -22,91 +22,9 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. -func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { - if in == nil { - return nil - } - out := new(ClusterCIDR) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDR) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterCIDR, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. -func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { - if in == nil { - return nil - } - out := new(ClusterCIDRList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = new(v1.NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. 
-func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { - if in == nil { - return nil - } - out := new(ClusterCIDRSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *IPAddress) DeepCopyInto(out *IPAddress) { *out = *in diff --git a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go index 60438ba59fc..8b500f2ea10 100644 --- a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go +++ b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -21,42 +21,6 @@ limitations under the License. package v1alpha1 -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { - return 1, 25 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { - return 1, 28 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { - return 1, 31 -} - -// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. -func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { - return 1, 25 -} - -// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. -func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { - return 1, 28 -} - -// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. -// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. -func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { - return 1, 31 -} - // APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. // It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
func (in *IPAddress) APILifecycleIntroduced() (major, minor int) { diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json deleted file mode 100644 index 59fa006b52c..00000000000 --- a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "kind": "ClusterCIDR", - "apiVersion": "networking.k8s.io/v1alpha1", - "metadata": { - "name": "nameValue", - "generateName": "generateNameValue", - "namespace": "namespaceValue", - "selfLink": "selfLinkValue", - "uid": "uidValue", - "resourceVersion": "resourceVersionValue", - "generation": 7, - "creationTimestamp": "2008-01-01T01:01:01Z", - "deletionTimestamp": "2009-01-01T01:01:01Z", - "deletionGracePeriodSeconds": 10, - "labels": { - "labelsKey": "labelsValue" - }, - "annotations": { - "annotationsKey": "annotationsValue" - }, - "ownerReferences": [ - { - "apiVersion": "apiVersionValue", - "kind": "kindValue", - "name": "nameValue", - "uid": "uidValue", - "controller": true, - "blockOwnerDeletion": true - } - ], - "finalizers": [ - "finalizersValue" - ], - "managedFields": [ - { - "manager": "managerValue", - "operation": "operationValue", - "apiVersion": "apiVersionValue", - "time": "2004-01-01T01:01:01Z", - "fieldsType": "fieldsTypeValue", - "fieldsV1": {}, - "subresource": "subresourceValue" - } - ] - }, - "spec": { - "nodeSelector": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - }, - "perNodeHostBits": 2, - "ipv4": "ipv4Value", - "ipv6": "ipv6Value" - } -} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb deleted file mode 100644 index a4e9113897a..00000000000 Binary files a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb and /dev/null differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml deleted file mode 100644 index fe7a1341fe1..00000000000 --- a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: networking.k8s.io/v1alpha1 -kind: ClusterCIDR -metadata: - annotations: - annotationsKey: annotationsValue - creationTimestamp: "2008-01-01T01:01:01Z" - deletionGracePeriodSeconds: 10 - deletionTimestamp: "2009-01-01T01:01:01Z" - finalizers: - - finalizersValue - generateName: generateNameValue - generation: 7 - labels: - labelsKey: labelsValue - managedFields: - - apiVersion: apiVersionValue - fieldsType: fieldsTypeValue - fieldsV1: {} - manager: managerValue - operation: operationValue - subresource: subresourceValue - time: "2004-01-01T01:01:01Z" - name: nameValue - namespace: namespaceValue - ownerReferences: - - apiVersion: apiVersionValue - blockOwnerDeletion: true - controller: true - kind: kindValue - name: nameValue - uid: uidValue - resourceVersion: resourceVersionValue - selfLink: selfLinkValue - uid: uidValue -spec: - ipv4: ipv4Value - ipv6: ipv6Value - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue 
- values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue - perNodeHostBits: 2 diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.json deleted file mode 100644 index 59fa006b52c..00000000000 --- a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "kind": "ClusterCIDR", - "apiVersion": "networking.k8s.io/v1alpha1", - "metadata": { - "name": "nameValue", - "generateName": "generateNameValue", - "namespace": "namespaceValue", - "selfLink": "selfLinkValue", - "uid": "uidValue", - "resourceVersion": "resourceVersionValue", - "generation": 7, - "creationTimestamp": "2008-01-01T01:01:01Z", - "deletionTimestamp": "2009-01-01T01:01:01Z", - "deletionGracePeriodSeconds": 10, - "labels": { - "labelsKey": "labelsValue" - }, - "annotations": { - "annotationsKey": "annotationsValue" - }, - "ownerReferences": [ - { - "apiVersion": "apiVersionValue", - "kind": "kindValue", - "name": "nameValue", - "uid": "uidValue", - "controller": true, - "blockOwnerDeletion": true - } - ], - "finalizers": [ - "finalizersValue" - ], - "managedFields": [ - { - "manager": "managerValue", - "operation": "operationValue", - "apiVersion": "apiVersionValue", - "time": "2004-01-01T01:01:01Z", - "fieldsType": "fieldsTypeValue", - "fieldsV1": {}, - "subresource": "subresourceValue" - } - ] - }, - "spec": { - "nodeSelector": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - }, - "perNodeHostBits": 2, - "ipv4": "ipv4Value", - "ipv6": "ipv6Value" - } -} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb deleted file mode 100644 index a4e9113897a..00000000000 Binary files a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb and /dev/null differ diff --git a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml deleted file mode 100644 index fe7a1341fe1..00000000000 --- a/staging/src/k8s.io/api/testdata/v1.27.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: networking.k8s.io/v1alpha1 -kind: ClusterCIDR -metadata: - annotations: - annotationsKey: annotationsValue - creationTimestamp: "2008-01-01T01:01:01Z" - deletionGracePeriodSeconds: 10 - deletionTimestamp: "2009-01-01T01:01:01Z" - finalizers: - - finalizersValue - generateName: generateNameValue - generation: 7 - labels: - labelsKey: labelsValue - managedFields: - - apiVersion: apiVersionValue - fieldsType: fieldsTypeValue - fieldsV1: {} - manager: managerValue - operation: operationValue - subresource: subresourceValue - time: "2004-01-01T01:01:01Z" - name: nameValue - namespace: namespaceValue - ownerReferences: - - apiVersion: apiVersionValue - blockOwnerDeletion: true - controller: true - kind: kindValue - name: nameValue - uid: uidValue - resourceVersion: resourceVersionValue - selfLink: selfLinkValue - uid: uidValue -spec: - ipv4: ipv4Value - ipv6: ipv6Value - 
nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue - perNodeHostBits: 2 diff --git a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.json deleted file mode 100644 index 59fa006b52c..00000000000 --- a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "kind": "ClusterCIDR", - "apiVersion": "networking.k8s.io/v1alpha1", - "metadata": { - "name": "nameValue", - "generateName": "generateNameValue", - "namespace": "namespaceValue", - "selfLink": "selfLinkValue", - "uid": "uidValue", - "resourceVersion": "resourceVersionValue", - "generation": 7, - "creationTimestamp": "2008-01-01T01:01:01Z", - "deletionTimestamp": "2009-01-01T01:01:01Z", - "deletionGracePeriodSeconds": 10, - "labels": { - "labelsKey": "labelsValue" - }, - "annotations": { - "annotationsKey": "annotationsValue" - }, - "ownerReferences": [ - { - "apiVersion": "apiVersionValue", - "kind": "kindValue", - "name": "nameValue", - "uid": "uidValue", - "controller": true, - "blockOwnerDeletion": true - } - ], - "finalizers": [ - "finalizersValue" - ], - "managedFields": [ - { - "manager": "managerValue", - "operation": "operationValue", - "apiVersion": "apiVersionValue", - "time": "2004-01-01T01:01:01Z", - "fieldsType": "fieldsTypeValue", - "fieldsV1": {}, - "subresource": "subresourceValue" - } - ] - }, - "spec": { - "nodeSelector": { - "nodeSelectorTerms": [ - { - "matchExpressions": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ], - "matchFields": [ - { - "key": "keyValue", - "operator": "operatorValue", - "values": [ - "valuesValue" - ] - } - ] - } - ] - }, - "perNodeHostBits": 2, - "ipv4": "ipv4Value", - "ipv6": "ipv6Value" - } -} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb deleted file mode 100644 index a4e9113897a..00000000000 Binary files a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.pb and /dev/null differ diff --git a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml deleted file mode 100644 index fe7a1341fe1..00000000000 --- a/staging/src/k8s.io/api/testdata/v1.28.0/networking.k8s.io.v1alpha1.ClusterCIDR.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: networking.k8s.io/v1alpha1 -kind: ClusterCIDR -metadata: - annotations: - annotationsKey: annotationsValue - creationTimestamp: "2008-01-01T01:01:01Z" - deletionGracePeriodSeconds: 10 - deletionTimestamp: "2009-01-01T01:01:01Z" - finalizers: - - finalizersValue - generateName: generateNameValue - generation: 7 - labels: - labelsKey: labelsValue - managedFields: - - apiVersion: apiVersionValue - fieldsType: fieldsTypeValue - fieldsV1: {} - manager: managerValue - operation: operationValue - subresource: subresourceValue - time: "2004-01-01T01:01:01Z" - name: nameValue - namespace: namespaceValue - ownerReferences: - - apiVersion: apiVersionValue - blockOwnerDeletion: true - controller: true - kind: kindValue - name: nameValue - uid: uidValue - resourceVersion: 
resourceVersionValue - selfLink: selfLinkValue - uid: uidValue -spec: - ipv4: ipv4Value - ipv6: ipv6Value - nodeSelector: - nodeSelectorTerms: - - matchExpressions: - - key: keyValue - operator: operatorValue - values: - - valuesValue - matchFields: - - key: keyValue - operator: operatorValue - values: - - valuesValue - perNodeHostBits: 2 diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index 1fcbac1d647..7ab420c4561 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -10252,41 +10252,6 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric -- name: io.k8s.api.networking.v1alpha1.ClusterCIDR - map: - fields: - - name: apiVersion - type: - scalar: string - - name: kind - type: - scalar: string - - name: metadata - type: - namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta - default: {} - - name: spec - type: - namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec - default: {} -- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec - map: - fields: - - name: ipv4 - type: - scalar: string - default: "" - - name: ipv6 - type: - scalar: string - default: "" - - name: nodeSelector - type: - namedType: io.k8s.api.core.v1.NodeSelector - - name: perNodeHostBits - type: - scalar: numeric - default: 0 - name: io.k8s.api.networking.v1alpha1.IPAddress map: fields: diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go deleted file mode 100644 index ad0eae9198e..00000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,247 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - managedfields "k8s.io/apimachinery/pkg/util/managedfields" - internal "k8s.io/client-go/applyconfigurations/internal" - v1 "k8s.io/client-go/applyconfigurations/meta/v1" -) - -// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use -// with apply. -type ClusterCIDRApplyConfiguration struct { - v1.TypeMetaApplyConfiguration `json:",inline"` - *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` - Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` -} - -// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with -// apply. 
-func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration {
-	b := &ClusterCIDRApplyConfiguration{}
-	b.WithName(name)
-	b.WithKind("ClusterCIDR")
-	b.WithAPIVersion("networking.k8s.io/v1alpha1")
-	return b
-}
-
-// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from
-// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a
-// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable),
-// APIVersion and Kind populated. It is possible that no managed fields were found because other
-// field managers have taken ownership of all the fields previously owned by fieldManager, or because
-// the fieldManager never owned any fields.
-// clusterCIDR must be an unmodified ClusterCIDR API object that was retrieved from the Kubernetes API.
-// ExtractClusterCIDR provides a way to perform an extract/modify-in-place/apply workflow.
-// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously
-// applied if another fieldManager has updated or force applied any of the previously applied fields.
-// Experimental!
-func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) {
-	return extractClusterCIDR(clusterCIDR, fieldManager, "")
-}
-
-// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except
-// that it extracts the status subresource applied configuration.
-// Experimental!
-func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) {
-	return extractClusterCIDR(clusterCIDR, fieldManager, "status")
-}
-
-func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) {
-	b := &ClusterCIDRApplyConfiguration{}
-	err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource)
-	if err != nil {
-		return nil, err
-	}
-	b.WithName(clusterCIDR.Name)
-
-	b.WithKind("ClusterCIDR")
-	b.WithAPIVersion("networking.k8s.io/v1alpha1")
-	return b, nil
-}
-
-// WithKind sets the Kind field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Kind field is set to the value of the last call.
-func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration {
-	b.Kind = &value
-	return b
-}
-
-// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the APIVersion field is set to the value of the last call.
-func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration {
-	b.APIVersion = &value
-	return b
-}
-
-// WithName sets the Name field in the declarative configuration to the given value
-// and returns the receiver, so that objects can be built by chaining "With" function invocations.
-// If called multiple times, the Name field is set to the value of the last call.
-func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Name = &value - return b -} - -// WithGenerateName sets the GenerateName field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the GenerateName field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.GenerateName = &value - return b -} - -// WithNamespace sets the Namespace field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Namespace field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Namespace = &value - return b -} - -// WithUID sets the UID field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the UID field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.UID = &value - return b -} - -// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the ResourceVersion field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.ResourceVersion = &value - return b -} - -// WithGeneration sets the Generation field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Generation field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.Generation = &value - return b -} - -// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the CreationTimestamp field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.CreationTimestamp = &value - return b -} - -// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionTimestamp field is set to the value of the last call. 
-func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionTimestamp = &value - return b -} - -// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - b.DeletionGracePeriodSeconds = &value - return b -} - -// WithLabels puts the entries into the Labels field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Labels field, -// overwriting an existing map entries in Labels field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Labels == nil && len(entries) > 0 { - b.Labels = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Labels[k] = v - } - return b -} - -// WithAnnotations puts the entries into the Annotations field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, the entries provided by each call will be put on the Annotations field, -// overwriting an existing map entries in Annotations field with the same key. -func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - if b.Annotations == nil && len(entries) > 0 { - b.Annotations = make(map[string]string, len(entries)) - } - for k, v := range entries { - b.Annotations[k] = v - } - return b -} - -// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the OwnerReferences field. -func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - if values[i] == nil { - panic("nil value passed to WithOwnerReferences") - } - b.OwnerReferences = append(b.OwnerReferences, *values[i]) - } - return b -} - -// WithFinalizers adds the given value to the Finalizers field in the declarative configuration -// and returns the receiver, so that objects can be build by chaining "With" function invocations. -// If called multiple times, values provided by each call will be appended to the Finalizers field. 
-func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { - b.ensureObjectMetaApplyConfigurationExists() - for i := range values { - b.Finalizers = append(b.Finalizers, values[i]) - } - return b -} - -func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { - if b.ObjectMetaApplyConfiguration == nil { - b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} - } -} - -// WithSpec sets the Spec field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the Spec field is set to the value of the last call. -func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { - b.Spec = value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go deleted file mode 100644 index 8d5fa406b09..00000000000 --- a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by applyconfiguration-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/client-go/applyconfigurations/core/v1" -) - -// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use -// with apply. -type ClusterCIDRSpecApplyConfiguration struct { - NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` - PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` - IPv4 *string `json:"ipv4,omitempty"` - IPv6 *string `json:"ipv6,omitempty"` -} - -// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with -// apply. -func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { - return &ClusterCIDRSpecApplyConfiguration{} -} - -// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the NodeSelector field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { - b.NodeSelector = value - return b -} - -// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the PerNodeHostBits field is set to the value of the last call. 
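The With* builders in this deleted file all share the same set-and-return-receiver shape, which only pays off when chained. Here is a hedged sketch of how that chain combined with server-side apply before this removal; the clientset wiring, label, and field-manager name are assumptions, and like the sketch above it builds only against pre-removal releases.

package clustercidrexample

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	applynetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
	"k8s.io/client-go/kubernetes"
)

// applyClusterCIDR chains the removed builders: each With* call sets one
// field and returns the receiver, and Apply sends the result as a
// server-side-apply patch owned by the given field manager.
func applyClusterCIDR(ctx context.Context, cs kubernetes.Interface) error {
	cc := applynetworkingv1alpha1.ClusterCIDR("cidr-set-1"). // hypothetical name
		WithLabels(map[string]string{"team": "netops"}).     // hypothetical label
		WithSpec(applynetworkingv1alpha1.ClusterCIDRSpec().
			WithPerNodeHostBits(8).
			WithIPv4("10.0.0.0/8"))
	_, err := cs.NetworkingV1alpha1().ClusterCIDRs().
		Apply(ctx, cc, metav1.ApplyOptions{FieldManager: "example-controller"})
	return err
}

ExtractClusterCIDR, deleted above, supported the inverse direction: recovering such a configuration from a live object's managedFields before modifying and re-applying it.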
-func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { - b.PerNodeHostBits = &value - return b -} - -// WithIPv4 sets the IPv4 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv4 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv4 = &value - return b -} - -// WithIPv6 sets the IPv6 field in the declarative configuration to the given value -// and returns the receiver, so that objects can be built by chaining "With" function invocations. -// If called multiple times, the IPv6 field is set to the value of the last call. -func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { - b.IPv6 = &value - return b -} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/utils.go b/staging/src/k8s.io/client-go/applyconfigurations/utils.go index 3e01cb9ccaa..11c6776a38c 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/utils.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/utils.go @@ -1293,10 +1293,6 @@ func ForKind(kind schema.GroupVersionKind) interface{} { return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{} // Group=networking.k8s.io, Version=v1alpha1 - case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR"): - return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRApplyConfiguration{} - case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDRSpec"): - return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRSpecApplyConfiguration{} case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddress"): return &applyconfigurationsnetworkingv1alpha1.IPAddressApplyConfiguration{} case networkingv1alpha1.SchemeGroupVersion.WithKind("IPAddressSpec"): diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index 8cff380f4a5..e067de3c19b 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -289,8 +289,6 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil // Group=networking.k8s.io, Version=v1alpha1 - case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): - return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil case networkingv1alpha1.SchemeGroupVersion.WithResource("ipaddresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().IPAddresses().Informer()}, nil diff --git a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go deleted file mode 100644 index cefd0f8a1ee..00000000000 --- a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,89 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by informer-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "context" - time "time" - - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - watch "k8s.io/apimachinery/pkg/watch" - internalinterfaces "k8s.io/client-go/informers/internalinterfaces" - kubernetes "k8s.io/client-go/kubernetes" - v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" - cache "k8s.io/client-go/tools/cache" -) - -// ClusterCIDRInformer provides access to a shared informer and lister for -// ClusterCIDRs. -type ClusterCIDRInformer interface { - Informer() cache.SharedIndexInformer - Lister() v1alpha1.ClusterCIDRLister -} - -type clusterCIDRInformer struct { - factory internalinterfaces.SharedInformerFactory - tweakListOptions internalinterfaces.TweakListOptionsFunc -} - -// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. -func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { - return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil) -} - -// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type. -// Always prefer using an informer factory to get a shared informer instead of getting an independent -// one. This reduces memory footprint and number of connections to the server. 
-func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { - return cache.NewSharedIndexInformer( - &cache.ListWatch{ - ListFunc: func(options v1.ListOptions) (runtime.Object, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options) - }, - WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { - if tweakListOptions != nil { - tweakListOptions(&options) - } - return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options) - }, - }, - &networkingv1alpha1.ClusterCIDR{}, - resyncPeriod, - indexers, - ) -} - -func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { - return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) -} - -func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer { - return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer) -} - -func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister { - return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer()) -} diff --git a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go index 07e7d208ca2..d909f908fe3 100644 --- a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go +++ b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go @@ -24,8 +24,6 @@ import ( // Interface provides access to all the informers in this group version. type Interface interface { - // ClusterCIDRs returns a ClusterCIDRInformer. - ClusterCIDRs() ClusterCIDRInformer // IPAddresses returns a IPAddressInformer. IPAddresses() IPAddressInformer } @@ -41,11 +39,6 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} } -// ClusterCIDRs returns a ClusterCIDRInformer. -func (v *version) ClusterCIDRs() ClusterCIDRInformer { - return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} -} - // IPAddresses returns a IPAddressInformer. func (v *version) IPAddresses() IPAddressInformer { return &iPAddressInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go deleted file mode 100644 index 9df76351db8..00000000000 --- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
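The informer deleted just above recommends the shared factory over a standalone NewClusterCIDRInformer. A sketch of that recommended path follows; the resync period and handler body are illustrative, and again this only builds against client-go releases that still ship the resource.

package clustercidrexample

import (
	"fmt"
	"time"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchClusterCIDRs wires the removed ClusterCIDR informer through the
// shared factory, so the watch connection and cache are shared with any
// other consumer of the same resource in the process.
func watchClusterCIDRs(cs kubernetes.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(cs, 30*time.Minute) // illustrative resync
	inf := factory.Networking().V1alpha1().ClusterCIDRs().Informer()
	inf.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			cc := obj.(*networkingv1alpha1.ClusterCIDR)
			fmt.Printf("observed ClusterCIDR %s (ipv4=%s)\n", cc.Name, cc.Spec.IPv4)
		},
	})
	factory.Start(stopCh)            // launches the shared informers
	factory.WaitForCacheSync(stopCh) // block until the initial List completes
}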
-
-package v1alpha1
-
-import (
-	"context"
-	json "encoding/json"
-	"fmt"
-	"time"
-
-	v1alpha1 "k8s.io/api/networking/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface.
-// A group's client should implement this interface.
-type ClusterCIDRsGetter interface {
-	ClusterCIDRs() ClusterCIDRInterface
-}
-
-// ClusterCIDRInterface has methods to work with ClusterCIDR resources.
-type ClusterCIDRInterface interface {
-	Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error)
-	Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error)
-	Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
-	DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
-	Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error)
-	List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error)
-	Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
-	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error)
-	Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error)
-	ClusterCIDRExpansion
-}
-
-// clusterCIDRs implements ClusterCIDRInterface
-type clusterCIDRs struct {
-	client rest.Interface
-}
-
-// newClusterCIDRs returns a ClusterCIDRs
-func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs {
-	return &clusterCIDRs{
-		client: c.RESTClient(),
-	}
-}
-
-// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any.
-func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	result = &v1alpha1.ClusterCIDR{}
-	err = c.client.Get().
-		Resource("clustercidrs").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors.
-func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	result = &v1alpha1.ClusterCIDRList{}
-	err = c.client.Get().
-		Resource("clustercidrs").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested clusterCIDRs.
-func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
-	opts.Watch = true
-	return c.client.Get().
-		Resource("clustercidrs").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Watch(ctx)
-}
-
-// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
-func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	result = &v1alpha1.ClusterCIDR{}
-	err = c.client.Post().
-		Resource("clustercidrs").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterCIDR).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
-func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	result = &v1alpha1.ClusterCIDR{}
-	err = c.client.Put().
-		Resource("clustercidrs").
-		Name(clusterCIDR.Name).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(clusterCIDR).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs.
-func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("clustercidrs").
-		Name(name).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	var timeout time.Duration
-	if listOpts.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
-	}
-	return c.client.Delete().
-		Resource("clustercidrs").
-		VersionedParams(&listOpts, scheme.ParameterCodec).
-		Timeout(timeout).
-		Body(&opts).
-		Do(ctx).
-		Error()
-}
-
-// Patch applies the patch and returns the patched clusterCIDR.
-func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) {
-	result = &v1alpha1.ClusterCIDR{}
-	err = c.client.Patch(pt).
-		Resource("clustercidrs").
-		Name(name).
-		SubResource(subresources...).
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR.
-func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	if clusterCIDR == nil {
-		return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil")
-	}
-	patchOpts := opts.ToPatchOptions()
-	data, err := json.Marshal(clusterCIDR)
-	if err != nil {
-		return nil, err
-	}
-	name := clusterCIDR.Name
-	if name == nil {
-		return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply")
-	}
-	result = &v1alpha1.ClusterCIDR{}
-	err = c.client.Patch(types.ApplyPatchType).
-		Resource("clustercidrs").
-		Name(*name).
-		VersionedParams(&patchOpts, scheme.ParameterCodec).
-		Body(data).
-		Do(ctx).
-		Into(result)
-	return
-}
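For orientation, the typed client deleted above was reached through the generated clientset's NetworkingV1alpha1() group client. The following sketch is not part of this patch and only compiles against a client-go release that still ships the networking.k8s.io/v1alpha1 ClusterCIDR types; the kubeconfig location and the printed fields are illustrative.

// Sketch only: assumes a pre-removal client-go release and a kubeconfig
// at the default home location. Shows the call path this PR deletes.
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// The getter chain removed by this PR: NetworkingV1alpha1().ClusterCIDRs().
	ccList, err := clientset.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, cc := range ccList.Items {
		fmt.Printf("%s: ipv4=%s ipv6=%s perNodeHostBits=%d\n",
			cc.Name, cc.Spec.IPv4, cc.Spec.IPv6, cc.Spec.PerNodeHostBits)
	}
}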
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go
deleted file mode 100644
index 592e9fc63dc..00000000000
--- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package fake
-
-import (
-	"context"
-	json "encoding/json"
-	"fmt"
-
-	v1alpha1 "k8s.io/api/networking/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	labels "k8s.io/apimachinery/pkg/labels"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
-	testing "k8s.io/client-go/testing"
-)
-
-// FakeClusterCIDRs implements ClusterCIDRInterface
-type FakeClusterCIDRs struct {
-	Fake *FakeNetworkingV1alpha1
-}
-
-var clustercidrsResource = v1alpha1.SchemeGroupVersion.WithResource("clustercidrs")
-
-var clustercidrsKind = v1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR")
-
-// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any.
-func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.ClusterCIDR), err
-}
-
-// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors.
-func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{})
-	if obj == nil {
-		return nil, err
-	}
-
-	label, _, _ := testing.ExtractFromListOptions(opts)
-	if label == nil {
-		label = labels.Everything()
-	}
-	list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta}
-	for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items {
-		if label.Matches(labels.Set(item.Labels)) {
-			list.Items = append(list.Items, item)
-		}
-	}
-	return list, err
-}
-
-// Watch returns a watch.Interface that watches the requested clusterCIDRs.
-func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
-	return c.Fake.
-		InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts))
-}
-
-// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
-func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.ClusterCIDR), err
-}
-
-// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any.
-func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.ClusterCIDR), err
-}
-
-// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs.
-func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
-	_, err := c.Fake.
-		Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{})
-	return err
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
-	action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts)
-
-	_, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{})
-	return err
-}
-
-// Patch applies the patch and returns the patched clusterCIDR.
-func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) {
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.ClusterCIDR), err
-}
-
-// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR.
-func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) {
-	if clusterCIDR == nil {
-		return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil")
-	}
-	data, err := json.Marshal(clusterCIDR)
-	if err != nil {
-		return nil, err
-	}
-	name := clusterCIDR.Name
-	if name == nil {
-		return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply")
-	}
-	obj, err := c.Fake.
-		Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{})
-	if obj == nil {
-		return nil, err
-	}
-	return obj.(*v1alpha1.ClusterCIDR), err
-}
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go
index 2d063836b53..63f5114cf26 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go
@@ -28,10 +28,6 @@ type FakeNetworkingV1alpha1 struct {
 	*testing.Fake
 }
 
-func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface {
-	return &FakeClusterCIDRs{c}
-}
-
 func (c *FakeNetworkingV1alpha1) IPAddresses() v1alpha1.IPAddressInterface {
 	return &FakeIPAddresses{c}
 }
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go
index 9c2979d6c44..0ded7944ee6 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go
@@ -18,6 +18,4 @@ limitations under the License.
 
 package v1alpha1
 
-type ClusterCIDRExpansion interface{}
-
 type IPAddressExpansion interface{}
diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go
index 884c846f598..59bddd70b50 100644
--- a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go
+++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go
@@ -28,7 +28,6 @@ import (
 
 type NetworkingV1alpha1Interface interface {
 	RESTClient() rest.Interface
-	ClusterCIDRsGetter
 	IPAddressesGetter
 }
 
@@ -37,10 +36,6 @@ type NetworkingV1alpha1Client struct {
 	restClient rest.Interface
 }
 
-func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface {
-	return newClusterCIDRs(c)
-}
-
 func (c *NetworkingV1alpha1Client) IPAddresses() IPAddressInterface {
 	return newIPAddresses(c)
 }
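The fake client wiring removed above was the seam unit tests used to exercise ClusterCIDR logic without an API server. A minimal sketch of such a test, again assuming a pre-removal client-go; the test name and CIDR values are made up.

// Sketch of a unit test against the removed fake client. Assumes a
// client-go release that still ships the v1alpha1 ClusterCIDR types.
package example

import (
	"context"
	"testing"

	v1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func TestClusterCIDRFakeClient(t *testing.T) {
	// NewSimpleClientset tracks objects in memory; no API server needed.
	client := fake.NewSimpleClientset(&v1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example-cidr"},
		Spec: v1alpha1.ClusterCIDRSpec{
			IPv4:            "10.0.0.0/8",
			PerNodeHostBits: 8,
		},
	})
	got, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), "example-cidr", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Spec.IPv4 != "10.0.0.0/8" {
		t.Errorf("got %q, want %q", got.Spec.IPv4, "10.0.0.0/8")
	}
}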
diff --git a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go
deleted file mode 100644
index dca9d7bf0cb..00000000000
--- a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by lister-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "k8s.io/api/networking/v1alpha1"
-	"k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/client-go/tools/cache"
-)
-
-// ClusterCIDRLister helps list ClusterCIDRs.
-// All objects returned here must be treated as read-only.
-type ClusterCIDRLister interface {
-	// List lists all ClusterCIDRs in the indexer.
-	// Objects returned here must be treated as read-only.
-	List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error)
-	// Get retrieves the ClusterCIDR from the index for a given name.
-	// Objects returned here must be treated as read-only.
-	Get(name string) (*v1alpha1.ClusterCIDR, error)
-	ClusterCIDRListerExpansion
-}
-
-// clusterCIDRLister implements the ClusterCIDRLister interface.
-type clusterCIDRLister struct {
-	indexer cache.Indexer
-}
-
-// NewClusterCIDRLister returns a new ClusterCIDRLister.
-func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister {
-	return &clusterCIDRLister{indexer: indexer}
-}
-
-// List lists all ClusterCIDRs in the indexer.
-func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) {
-	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
-		ret = append(ret, m.(*v1alpha1.ClusterCIDR))
-	})
-	return ret, err
-}
-
-// Get retrieves the ClusterCIDR from the index for a given name.
-func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) {
-	obj, exists, err := s.indexer.GetByKey(name)
-	if err != nil {
-		return nil, err
-	}
-	if !exists {
-		return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name)
-	}
-	return obj.(*v1alpha1.ClusterCIDR), nil
-}
diff --git a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
index d57b71b0059..afa9aabcdcd 100644
--- a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
+++ b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go
@@ -18,10 +18,6 @@ limitations under the License.
 
 package v1alpha1
 
-// ClusterCIDRListerExpansion allows custom methods to be added to
-// ClusterCIDRLister.
-type ClusterCIDRListerExpansion interface{}
-
 // IPAddressListerExpansion allows custom methods to be added to
 // IPAddressLister.
 type IPAddressListerExpansion interface{}
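Together, the informer deleted earlier and the lister deleted here formed the usual cached read path for controllers. A sketch of how they were consumed, once more assuming a pre-removal client-go; the resync period and handler body are illustrative.

// Sketch of the informer/lister consumption pattern this PR removes.
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

func watchClusterCIDRs(clientset kubernetes.Interface, stopCh <-chan struct{}) error {
	factory := informers.NewSharedInformerFactory(clientset, 10*time.Minute)
	informer := factory.Networking().V1alpha1().ClusterCIDRs()

	informer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("ClusterCIDR added") },
	})

	factory.Start(stopCh)
	if !cache.WaitForCacheSync(stopCh, informer.Informer().HasSynced) {
		return fmt.Errorf("cache never synced")
	}

	// The lister serves reads from the informer's local index.
	all, err := informer.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("found %d ClusterCIDRs\n", len(all))
	return nil
}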
diff --git a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json
index 9d36d38a155..51b5f5d1ff9 100644
--- a/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json
+++ b/staging/src/k8s.io/client-go/openapi/openapitest/testdata/apis__networking.k8s.io__v1alpha1_openapi.json
@@ -82,123 +82,6 @@
         "type": "object",
         "x-kubernetes-map-type": "atomic"
       },
-      "io.k8s.api.networking.v1alpha1.ClusterCIDR": {
-        "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.",
-        "properties": {
-          "apiVersion": {
-            "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
-            "type": "string"
-          },
-          "kind": {
-            "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
-            "type": "string"
-          },
-          "metadata": {
-            "allOf": [
-              {
-                "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
-              }
-            ],
-            "default": {},
-            "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
-          },
-          "spec": {
-            "allOf": [
-              {
-                "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec"
-              }
-            ],
-            "default": {},
-            "description": "spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status"
-          }
-        },
-        "type": "object",
-        "x-kubernetes-group-version-kind": [
-          {
-            "group": "networking.k8s.io",
-            "kind": "ClusterCIDR",
-            "version": "v1alpha1"
-          }
-        ]
-      },
-      "io.k8s.api.networking.v1alpha1.ClusterCIDRList": {
-        "description": "ClusterCIDRList contains a list of ClusterCIDR.",
-        "properties": {
-          "apiVersion": {
-            "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
-            "type": "string"
-          },
-          "items": {
-            "description": "items is the list of ClusterCIDRs.",
-            "items": {
-              "allOf": [
-                {
-                  "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR"
-                }
-              ],
-              "default": {}
-            },
-            "type": "array"
-          },
-          "kind": {
-            "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
-            "type": "string"
-          },
-          "metadata": {
-            "allOf": [
-              {
-                "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
-              }
-            ],
-            "default": {},
-            "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
-          }
-        },
-        "required": [
-          "items"
-        ],
-        "type": "object",
-        "x-kubernetes-group-version-kind": [
-          {
-            "group": "networking.k8s.io",
-            "kind": "ClusterCIDRList",
-            "version": "v1alpha1"
-          }
-        ]
-      },
-      "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { - "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", - "properties": { - "ipv4": { - "default": "", - "description": "ipv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of ipv4 and ipv6 must be specified. 
This field is immutable.", - "type": "string" - }, - "ipv6": { - "default": "", - "description": "ipv6 defines an IPv6 IP block in CIDR notation(e.g. \"2001:db8::/64\"). At least one of ipv4 and ipv6 must be specified. This field is immutable.", - "type": "string" - }, - "nodeSelector": { - "allOf": [ - { - "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" - } - ], - "description": "nodeSelector defines which nodes the config is applicable to. An empty or nil nodeSelector selects all nodes. This field is immutable." - }, - "perNodeHostBits": { - "default": 0, - "description": "perNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", - "format": "int32", - "type": "integer" - } - }, - "required": [ - "perNodeHostBits" - ], - "type": "object" - }, "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { "description": "APIResource specifies the name of a resource and whether it is namespaced.", "properties": { @@ -1361,1090 +1244,6 @@ "networking_v1alpha1" ] } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs": { - "delete": { - "description": "delete collection of ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", - "parameters": [ - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "deletecollection", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "description": "list or watch objects of kind ClusterCIDR", - "operationId": "listNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. 
Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "list", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "post": { - "description": "create a ClusterCIDR", - "operationId": "createNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "post", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { - "delete": { - "description": "delete a ClusterCIDR", - "operationId": "deleteNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", - "in": "query", - "name": "gracePeriodSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", - "in": "query", - "name": "orphanDependents", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "Whether and how garbage collection will be performed. 
Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", - "in": "query", - "name": "propagationPolicy", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "OK" - }, - "202": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" - } - } - }, - "description": "Accepted" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "delete", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "get": { - "description": "read the specified ClusterCIDR", - "operationId": "readNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "get", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "patch": { - "description": "partially update the specified ClusterCIDR", - "operationId": "patchNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", - "in": "query", - "name": "force", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "application/apply-patch+yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/json-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - }, - "application/strategic-merge-patch+json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "patch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "put": { - "description": "replace the specified ClusterCIDR", - "operationId": "replaceNetworkingV1alpha1ClusterCIDR", - "parameters": [ - { - "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "in": "query", - "name": "dryRun", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", - "in": "query", - "name": "fieldManager", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. 
This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", - "in": "query", - "name": "fieldValidation", - "schema": { - "type": "string", - "uniqueItems": true - } - } - ], - "requestBody": { - "content": { - "*/*": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - } - }, - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "OK" - }, - "201": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" - } - } - }, - "description": "Created" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "put", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - } - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { - "get": { - "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", - "operationId": "watchNetworkingV1alpha1ClusterCIDRList", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watchlist", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] - }, - "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { - "get": { - "description": "watch changes to an object of kind ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", - "operationId": "watchNetworkingV1alpha1ClusterCIDR", - "responses": { - "200": { - "content": { - "application/json": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/json;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/vnd.kubernetes.protobuf;stream=watch": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - }, - "application/yaml": { - "schema": { - "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" - } - } - }, - "description": "OK" - }, - "401": { - "description": "Unauthorized" - } - }, - "tags": [ - "networking_v1alpha1" - ], - "x-kubernetes-action": "watch", - "x-kubernetes-group-version-kind": { - "group": "networking.k8s.io", - "kind": "ClusterCIDR", - "version": "v1alpha1" - } - }, - "parameters": [ - { - "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
If this is not a watch, this field is ignored.", - "in": "query", - "name": "allowWatchBookmarks", - "schema": { - "type": "boolean", - "uniqueItems": true - } - }, - { - "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", - "in": "query", - "name": "continue", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "in": "query", - "name": "fieldSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "in": "query", - "name": "labelSelector", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "in": "query", - "name": "limit", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "name of the ClusterCIDR", - "in": "path", - "name": "name", - "required": true, - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "If 'true', then the output is pretty printed.", - "in": "query", - "name": "pretty", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersion", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", - "in": "query", - "name": "resourceVersionMatch", - "schema": { - "type": "string", - "uniqueItems": true - } - }, - { - "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "in": "query", - "name": "timeoutSeconds", - "schema": { - "type": "integer", - "uniqueItems": true - } - }, - { - "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "in": "query", - "name": "watch", - "schema": { - "type": "boolean", - "uniqueItems": true - } - } - ] } } } diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go index d88095ac2a7..2e0e2ec696b 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go @@ -215,7 +215,6 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescr {Group: networkingv1beta1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, {Group: networkingv1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, {Group: networkingv1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, - {Group: networkingv1alpha1.GroupName, Kind: "ClusterCIDR"}: &ClusterCIDRDescriber{c}, {Group: networkingv1alpha1.GroupName, Kind: "IPAddress"}: &IPAddressDescriber{c}, {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, @@ -2845,63 +2844,6 @@ func (i *IngressClassDescriber) describeIngressClassV1(ic *networkingv1.IngressC }) } -// ClusterCIDRDescriber generates information about a ClusterCIDR. 
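The pagination parameters removed above (`limit`, `continue`, `resourceVersion`) define the chunked-list contract the spec text describes: a client passes `limit`, reuses the returned `continue` token until it comes back empty, and restarts the whole list if the server answers 410 ResourceExpired. A minimal client-go sketch of that loop (listing core/v1 Pods purely for illustration, since the ClusterCIDR endpoints are what this diff deletes):

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs := kubernetes.NewForConfigOrDie(cfg)

	opts := metav1.ListOptions{Limit: 50} // the server may return fewer items per page
	for {
		pods, err := cs.CoreV1().Pods("").List(context.TODO(), opts)
		if err != nil {
			// A 410 ResourceExpired here means the continue token lapsed;
			// per the parameter description above, restart the list from scratch.
			panic(err)
		}
		for i := range pods.Items {
			fmt.Println(pods.Items[i].Name)
		}
		if pods.Continue == "" { // empty token: the last page has been served
			break
		}
		opts.Continue = pods.Continue
	}
}
```

The removed `ClusterCIDRDescriber` implementation that the `describerMap` hunk above unregisters follows.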
-type ClusterCIDRDescriber struct { - client clientset.Interface -} - -func (c *ClusterCIDRDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { - var events *corev1.EventList - - ccV1alpha1, err := c.client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), name, metav1.GetOptions{}) - if err == nil { - if describerSettings.ShowEvents { - events, _ = searchEvents(c.client.CoreV1(), ccV1alpha1, describerSettings.ChunkSize) - } - return c.describeClusterCIDRV1alpha1(ccV1alpha1, events) - } - return "", err -} - -func (c *ClusterCIDRDescriber) describeClusterCIDRV1alpha1(cc *networkingv1alpha1.ClusterCIDR, events *corev1.EventList) (string, error) { - return tabbedString(func(out io.Writer) error { - w := NewPrefixWriter(out) - w.Write(LEVEL_0, "Name:\t%v\n", cc.Name) - printLabelsMultiline(w, "Labels", cc.Labels) - printAnnotationsMultiline(w, "Annotations", cc.Annotations) - - w.Write(LEVEL_0, "NodeSelector:\n") - if cc.Spec.NodeSelector != nil { - w.Write(LEVEL_1, "NodeSelector Terms:") - if len(cc.Spec.NodeSelector.NodeSelectorTerms) == 0 { - w.WriteLine("") - } else { - w.WriteLine("") - for i, term := range cc.Spec.NodeSelector.NodeSelectorTerms { - printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions) - } - } - } - - if cc.Spec.PerNodeHostBits != 0 { - w.Write(LEVEL_0, "PerNodeHostBits:\t%s\n", fmt.Sprint(cc.Spec.PerNodeHostBits)) - } - - if cc.Spec.IPv4 != "" { - w.Write(LEVEL_0, "IPv4:\t%s\n", cc.Spec.IPv4) - } - - if cc.Spec.IPv6 != "" { - w.Write(LEVEL_0, "IPv6:\t%s\n", cc.Spec.IPv6) - } - - if events != nil { - DescribeEvents(events, w) - } - return nil - }) -} - // IPAddressDescriber generates information about an IPAddress. type IPAddressDescriber struct { client clientset.Interface diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go index 30c29c1fd8e..f78d961fd0d 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go @@ -5932,64 +5932,6 @@ Events: ` + "\n", } } -func TestDescribeClusterCIDR(t *testing.T) { - - testcases := map[string]struct { - input *fake.Clientset - output string - }{ - "ClusterCIDR v1alpha1": { - input: fake.NewSimpleClientset(&networkingv1alpha1.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: "foo.123", - }, - Spec: networkingv1alpha1.ClusterCIDRSpec{ - PerNodeHostBits: int32(8), - IPv4: "10.1.0.0/16", - IPv6: "fd00:1:1::/64", - NodeSelector: &corev1.NodeSelector{ - NodeSelectorTerms: []corev1.NodeSelectorTerm{ - { - MatchExpressions: []corev1.NodeSelectorRequirement{ - { - Key: "foo", - Operator: "In", - Values: []string{"bar"}}, - }, - }, - }, - }, - }, - }), - - output: `Name: foo.123 -Labels: -Annotations: -NodeSelector: - NodeSelector Terms: - Term 0: foo in [bar] -PerNodeHostBits: 8 -IPv4: 10.1.0.0/16 -IPv6: fd00:1:1::/64 -Events: ` + "\n", - }, - } - - for name, tc := range testcases { - t.Run(name, func(t *testing.T) { - c := &describeClient{T: t, Namespace: "foo", Interface: tc.input} - d := ClusterCIDRDescriber{c} - out, err := d.Describe("bar", "foo.123", DescriberSettings{ShowEvents: true}) - if err != nil { - t.Errorf("unexpected error: %v", err) - } - if out != tc.output { - t.Errorf("expected :\n%s\nbut got output:\n%s diff:\n%s", tc.output, out, cmp.Diff(tc.output, out)) - } - }) - } -} - func TestDescribeIPAddress(t *testing.T) { testcases := map[string]struct { diff --git 
a/test/integration/clustercidr/ipam_test.go b/test/integration/clustercidr/ipam_test.go deleted file mode 100644 index d083cb8db1f..00000000000 --- a/test/integration/clustercidr/ipam_test.go +++ /dev/null @@ -1,650 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clustercidr - -import ( - "context" - "net" - "reflect" - "testing" - "time" - - v1 "k8s.io/api/core/v1" - networkingv1alpha1 "k8s.io/api/networking/v1alpha1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/wait" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/client-go/informers" - clientset "k8s.io/client-go/kubernetes" - featuregatetesting "k8s.io/component-base/featuregate/testing" - "k8s.io/klog/v2/ktesting" - "k8s.io/kubernetes/cmd/kube-apiserver/app/options" - "k8s.io/kubernetes/pkg/controller/nodeipam" - "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/test/integration/framework" - netutils "k8s.io/utils/net" -) - -func TestIPAMMultiCIDRRangeAllocatorCIDRAllocate(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // set the feature gate to enable MultiCIDRRangeAllocator - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ - ModifyServerRunOptions: func(opts *options.ServerRunOptions) { - // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
- opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} - opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true") - }, - }) - defer tearDownFn() - - clientSet := clientset.NewForConfigOrDie(kubeConfig) - sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) - - ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - - go ipamController.Run(ctx) - sharedInformer.Start(ctx.Done()) - - tests := []struct { - name string - clusterCIDR *networkingv1alpha1.ClusterCIDR - node *v1.Node - expectedPodCIDRs []string - }{ - { - name: "Default dualstack Pod CIDRs assigned to a node, node labels matching no ClusterCIDR nodeSelectors", - clusterCIDR: nil, - node: makeNode("default-node", map[string]string{"label": "unmatched"}), - expectedPodCIDRs: []string{"10.96.0.0/24", "fd00:10:96::/120"}, - }, - { - name: "Dualstack Pod CIDRs assigned to a node from a CC created during bootstrap", - clusterCIDR: nil, - node: makeNode("bootstrap-node", map[string]string{"bootstrap": "true"}), - expectedPodCIDRs: []string{"10.2.1.0/24", "fd00:20:96::100/120"}, - }, - { - name: "Single stack IPv4 Pod CIDR assigned to a node", - clusterCIDR: makeClusterCIDR("ipv4-cc", "10.0.0.0/16", "", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "singlestack": {"true"}})), - node: makeNode("ipv4-node", map[string]string{"ipv4": "true", "singlestack": "true"}), - expectedPodCIDRs: []string{"10.0.0.0/24"}, - }, - { - name: "Single stack IPv6 Pod CIDR assigned to a node", - clusterCIDR: makeClusterCIDR("ipv6-cc", "", "fd00:20:100::/112", 8, nodeSelector(map[string][]string{"ipv6": {"true"}})), - node: makeNode("ipv6-node", map[string]string{"ipv6": "true"}), - expectedPodCIDRs: []string{"fd00:20:100::/120"}, - }, - { - name: "DualStack Pod CIDRs assigned to a node", - clusterCIDR: makeClusterCIDR("dualstack-cc", "192.168.0.0/16", "fd00:30:100::/112", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}), - expectedPodCIDRs: []string{"192.168.0.0/24", "fd00:30:100::/120"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - if test.clusterCIDR != nil { - // Create the test ClusterCIDR - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), test.clusterCIDR, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - } - - // Sleep for one second to make sure the controller process the new created ClusterCIDR. 
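The fixed one-second sleeps in these tests are a classic flake source: they assume the controller has observed the new ClusterCIDR within the window. A hedged alternative (sketch only, using the identifiers already in scope in this test) polls until the object is at least readable through the API; note this narrows, but does not eliminate, the race, since visibility via GET does not prove the allocator's informer has delivered the object:

```go
// Poll until the just-created ClusterCIDR is readable before moving on,
// instead of sleeping for a fixed interval.
if test.clusterCIDR != nil {
	if err := wait.PollImmediate(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
		_, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), test.clusterCIDR.Name, metav1.GetOptions{})
		if apierrors.IsNotFound(err) {
			return false, nil // not observable yet; keep polling
		}
		return err == nil, err
	}); err != nil {
		t.Fatal(err)
	}
}
```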
- time.Sleep(1 * time.Second) - - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs) - } - }) - } -} - -func TestIPAMMultiCIDRRangeAllocatorCIDRRelease(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // set the feature gate to enable MultiCIDRRangeAllocator - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ - ModifyServerRunOptions: func(opts *options.ServerRunOptions) { - // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. - opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} - opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true") - }, - }) - defer tearDownFn() - - clientSet := clientset.NewForConfigOrDie(kubeConfig) - sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) - - ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - - go ipamController.Run(ctx) - sharedInformer.Start(ctx.Done()) - - t.Run("Pod CIDR release after node delete", func(t *testing.T) { - // Create the test ClusterCIDR. - clusterCIDR := makeClusterCIDR("dualstack-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})) - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - - // Sleep for one second to make sure the controller process the new created ClusterCIDR. - time.Sleep(1 * time.Second) - - // Create 1st node and validate that Pod CIDRs are correctly assigned. - node1 := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}) - expectedPodCIDRs1 := []string{"192.168.0.0/24", "fd00:30:100::/120"} - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node1, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, node1.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(expectedPodCIDRs1, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs1, gotPodCIDRs) - } - - // Create 2nd node and validate that Pod CIDRs are correctly assigned. - node2 := makeNode("dualstack-node-2", map[string]string{"ipv4": "true", "ipv6": "true"}) - expectedPodCIDRs2 := []string{"192.168.1.0/24", "fd00:30:100::100/120"} - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, node2.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(expectedPodCIDRs2, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs2, gotPodCIDRs) - } - - // Delete the 1st node, to validate that the PodCIDRs are released. 
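The expected values in these assertions follow directly from `perNodeHostBits`: a node's prefix length is the address width minus the host bits, so the `/23` IPv4 range yields exactly two `/24` per-node blocks and the `/119` IPv6 range exactly two `/120` blocks — which is why a third node cannot be placed until the first node's CIDRs are released. As a small sketch of that arithmetic:

```go
// Per-node prefix length = address width - spec.perNodeHostBits.
// 32 - 8 = /24 for "192.168.0.0/23" (two blocks: 192.168.0.0/24, 192.168.1.0/24);
// 128 - 8 = /120 for "fd00:30:100::/119" (two blocks) — hence exactly two nodes
// fit before a release is required.
func perNodePrefixLen(ipv6 bool, perNodeHostBits int32) int {
	bits := 32
	if ipv6 {
		bits = 128
	}
	return bits - int(perNodeHostBits)
}
```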
- if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), node1.Name, metav1.DeleteOptions{}); err != nil { - t.Fatal(err) - } - - // Create 3rd node, validate that it has Pod CIDRs assigned from the released CIDR. - node3 := makeNode("dualstack-node-3", map[string]string{"ipv4": "true", "ipv6": "true"}) - expectedPodCIDRs3 := []string{"192.168.0.0/24", "fd00:30:100::/120"} - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node3, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, node3.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(expectedPodCIDRs3, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs3, gotPodCIDRs) - } - }) -} - -func TestIPAMMultiCIDRRangeAllocatorClusterCIDRDelete(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // set the feature gate to enable MultiCIDRRangeAllocator. - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ - ModifyServerRunOptions: func(opts *options.ServerRunOptions) { - // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. - opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} - opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true") - }, - }) - defer tearDownFn() - - clientSet := clientset.NewForConfigOrDie(kubeConfig) - sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) - - ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - - go ipamController.Run(ctx) - sharedInformer.Start(ctx.Done()) - - t.Run("delete cc with node associated", func(t *testing.T) { - - // Create a ClusterCIDR. - clusterCIDR := makeClusterCIDR("dualstack-cc-del", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})) - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - - // Sleep for one second to make sure the controller processes the newly created ClusterCIDR. - time.Sleep(1 * time.Second) - - // Create a node, which gets pod CIDR from the clusterCIDR created above. - node := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}) - expectedPodCIDRs := []string{"192.168.0.0/24", "fd00:30:100::/120"} - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, node.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(expectedPodCIDRs, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs, gotPodCIDRs) - } - - // Delete the ClusterCIDR. - if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil { - t.Fatal(err) - } - - // Sleep for five seconds to make sure the ClusterCIDR exists with a deletion timestamp after marked for deletion. - time.Sleep(5 * time.Second) - - // Make sure that the ClusterCIDR is not deleted, as there is a node associated with it. 
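The assertion that follows leans on the standard finalizer pattern: a delete call only sets `metadata.deletionTimestamp` while a finalizer is present, and the object disappears once the controller clears that finalizer. A generic helper for detecting the terminating state might look like this (hypothetical name, sketch only):

```go
// isTerminating reports whether an object has been marked for deletion but is
// still held by a finalizer (deletionTimestamp set, object still readable).
func isTerminating(obj metav1.Object) bool {
	ts := obj.GetDeletionTimestamp()
	return ts != nil && !ts.IsZero()
}
```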
- cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if cc == nil { - t.Fatalf("expected Cluster CIDR got nil") - } - if cc.DeletionTimestamp.IsZero() { - t.Fatalf("expected Cluster CIDR to have set a deletion timestamp ") - } - - //Delete the node. - if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), node.Name, metav1.DeleteOptions{}); err != nil { - t.Fatal(err) - } - - // Poll to make sure that the Node is deleted. - if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - _, err := clientSet.CoreV1().Nodes().Get(context.TODO(), node.Name, metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - return apierrors.IsNotFound(err), nil - }); err != nil { - t.Fatalf("failed while waiting for Node %q to be deleted: %v", node.Name, err) - } - - // Poll to make sure that the ClusterCIDR is now deleted, as there is no node associated with it. - if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - return apierrors.IsNotFound(err), nil - }); err != nil { - t.Fatalf("failed while waiting for ClusterCIDR %q to be deleted: %v", clusterCIDR.Name, err) - } - }) -} - -func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTerminate(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // set the feature gate to enable MultiCIDRRangeAllocator. - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ - ModifyServerRunOptions: func(opts *options.ServerRunOptions) { - // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. - opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} - opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true") - }, - }) - defer tearDownFn() - - clientSet := clientset.NewForConfigOrDie(kubeConfig) - sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) - - ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - - go ipamController.Run(ctx) - sharedInformer.Start(ctx.Done()) - - t.Run("Pod CIDRS must not be allocated from a terminating CC", func(t *testing.T) { - - // Create a ClusterCIDR which is the best match based on number of matching labels. - clusterCIDR := makeClusterCIDR("dualstack-cc-del", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})) - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - - // Create a ClusterCIDR which has fewer matching labels than the previous ClusterCIDR. 
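Selection among eligible ClusterCIDRs is ordered, and the TieBreak test further below walks the precedence chain one case at a time. Read together, its cases imply the following comparator — a reconstruction from the test expectations, not the allocator's actual code:

```go
// Inferred precedence among eligible ClusterCIDRs (earlier rule wins):
// more matching node-selector labels; fewer allocatable per-node CIDRs;
// lower perNodeHostBits; alphanumerically smaller selector value;
// alphanumerically smaller network address.
type candidate struct {
	matchingLabels   int
	allocatableCIDRs int
	perNodeHostBits  int32
	selectorValue    string
	networkAddr      string
}

func preferred(a, b candidate) bool {
	switch {
	case a.matchingLabels != b.matchingLabels:
		return a.matchingLabels > b.matchingLabels
	case a.allocatableCIDRs != b.allocatableCIDRs:
		return a.allocatableCIDRs < b.allocatableCIDRs
	case a.perNodeHostBits != b.perNodeHostBits:
		return a.perNodeHostBits < b.perNodeHostBits
	case a.selectorValue != b.selectorValue:
		return a.selectorValue < b.selectorValue
	default:
		return a.networkAddr < b.networkAddr
	}
}
```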
- clusterCIDR2 := makeClusterCIDR("few-label-match-cc-del", "10.1.0.0/23", "fd12:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}})) - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR2, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - - // Sleep for one second to make sure the controller processes the newly created ClusterCIDR. - time.Sleep(1 * time.Second) - - // Create a node, which gets pod CIDR from the clusterCIDR created above. - node := makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}) - expectedPodCIDRs := []string{"192.168.0.0/24", "fd00:30:100::/120"} - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, node.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(expectedPodCIDRs, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs, gotPodCIDRs) - } - - // Delete the ClusterCIDR - if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil { - t.Fatal(err) - } - - // Make sure that the ClusterCIDR is not deleted, as there is a node associated with it. - cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{}) - if err != nil { - t.Fatal(err) - } - if cc == nil { - t.Fatalf("expected Cluster CIDR got nil") - } - if cc.DeletionTimestamp.IsZero() { - t.Fatalf("expected Cluster CIDR to have set a deletion timestamp ") - } - - // Create a node, which should get Pod CIDRs from the ClusterCIDR with fewer matching label Count, - // as the best match ClusterCIDR is marked as terminating. - node2 := makeNode("dualstack-node-2", map[string]string{"ipv4": "true", "ipv6": "true"}) - expectedPodCIDRs2 := []string{"10.1.0.0/24", "fd12:30:100::/120"} - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), node2, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs2, err := nodePodCIDRs(clientSet, node2.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(expectedPodCIDRs2, gotPodCIDRs2) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", expectedPodCIDRs2, gotPodCIDRs2) - } - }) -} - -func TestIPAMMultiCIDRRangeAllocatorClusterCIDRTieBreak(t *testing.T) { - _, ctx := ktesting.NewTestContext(t) - ctx, cancel := context.WithCancel(ctx) - defer cancel() - - // set the feature gate to enable MultiCIDRRangeAllocator - defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() - - _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, t, framework.TestServerSetup{ - ModifyServerRunOptions: func(opts *options.ServerRunOptions) { - // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
- opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} - opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true") - }, - }) - defer tearDownFn() - - clientSet := clientset.NewForConfigOrDie(kubeConfig) - sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) - - ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) - - go ipamController.Run(ctx) - sharedInformer.Start(ctx.Done()) - - tests := []struct { - name string - clusterCIDRs []*networkingv1alpha1.ClusterCIDR - node *v1.Node - expectedPodCIDRs []string - }{ - { - name: "ClusterCIDR with highest matching labels", - clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{ - makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})), - makeClusterCIDR("double-label-match-cc", "10.0.0.0/23", "fd12:30:200::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - }, - node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}), - expectedPodCIDRs: []string{"10.0.0.0/24", "fd12:30:200::/120"}, - }, - { - name: "ClusterCIDR with fewer allocatable Pod CIDRs", - clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{ - makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})), - makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - }, - node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}), - expectedPodCIDRs: []string{"172.16.0.0/24", "fd34:30:100::/120"}, - }, - { - name: "ClusterCIDR with lower perNodeHostBits", - clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{ - makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})), - makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - }, - node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true", "match": "single"}), - expectedPodCIDRs: []string{"172.31.0.0/25", "fd35:30:100::/121"}, - }, - { - name: "ClusterCIDR with label having lower alphanumeric value", - clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{ - makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})), - makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": 
{"true"}})), - makeClusterCIDR("low-alpha-cc", "192.169.0.0/24", "fd12:40:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})), - }, - node: makeNode("dualstack-node", map[string]string{"apv4": "true", "bpv6": "true", "ipv4": "true", "ipv6": "true", "match": "single"}), - expectedPodCIDRs: []string{"192.169.0.0/25", "fd12:40:100::/121"}, - }, - { - name: "ClusterCIDR with alphanumerically smaller IP address", - clusterCIDRs: []*networkingv1alpha1.ClusterCIDR{ - makeClusterCIDR("single-label-match-cc", "192.168.0.0/23", "fd00:30:100::/119", 8, nodeSelector(map[string][]string{"match": {"single"}})), - makeClusterCIDR("double-label-match-cc", "10.0.0.0/20", "fd12:30:200::/116", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("few-alloc-cc", "172.16.0.0/23", "fd34:30:100::/119", 8, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("low-pernodehostbits-cc", "172.31.0.0/24", "fd35:30:100::/120", 7, nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), - makeClusterCIDR("low-alpha-cc", "192.169.0.0/24", "fd12:40:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})), - makeClusterCIDR("low-ip-cc", "10.1.0.0/24", "fd00:10:100::/120", 7, nodeSelector(map[string][]string{"apv4": {"true"}, "bpv6": {"true"}})), - }, - node: makeNode("dualstack-node", map[string]string{"apv4": "true", "bpv6": "true", "ipv4": "true", "ipv6": "true", "match": "single"}), - expectedPodCIDRs: []string{"10.1.0.0/25", "fd00:10:100::/121"}, - }, - } - - for _, test := range tests { - t.Run(test.name, func(t *testing.T) { - for _, clusterCIDR := range test.clusterCIDRs { - // Create the test ClusterCIDR - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), clusterCIDR, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - } - - // Sleep for one second to make sure the controller process the new created ClusterCIDR. - time.Sleep(1 * time.Second) - - // Create a node and validate that Pod CIDRs are correctly assigned. - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil { - t.Fatal(err) - } else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) { - t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs) - } - - // Delete the node. - if err := clientSet.CoreV1().Nodes().Delete(context.TODO(), test.node.Name, metav1.DeleteOptions{}); err != nil { - t.Fatal(err) - } - - // Wait till the Node is deleted. - if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - _, err := clientSet.CoreV1().Nodes().Get(context.TODO(), test.node.Name, metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - return apierrors.IsNotFound(err), nil - }); err != nil { - t.Fatalf("failed while waiting for Node %q to be deleted: %v", test.node.Name, err) - } - - // Delete the Cluster CIDRs. - for _, clusterCIDR := range test.clusterCIDRs { - // Delete the test ClusterCIDR. - if err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Delete(context.TODO(), clusterCIDR.Name, metav1.DeleteOptions{}); err != nil { - t.Fatal(err) - } - - // Wait till the ClusterCIDR is deleted. 
- if err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), clusterCIDR.Name, metav1.GetOptions{}) - if err != nil && !apierrors.IsNotFound(err) { - return false, err - } - return apierrors.IsNotFound(err), nil - }); err != nil { - t.Fatalf("failed while waiting for ClusterCIDR %q to be deleted: %v", clusterCIDR.Name, err) - } - } - }) - } -} - -func booststrapMultiCIDRRangeAllocator(t *testing.T, - clientSet clientset.Interface, - sharedInformer informers.SharedInformerFactory, -) *nodeipam.Controller { - _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.96.0.0/12") // allows up to 8K nodes - _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("fd00:10:96::/112") // allows up to 8K nodes - _, serviceCIDR, _ := netutils.ParseCIDRSloppy("10.94.0.0/24") // does not matter for test - pick upto 250 services - _, secServiceCIDR, _ := netutils.ParseCIDRSloppy("2001:db2::/120") // does not matter for test - pick upto 250 services - - // order is ipv4 - ipv6 by convention for dual stack - clusterCIDRs := []*net.IPNet{clusterCIDRv4, clusterCIDRv6} - nodeMaskCIDRs := []int{24, 120} - - // set the current state of the informer, we can pre-seed nodes and ClusterCIDRs, so that we - // can simulate the bootstrap - initialCC := makeClusterCIDR("initial-cc", "10.2.0.0/16", "fd00:20:96::/112", 8, nodeSelector(map[string][]string{"bootstrap": {"true"}})) - if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), initialCC, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - - initialNode := makeNode("initial-node", map[string]string{"bootstrap": "true"}) - if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), initialNode, metav1.CreateOptions{}); err != nil { - t.Fatal(err) - } - _, ctx := ktesting.NewTestContext(t) - ipamController, err := nodeipam.NewNodeIpamController( - ctx, - sharedInformer.Core().V1().Nodes(), - sharedInformer.Networking().V1alpha1().ClusterCIDRs(), - nil, - clientSet, - clusterCIDRs, - serviceCIDR, - secServiceCIDR, - nodeMaskCIDRs, - ipam.MultiCIDRRangeAllocatorType, - ) - if err != nil { - t.Fatal(err) - } - - return ipamController -} - -func makeNode(name string, labels map[string]string) *v1.Node { - return &v1.Node{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Labels: labels, - }, - Status: v1.NodeStatus{ - Capacity: v1.ResourceList{ - v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), - v1.ResourceCPU: resource.MustParse("4"), - v1.ResourceMemory: resource.MustParse("32Gi"), - }, - Phase: v1.NodeRunning, - Conditions: []v1.NodeCondition{ - {Type: v1.NodeReady, Status: v1.ConditionTrue}, - }, - }, - } -} - -func makeClusterCIDR(name, ipv4CIDR, ipv6CIDR string, perNodeHostBits int32, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR { - return &networkingv1alpha1.ClusterCIDR{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - }, - Spec: networkingv1alpha1.ClusterCIDRSpec{ - PerNodeHostBits: perNodeHostBits, - IPv4: ipv4CIDR, - IPv6: ipv6CIDR, - NodeSelector: nodeSelector, - }, - } -} - -func nodeSelector(labels map[string][]string) *v1.NodeSelector { - testNodeSelector := &v1.NodeSelector{} - - for key, values := range labels { - nst := v1.NodeSelectorTerm{ - MatchExpressions: []v1.NodeSelectorRequirement{ - { - Key: key, - Operator: v1.NodeSelectorOpIn, - Values: values, - }, - }, - } - testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst) - } - - return testNodeSelector -} 
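One latent issue in the `nodePodCIDRs` helper that follows: if the very first `Get` fails, `PollImmediate` returns that error while `node` is still nil, and the `node.Spec.PodCIDRs` on the return line panics. A nil-safe variant (sketch, hypothetical name):

```go
// nodePodCIDRsSafe returns the node's Pod CIDRs once at least one is assigned,
// without dereferencing a nil node on the error path.
func nodePodCIDRsSafe(c clientset.Interface, name string) ([]string, error) {
	var node *v1.Node
	err := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) {
		var getErr error
		node, getErr = c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{})
		if getErr != nil {
			return false, getErr
		}
		return len(node.Spec.PodCIDRs) > 0, nil
	})
	if err != nil {
		return nil, err
	}
	return node.Spec.PodCIDRs, nil
}
```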
- -func nodePodCIDRs(c clientset.Interface, name string) ([]string, error) { - var node *v1.Node - nodePollErr := wait.PollImmediate(time.Second, wait.ForeverTestTimeout, func() (bool, error) { - var err error - node, err = c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) - if err != nil { - return false, err - } - return len(node.Spec.PodCIDRs) > 0, nil - }) - - return node.Spec.PodCIDRs, nodePollErr -} diff --git a/test/integration/clustercidr/main_test.go b/test/integration/clustercidr/main_test.go deleted file mode 100644 index cf920a27032..00000000000 --- a/test/integration/clustercidr/main_test.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package clustercidr - -import ( - "testing" - - "k8s.io/kubernetes/test/integration/framework" -) - -func TestMain(m *testing.M) { - framework.EtcdMain(m.Run) -} diff --git a/test/integration/etcd/data.go b/test/integration/etcd/data.go index ca7286fd256..eea96c9aa50 100644 --- a/test/integration/etcd/data.go +++ b/test/integration/etcd/data.go @@ -203,13 +203,6 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes }, // -- - // k8s.io/kubernetes/pkg/apis/networking/v1alpha1 - gvr("networking.k8s.io", "v1alpha1", "clustercidrs"): { - Stub: `{"metadata": {"name": "clustercidr1"}, "spec": {"perNodeHostBits": 8, "ipv4": "192.168.4.0/24", "ipv6": "fd00:1::/120", "nodeSelector": null}}`, - ExpectedEtcdPath: "/registry/clustercidrs/clustercidr1", - }, - // -- - // k8s.io/kubernetes/pkg/apis/networking/v1alpha1 gvr("networking.k8s.io", "v1alpha1", "ipaddresses"): { Stub: `{"metadata": {"name": "192.168.1.2"}, "spec": {"parentRef": {"resource": "services","name": "test", "namespace": "ns"}}}`, diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go index 33986da2e1a..14e6826b5c1 100644 --- a/test/integration/ipamperf/ipam_test.go +++ b/test/integration/ipamperf/ipam_test.go @@ -54,7 +54,6 @@ func setupAllocator(ctx context.Context, kubeConfig *restclient.Config, config * ipamController, err := nodeipam.NewNodeIpamController( ctx, sharedInformer.Core().V1().Nodes(), - sharedInformer.Networking().V1alpha1().ClusterCIDRs(), config.Cloud, clientSet, []*net.IPNet{clusterCIDR}, serviceCIDR, nil, []int{subnetMaskSize}, config.AllocatorType, )
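With the ClusterCIDR informer parameter gone, callers construct the node IPAM controller without it. For reference, the post-change invocation in `setupAllocator`, as shown by the context lines of the final hunk (error handling elided here as it is in the hunk):

```go
// NewNodeIpamController no longer takes a ClusterCIDR informer; the remaining
// arguments match the surrounding context lines of the ipamperf hunk above.
ipamController, err := nodeipam.NewNodeIpamController(
	ctx,
	sharedInformer.Core().V1().Nodes(),
	config.Cloud,
	clientSet,
	[]*net.IPNet{clusterCIDR},
	serviceCIDR,
	nil,
	[]int{subnetMaskSize},
	config.AllocatorType,
)
```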