diff --git a/api/api-rules/violation_exceptions.list b/api/api-rules/violation_exceptions.list index a261773eccb..278b6f8a8c4 100644 --- a/api/api-rules/violation_exceptions.list +++ b/api/api-rules/violation_exceptions.list @@ -393,6 +393,8 @@ API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RBDPool API rule violation: names_match,k8s.io/api/core/v1,RBDVolumeSource,RadosUser API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,CephFS API rule violation: names_match,k8s.io/api/core/v1,VolumeSource,StorageOS +API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv4 +API rule violation: names_match,k8s.io/api/networking/v1alpha1,ClusterCIDRSpec,IPv6 API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSON,Raw API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Ref API rule violation: names_match,k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1,JSONSchemaProps,Schema diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index 740ef34c789..a7e8d52acf8 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -12081,6 +12081,96 @@ }, "type": "object" }, + "io.k8s.api.networking.v1alpha1.ClusterCIDR": { + "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec", + "description": "Spec is the desired state of the ClusterCIDR. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { + "description": "ClusterCIDRList contains a list of ClusterCIDR.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of ClusterCIDRs.", + "items": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta", + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ClusterCIDRList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { + "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", + "properties": { + "ipv4": { + "description": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "type": "string" + }, + "ipv6": { + "description": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "type": "string" + }, + "nodeSelector": { + "$ref": "#/definitions/io.k8s.api.core.v1.NodeSelector", + "description": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable." + }, + "perNodeHostBits": { + "description": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). 
This field is immutable.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "perNodeHostBits" + ], + "type": "object" + }, "io.k8s.api.node.v1.Overhead": { "description": "Overhead structure represents the resource overhead associated with running a pod.", "properties": { @@ -14650,6 +14740,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -15323,6 +15418,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", @@ -67706,6 +67806,867 @@ } ] }, + "/apis/networking.k8s.io/v1alpha1/": { + "get": { + "consumes": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "description": "get available resources", + "operationId": "getNetworkingV1alpha1APIResources", + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ] + } + }, + "/apis/networking.k8s.io/v1alpha1/clustercidrs": { + "delete": { + "consumes": [ + "*/*" + ], + "description": "delete collection of ClusterCIDR", + "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", + "parameters": [ + { + "in": "body", + "name": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "type": "string", + "uniqueItems": true + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "in": "query", + "name": "gracePeriodSeconds", + "type": "integer", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "type": "integer", + "uniqueItems": true + }, + { + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "in": "query", + "name": "orphanDependents", + "type": "boolean", + "uniqueItems": true + }, + { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "in": "query", + "name": "propagationPolicy", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. 
See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "type": "string", + "uniqueItems": true + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "type": "integer", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "deletecollection", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "get": { + "consumes": [ + "*/*" + ], + "description": "list or watch objects of kind ClusterCIDR", + "operationId": "listNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "in": "query", + "name": "allowWatchBookmarks", + "type": "boolean", + "uniqueItems": true + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their fields. 
Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "type": "integer", + "uniqueItems": true + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "type": "string", + "uniqueItems": true + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "type": "integer", + "uniqueItems": true + }, + { + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", + "in": "query", + "name": "watch", + "type": "boolean", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "list", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "type": "string", + "uniqueItems": true + } + ], + "post": { + "consumes": [ + "*/*" + ], + "description": "create a ClusterCIDR", + "operationId": "createNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "type": "string", + "uniqueItems": true + }, + { + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "in": "query", + "name": "fieldManager", + "type": "string", + "uniqueItems": true + }, + { + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
The error returned from the server will contain all unknown and duplicate fields encountered.", + "in": "query", + "name": "fieldValidation", + "type": "string", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "post", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + } + }, + "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { + "delete": { + "consumes": [ + "*/*" + ], + "description": "delete a ClusterCIDR", + "operationId": "deleteNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "in": "body", + "name": "body", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "type": "string", + "uniqueItems": true + }, + { + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "in": "query", + "name": "gracePeriodSeconds", + "type": "integer", + "uniqueItems": true + }, + { + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "in": "query", + "name": "orphanDependents", + "type": "boolean", + "uniqueItems": true + }, + { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "in": "query", + "name": "propagationPolicy", + "type": "string", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "202": { + "description": "Accepted", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "delete", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "get": { + "consumes": [ + "*/*" + ], + "description": "read the specified ClusterCIDR", + "operationId": "readNetworkingV1alpha1ClusterCIDR", + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "name of the ClusterCIDR", + "in": "path", + "name": "name", + "required": true, + "type": "string", + "uniqueItems": true + }, + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "type": "string", + "uniqueItems": true + } + ], + "patch": { + "consumes": [ + "application/json-patch+json", + "application/merge-patch+json", + "application/strategic-merge-patch+json", + "application/apply-patch+yaml" + ], + "description": "partially update the specified ClusterCIDR", + "operationId": "patchNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "type": "string", + "uniqueItems": true + }, + { + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "in": "query", + "name": "fieldManager", + "type": "string", + "uniqueItems": true + }, + { + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. 
Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "in": "query", + "name": "fieldValidation", + "type": "string", + "uniqueItems": true + }, + { + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "in": "query", + "name": "force", + "type": "boolean", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "put": { + "consumes": [ + "*/*" + ], + "description": "replace the specified ClusterCIDR", + "operationId": "replaceNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "in": "body", + "name": "body", + "required": true, + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "type": "string", + "uniqueItems": true + }, + { + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "in": "query", + "name": "fieldManager", + "type": "string", + "uniqueItems": true + }, + { + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. 
This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "in": "query", + "name": "fieldValidation", + "type": "string", + "uniqueItems": true + } + ], + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "201": { + "description": "Created", + "schema": { + "$ref": "#/definitions/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + } + }, + "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { + "get": { + "consumes": [ + "*/*" + ], + "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", + "operationId": "watchNetworkingV1alpha1ClusterCIDRList", + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "watchlist", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "in": "query", + "name": "allowWatchBookmarks", + "type": "boolean", + "uniqueItems": true + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "type": "integer", + "uniqueItems": true + }, + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "type": "string", + "uniqueItems": true + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "type": "integer", + "uniqueItems": true + }, + { + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "in": "query", + "name": "watch", + "type": "boolean", + "uniqueItems": true + } + ] + }, + "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { + "get": { + "consumes": [ + "*/*" + ], + "description": "watch changes to an object of kind ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", + "operationId": "watchNetworkingV1alpha1ClusterCIDR", + "produces": [ + "application/json", + "application/yaml", + "application/vnd.kubernetes.protobuf", + "application/json;stream=watch", + "application/vnd.kubernetes.protobuf;stream=watch" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "401": { + "description": "Unauthorized" + } + }, + "schemes": [ + "https" + ], + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "watch", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "in": "query", + "name": "allowWatchBookmarks", + "type": "boolean", + "uniqueItems": true + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "type": "string", + "uniqueItems": true + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "type": "integer", + "uniqueItems": true + }, + { + "description": "name of the ClusterCIDR", + "in": "path", + "name": "name", + "required": true, + "type": "string", + "uniqueItems": true + }, + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "type": "string", + "uniqueItems": true + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "type": "string", + "uniqueItems": true + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "type": "integer", + "uniqueItems": true + }, + { + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
Specify resourceVersion.", + "in": "query", + "name": "watch", + "type": "boolean", + "uniqueItems": true + } + ] + }, "/apis/node.k8s.io/": { "get": { "consumes": [ diff --git a/api/openapi-spec/v3/api__v1_openapi.json b/api/openapi-spec/v3/api__v1_openapi.json index 4a194564495..c6c55c47f29 100644 --- a/api/openapi-spec/v3/api__v1_openapi.json +++ b/api/openapi-spec/v3/api__v1_openapi.json @@ -8189,6 +8189,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -8889,6 +8894,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json index 90dae91eec3..b3612973d07 100644 --- a/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__admissionregistration.k8s.io__v1_openapi.json @@ -774,6 +774,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1469,6 +1474,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json index de8278d2897..3f455e440ce 100644 --- a/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apiextensions.k8s.io__v1_openapi.json @@ -1163,6 +1163,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1801,6 +1806,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__apps__v1_openapi.json b/api/openapi-spec/v3/apis__apps__v1_openapi.json index 0edafc8e2cb..abfee17f072 100644 --- a/api/openapi-spec/v3/apis__apps__v1_openapi.json +++ b/api/openapi-spec/v3/apis__apps__v1_openapi.json @@ -5186,6 +5186,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -5881,6 +5886,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json index 6b17d202ab4..1151ce1ca0d 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v1_openapi.json @@ -533,6 +533,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1171,6 +1176,11 @@ "kind": 
"WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json index 2bfa3dc9613..42052636e32 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v2_openapi.json @@ -1186,6 +1186,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1881,6 +1886,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json b/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json index 0e887dfabcf..b3027b1d2ac 100644 --- a/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json +++ b/api/openapi-spec/v3/apis__autoscaling__v2beta2_openapi.json @@ -1177,6 +1177,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1872,6 +1877,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__batch__v1_openapi.json b/api/openapi-spec/v3/apis__batch__v1_openapi.json index cb00fe4fe11..2c0632bb7c6 100644 --- a/api/openapi-spec/v3/apis__batch__v1_openapi.json +++ b/api/openapi-spec/v3/apis__batch__v1_openapi.json @@ -4380,6 +4380,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -5075,6 +5080,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json index 6a5ca8a0e88..5a3897b8711 100644 --- a/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__certificates.k8s.io__v1_openapi.json @@ -571,6 +571,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1209,6 +1214,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json index 482e5d2fecd..340591d2789 100644 --- a/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__coordination.k8s.io__v1_openapi.json @@ -460,6 +460,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": 
"DeleteOptions", @@ -1103,6 +1108,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json index 8bad7a91438..6c646b62b5c 100644 --- a/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__discovery.k8s.io__v1_openapi.json @@ -623,6 +623,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1261,6 +1266,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json index 0a564867cb8..0c0f0d461dc 100644 --- a/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__events.k8s.io__v1_openapi.json @@ -582,6 +582,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1225,6 +1230,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json index d6da58f1bc4..2b9414fd4a5 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta1_openapi.json @@ -1033,6 +1033,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1671,6 +1676,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json index 2167450fe2c..b90e94712db 100644 --- a/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json +++ b/api/openapi-spec/v3/apis__flowcontrol.apiserver.k8s.io__v1beta2_openapi.json @@ -1033,6 +1033,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1671,6 +1676,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json index 47e6944abbb..84575a0942f 100644 --- a/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json +++ 
b/api/openapi-spec/v3/apis__internal.apiserver.k8s.io__v1alpha1_openapi.json @@ -550,6 +550,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1188,6 +1193,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json index 1344dbe2775..48ad4a7bd11 100644 --- a/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1_openapi.json @@ -1210,6 +1210,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1905,6 +1910,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json new file mode 100644 index 00000000000..895a3301ca3 --- /dev/null +++ b/api/openapi-spec/v3/apis__networking.k8s.io__v1alpha1_openapi.json @@ -0,0 +1,2405 @@ +{ + "components": { + "schemas": { + "io.k8s.api.core.v1.NodeSelector": { + "description": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. The terms are ORed.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorTerm" + } + ], + "default": {} + }, + "type": "array" + } + }, + "required": [ + "nodeSelectorTerms" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.core.v1.NodeSelectorRequirement": { + "description": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "properties": { + "key": { + "default": "", + "description": "The label key that the selector applies to.", + "type": "string" + }, + "operator": { + "default": "", + "description": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.\n\n", + "type": "string" + }, + "values": { + "description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", + "items": { + "default": "", + "type": "string" + }, + "type": "array" + } + }, + "required": [ + "key", + "operator" + ], + "type": "object" + }, + "io.k8s.api.core.v1.NodeSelectorTerm": { + "description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. 
The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + "properties": { + "matchExpressions": { + "description": "A list of node selector requirements by node's labels.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" + } + ], + "default": {} + }, + "type": "array" + }, + "matchFields": { + "description": "A list of node selector requirements by node's fields.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelectorRequirement" + } + ], + "default": {} + }, + "type": "array" + } + }, + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.api.networking.v1alpha1.ClusterCIDR": { + "description": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta" + } + ], + "default": {}, + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "spec": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRSpec" + } + ], + "default": {}, + "description": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ClusterCIDRList": { + "description": "ClusterCIDRList contains a list of ClusterCIDR.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "items": { + "description": "Items is the list of ClusterCIDRs.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + ], + "default": {} + }, + "type": "array" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "metadata": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + ], + "default": {}, + "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + } + }, + "required": [ + "items" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "networking.k8s.io", + "kind": "ClusterCIDRList", + "version": "v1alpha1" + } + ] + }, + "io.k8s.api.networking.v1alpha1.ClusterCIDRSpec": { + "description": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", + "properties": { + "ipv4": { + "default": "", + "description": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "type": "string" + }, + "ipv6": { + "default": "", + "description": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "type": "string" + }, + "nodeSelector": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.api.core.v1.NodeSelector" + } + ], + "description": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable." + }, + "perNodeHostBits": { + "default": 0, + "description": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "perNodeHostBits" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": { + "description": "APIResource specifies the name of a resource and whether it is namespaced.", + "properties": { + "categories": { + "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "items": { + "default": "", + "type": "string" + }, + "type": "array" + }, + "group": { + "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "type": "string" + }, + "kind": { + "default": "", + "description": "kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo')", + "type": "string" + }, + "name": { + "default": "", + "description": "name is the plural name of the resource.", + "type": "string" + }, + "namespaced": { + "default": false, + "description": "namespaced indicates if a resource is namespaced or not.", + "type": "boolean" + }, + "shortNames": { + "description": "shortNames is a list of suggested short names of the resource.", + "items": { + "default": "", + "type": "string" + }, + "type": "array" + }, + "singularName": { + "default": "", + "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "type": "string" + }, + "storageVersionHash": { + "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "type": "string" + }, + "verbs": { + "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "items": { + "default": "", + "type": "string" + }, + "type": "array" + }, + "version": { + "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "type": "string" + } + }, + "required": [ + "name", + "singularName", + "namespaced", + "kind", + "verbs" + ], + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList": { + "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "groupVersion": { + "default": "", + "description": "groupVersion is the group and version this APIResourceList is for.", + "type": "string" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
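For illustration of the ClusterCIDRSpec schema introduced above, here is a minimal sketch (in Python, since the manifest is plain JSON) of a dual-stack ClusterCIDR object. The object name, CIDR blocks, and node-selector label are illustrative assumptions, not values taken from this change.

```python
import json

# Minimal sketch of a ClusterCIDR manifest matching the v1alpha1 schema above.
# The name, CIDR blocks, and node-selector label are illustrative assumptions.
cluster_cidr = {
    "apiVersion": "networking.k8s.io/v1alpha1",
    "kind": "ClusterCIDR",
    "metadata": {"name": "example-clustercidr"},
    "spec": {
        # At least one of ipv4/ipv6 must be set; both fields are immutable.
        "ipv4": "10.0.0.0/8",
        "ipv6": "fd12:3456:789a:1::/64",
        # 8 host bits => 256 IPs per matching node (a /24 for IPv4, a /120 for IPv6).
        "perNodeHostBits": 8,
        # Restrict the config to nodes carrying an (assumed) example label.
        "nodeSelector": {
            "nodeSelectorTerms": [
                {
                    "matchExpressions": [
                        {"key": "node.example.com/pool", "operator": "In", "values": ["blue"]}
                    ]
                }
            ]
        },
    },
}

print(json.dumps(cluster_cidr, indent=2))
```

Under the perNodeHostBits arithmetic described in the spec, this example would hand each matching node a /24 IPv4 range and a /120 IPv6 range.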
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "resources": { + "description": "resources contains the name of the resources and if they are namespaced.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource" + } + ], + "default": {} + }, + "type": "array" + } + }, + "required": [ + "groupVersion", + "resources" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "APIResourceList", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions": { + "description": "DeleteOptions may be provided when deleting an API object.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "dryRun": { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "items": { + "default": "", + "type": "string" + }, + "type": "array" + }, + "gracePeriodSeconds": { + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "format": "int64", + "type": "integer" + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "orphanDependents": { + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "type": "boolean" + }, + "preconditions": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions" + } + ], + "description": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned." + }, + "propagationPolicy": { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "DeleteOptions", + "version": "v1beta2" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "authentication.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2beta1" + }, + { + "group": "autoscaling", + "kind": "DeleteOptions", + "version": "v2beta2" + }, + { + "group": "batch", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "batch", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "certificates.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "coordination.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "discovery.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "discovery.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "events.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "events.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "extensions", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta2" + }, + { + "group": "imagepolicy.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "internal.apiserver.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": 
"DeleteOptions", + "version": "v1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "node.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "policy", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "policy", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "scheduling.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, + { + "group": "storage.k8s.io", + "kind": "DeleteOptions", + "version": "v1beta1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": { + "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": { + "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "properties": { + "continue": { + "description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "type": "string" + }, + "remainingItemCount": { + "description": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. 
If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", + "format": "int64", + "type": "integer" + }, + "resourceVersion": { + "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": { + "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "type": "string" + }, + "fieldsType": { + "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "type": "string" + }, + "fieldsV1": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1" + } + ], + "description": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type." + }, + "manager": { + "description": "Manager is an identifier of the workflow managing these fields.", + "type": "string" + }, + "operation": { + "description": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "type": "string" + }, + "subresource": { + "description": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.", + "type": "string" + }, + "time": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + ], + "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields value or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over." 
+ } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": { + "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "properties": { + "annotations": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "type": "object" + }, + "creationTimestamp": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + ], + "default": {}, + "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "deletionGracePeriodSeconds": { + "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "format": "int64", + "type": "integer" + }, + "deletionTimestamp": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time" + } + ], + "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata" + }, + "finalizers": { + "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", + "items": { + "default": "", + "type": "string" + }, + "type": "array", + "x-kubernetes-patch-strategy": "merge" + }, + "generateName": { + "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will return a 409.\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "type": "string" + }, + "generation": { + "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "format": "int64", + "type": "integer" + }, + "labels": { + "additionalProperties": { + "default": "", + "type": "string" + }, + "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "type": "object" + }, + "managedFields": { + "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry" + } + ], + "default": {} + }, + "type": "array" + }, + "name": { + "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "type": "string" + }, + "namespace": { + "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "type": "string" + }, + "ownerReferences": { + "description": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. 
If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference" + } + ], + "default": {} + }, + "type": "array", + "x-kubernetes-patch-merge-key": "uid", + "x-kubernetes-patch-strategy": "merge" + }, + "resourceVersion": { + "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "type": "string" + }, + "selfLink": { + "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.", + "type": "string" + }, + "uid": { + "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": { + "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "properties": { + "apiVersion": { + "default": "", + "description": "API version of the referent.", + "type": "string" + }, + "blockOwnerDeletion": { + "description": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", + "type": "boolean" + }, + "controller": { + "description": "If true, this reference points to the managing controller.", + "type": "boolean" + }, + "kind": { + "default": "", + "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "default": "", + "description": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "type": "string" + }, + "uid": { + "default": "", + "description": "UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "type": "string" + } + }, + "required": [ + "apiVersion", + "kind", + "name", + "uid" + ], + "type": "object", + "x-kubernetes-map-type": "atomic" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Patch": { + "description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions": { + "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "properties": { + "resourceVersion": { + "description": "Specifies the target ResourceVersion", + "type": "string" + }, + "uid": { + "description": "Specifies the target UID.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Status": { + "description": "Status is a return value for calls that don't return other objects.", + "properties": { + "apiVersion": { + "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "type": "string" + }, + "code": { + "description": "Suggested HTTP return code for this status, 0 if not set.", + "format": "int32", + "type": "integer" + }, + "details": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails" + } + ], + "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type." + }, + "kind": { + "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "message": { + "description": "A human-readable description of the status of this operation.", + "type": "string" + }, + "metadata": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta" + } + ], + "default": {}, + "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + }, + "reason": { + "description": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "type": "string" + }, + "status": { + "description": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "type": "string" + } + }, + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "Status", + "version": "v1" + } + ] + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause": { + "description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "properties": { + "field": { + "description": "The field of the resource that has caused this error, as named by its JSON serialization. 
May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", + "type": "string" + }, + "message": { + "description": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "type": "string" + }, + "reason": { + "description": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": { + "description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "properties": { + "causes": { + "description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + "items": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause" + } + ], + "default": {} + }, + "type": "array" + }, + "group": { + "description": "The group attribute of the resource associated with the status StatusReason.", + "type": "string" + }, + "kind": { + "description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "type": "string" + }, + "name": { + "description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "type": "string" + }, + "retryAfterSeconds": { + "description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", + "format": "int32", + "type": "integer" + }, + "uid": { + "description": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "type": "string" + } + }, + "type": "object" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.Time": { + "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.", + "format": "date-time", + "type": "string" + }, + "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": { + "description": "Event represents a single event to a watched resource.", + "properties": { + "object": { + "allOf": [ + { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension" + } + ], + "default": {}, + "description": "Object is:\n * If Type is Added or Modified: the new state of the object.\n * If Type is Deleted: the state of the object immediately before deletion.\n * If Type is Error: *Status is recommended; other types may make sense\n depending on context." 
+ }, + "type": { + "default": "", + "type": "string" + } + }, + "required": [ + "type", + "object" + ], + "type": "object", + "x-kubernetes-group-version-kind": [ + { + "group": "", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admission.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "admissionregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiextensions.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apiregistration.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "apps", + "kind": "WatchEvent", + "version": "v1beta2" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "authentication.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2beta1" + }, + { + "group": "autoscaling", + "kind": "WatchEvent", + "version": "v2beta2" + }, + { + "group": "batch", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "batch", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "certificates.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "coordination.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "discovery.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "discovery.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "events.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "events.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "extensions", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "flowcontrol.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1beta2" + }, + { + "group": "imagepolicy.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "internal.apiserver.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + 
"version": "v1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "node.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "policy", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "policy", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "rbac.authorization.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "scheduling.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, + { + "group": "storage.k8s.io", + "kind": "WatchEvent", + "version": "v1beta1" + } + ] + }, + "io.k8s.apimachinery.pkg.runtime.RawExtension": { + "description": "RawExtension is used to hold extensions in external versions.\n\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\n\n// Internal package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.Object `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// External package:\n\n\ttype MyAPIObject struct {\n\t\truntime.TypeMeta `json:\",inline\"`\n\t\tMyPlugin runtime.RawExtension `json:\"myPlugin\"`\n\t}\n\n\ttype PluginA struct {\n\t\tAOption string `json:\"aOption\"`\n\t}\n\n// On the wire, the JSON will look something like this:\n\n\t{\n\t\t\"kind\":\"MyAPIObject\",\n\t\t\"apiVersion\":\"v1\",\n\t\t\"myPlugin\": {\n\t\t\t\"kind\":\"PluginA\",\n\t\t\t\"aOption\":\"foo\",\n\t\t},\n\t}\n\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. 
(TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)", + "type": "object" + } + }, + "securitySchemes": { + "BearerToken": { + "description": "Bearer Token authentication", + "in": "header", + "name": "authorization", + "type": "apiKey" + } + } + }, + "info": { + "title": "Kubernetes", + "version": "unversioned" + }, + "openapi": "3.0.0", + "paths": { + "/apis/networking.k8s.io/v1alpha1/": { + "get": { + "description": "get available resources", + "operationId": "getNetworkingV1alpha1APIResources", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList" + } + } + }, + "description": "OK" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ] + } + }, + "/apis/networking.k8s.io/v1alpha1/clustercidrs": { + "delete": { + "description": "delete collection of ClusterCIDR", + "operationId": "deleteNetworkingV1alpha1CollectionClusterCIDR", + "parameters": [ + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "in": "query", + "name": "gracePeriodSeconds", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "in": "query", + "name": "orphanDependents", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "in": "query", + "name": "propagationPolicy", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. 
It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "schema": { + "type": "integer", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "*/*": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "description": "OK" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "deletecollection", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "get": { + "description": "list or watch objects of kind ClusterCIDR", + "operationId": "listNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "in": "query", + "name": "allowWatchBookmarks", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. 
Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. 
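The limit/continue contract described in the list parameters above lends itself to a simple pagination loop. Here is a minimal sketch, assuming a caller-supplied fetch_json(url) helper as a hypothetical stand-in for whatever authenticated HTTP client is in use.

```python
from urllib.parse import urlencode

# Minimal sketch of the limit/continue pagination loop described above.
# fetch_json is a hypothetical, caller-supplied helper that performs an
# authenticated GET against the API server and returns the decoded JSON body.
def list_cluster_cidrs(fetch_json, page_size=100):
    base = "/apis/networking.k8s.io/v1alpha1/clustercidrs"
    items, token = [], None
    while True:
        params = {"limit": page_size}
        if token:
            # continue must echo the token from the previous response verbatim.
            params["continue"] = token
        page = fetch_json(f"{base}?{urlencode(params)}")  # a ClusterCIDRList
        items.extend(page.get("items", []))
        token = page.get("metadata", {}).get("continue")
        if not token:  # empty continue means no more results are available
            return items
```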
Specify resourceVersion.", + "in": "query", + "name": "watch", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ], + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + } + }, + "application/json;stream=watch": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + } + }, + "application/vnd.kubernetes.protobuf;stream=watch": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDRList" + } + } + }, + "description": "OK" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "list", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "post": { + "description": "create a ClusterCIDR", + "operationId": "createNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "in": "query", + "name": "fieldManager", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. 
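As a hedged example of how the create parameters above (dryRun, fieldManager, fieldValidation) are used from Go, the snippet below creates a ClusterCIDR with strict field validation; the object name, CIDR, host bits, and manager name are invented for illustration, and the generated clientset accessor is assumed to exist.

package example

import (
	"context"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func createExample(ctx context.Context, cs kubernetes.Interface) error {
	cc := &networkingv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example-cidr"},
		Spec: networkingv1alpha1.ClusterCIDRSpec{
			PerNodeHostBits: 8,            // 256 addresses per node
			IPv4:            "10.0.0.0/8", // at least one of ipv4/ipv6 is required
		},
	}
	_, err := cs.NetworkingV1alpha1().ClusterCIDRs().Create(ctx, cc, metav1.CreateOptions{
		DryRun:          []string{metav1.DryRunAll}, // validate and admit, but do not persist
		FieldManager:    "example-manager",
		FieldValidation: "Strict", // reject unknown or duplicate fields
	})
	return err
}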
The error returned from the server will contain all unknown and duplicate fields encountered.", + "in": "query", + "name": "fieldValidation", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "*/*": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "OK" + }, + "201": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "Created" + }, + "202": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "Accepted" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "post", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + } + }, + "/apis/networking.k8s.io/v1alpha1/clustercidrs/{name}": { + "delete": { + "description": "delete a ClusterCIDR", + "operationId": "deleteNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "in": "query", + "name": "gracePeriodSeconds", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "in": "query", + "name": "orphanDependents", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "description": "Whether and how garbage collection will be performed. 
Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "in": "query", + "name": "propagationPolicy", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "*/*": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "description": "OK" + }, + "202": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status" + } + } + }, + "description": "Accepted" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "delete", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "get": { + "description": "read the specified ClusterCIDR", + "operationId": "readNetworkingV1alpha1ClusterCIDR", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "OK" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "get", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "name of the ClusterCIDR", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "patch": { + "description": "partially update the specified ClusterCIDR", + "operationId": "patchNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
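The delete parameters above map directly onto metav1.DeleteOptions; a minimal sketch follows (the foreground policy and zero grace period are arbitrary choices for the example, and the generated clientset accessor is assumed):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func deleteExample(ctx context.Context, cs kubernetes.Interface, name string) error {
	grace := int64(0)                            // delete immediately
	policy := metav1.DeletePropagationForeground // remove dependents before the owner
	return cs.NetworkingV1alpha1().ClusterCIDRs().Delete(ctx, name, metav1.DeleteOptions{
		GracePeriodSeconds: &grace,
		PropagationPolicy:  &policy,
	})
}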
Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", + "in": "query", + "name": "fieldManager", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "in": "query", + "name": "fieldValidation", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. 
Force flag must be unset for non-apply patch requests.", + "in": "query", + "name": "force", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "application/apply-patch+yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/json-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/merge-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + }, + "application/strategic-merge-patch+json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "OK" + }, + "201": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "Created" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "patch", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "put": { + "description": "replace the specified ClusterCIDR", + "operationId": "replaceNetworkingV1alpha1ClusterCIDR", + "parameters": [ + { + "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "in": "query", + "name": "dryRun", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "in": "query", + "name": "fieldManager", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields, provided that the `ServerSideFieldValidation` feature gate is also enabled. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23 and is the default behavior when the `ServerSideFieldValidation` feature gate is disabled. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. 
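The application/apply-patch+yaml content type listed above, together with fieldManager and force, is how server-side apply reaches this resource; a sketch under the same assumptions as the earlier examples (the manifest and manager name are made up):

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

func applyExample(ctx context.Context, cs kubernetes.Interface) error {
	manifest := []byte(`
apiVersion: networking.k8s.io/v1alpha1
kind: ClusterCIDR
metadata:
  name: example-cidr
spec:
  perNodeHostBits: 8
  ipv4: 10.0.0.0/8
`)
	force := true // re-acquire fields currently owned by other managers
	_, err := cs.NetworkingV1alpha1().ClusterCIDRs().Patch(ctx, "example-cidr",
		types.ApplyPatchType, manifest, metav1.PatchOptions{
			FieldManager: "example-manager",
			Force:        &force,
		})
	return err
}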
The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default when the `ServerSideFieldValidation` feature gate is enabled. - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.", + "in": "query", + "name": "fieldValidation", + "schema": { + "type": "string", + "uniqueItems": true + } + } + ], + "requestBody": { + "content": { + "*/*": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + } + }, + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "OK" + }, + "201": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.api.networking.v1alpha1.ClusterCIDR" + } + } + }, + "description": "Created" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "put", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + } + }, + "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs": { + "get": { + "description": "watch individual changes to a list of ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead.", + "operationId": "watchNetworkingV1alpha1ClusterCIDRList", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/json;stream=watch": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/vnd.kubernetes.protobuf;stream=watch": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + } + }, + "description": "OK" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "watchlist", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. 
Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.", + "in": "query", + "name": "allowWatchBookmarks", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "in": "query", + "name": "watch", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ] + }, + "/apis/networking.k8s.io/v1alpha1/watch/clustercidrs/{name}": { + "get": { + "description": "watch changes to an object of kind ClusterCIDR. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.", + "operationId": "watchNetworkingV1alpha1ClusterCIDR", + "responses": { + "200": { + "content": { + "application/json": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/json;stream=watch": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/vnd.kubernetes.protobuf": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/vnd.kubernetes.protobuf;stream=watch": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + }, + "application/yaml": { + "schema": { + "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent" + } + } + }, + "description": "OK" + }, + "401": { + "description": "Unauthorized" + } + }, + "tags": [ + "networking_v1alpha1" + ], + "x-kubernetes-action": "watch", + "x-kubernetes-group-version-kind": { + "group": "networking.k8s.io", + "kind": "ClusterCIDR", + "version": "v1alpha1" + } + }, + "parameters": [ + { + "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. 
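In practice clients rarely need these deprecated /watch paths; the same event stream is obtained by setting watch=true on the list call. A hedged sketch, again assuming the generated clientset accessor:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func watchExample(ctx context.Context, cs kubernetes.Interface, fromRV string) error {
	w, err := cs.NetworkingV1alpha1().ClusterCIDRs().Watch(ctx, metav1.ListOptions{
		ResourceVersion:     fromRV, // resume from a previously observed version
		AllowWatchBookmarks: true,   // bookmarks are best-effort, as described above
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type) // ADDED, MODIFIED, DELETED, BOOKMARK, or ERROR
	}
	return nil
}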
If this is not a watch, this field is ignored.", + "in": "query", + "name": "allowWatchBookmarks", + "schema": { + "type": "boolean", + "uniqueItems": true + } + }, + { + "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "in": "query", + "name": "continue", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "in": "query", + "name": "fieldSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "in": "query", + "name": "labelSelector", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. 
If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "in": "query", + "name": "limit", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "name of the ClusterCIDR", + "in": "path", + "name": "name", + "required": true, + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "If 'true', then the output is pretty printed.", + "in": "query", + "name": "pretty", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersion", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset", + "in": "query", + "name": "resourceVersionMatch", + "schema": { + "type": "string", + "uniqueItems": true + } + }, + { + "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "in": "query", + "name": "timeoutSeconds", + "schema": { + "type": "integer", + "uniqueItems": true + } + }, + { + "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "in": "query", + "name": "watch", + "schema": { + "type": "boolean", + "uniqueItems": true + } + } + ] + } + } +} diff --git a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json index 6540662e68f..549332ab366 100644 --- a/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__node.k8s.io__v1_openapi.json @@ -523,6 +523,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1161,6 +1166,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__policy__v1_openapi.json b/api/openapi-spec/v3/apis__policy__v1_openapi.json index b14a27baac0..bca53f16459 100644 --- a/api/openapi-spec/v3/apis__policy__v1_openapi.json +++ b/api/openapi-spec/v3/apis__policy__v1_openapi.json @@ -582,6 +582,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1277,6 +1282,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json index 5e201854e6a..4a9b5ef640d 100644 --- 
a/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__rbac.authorization.k8s.io__v1_openapi.json @@ -843,6 +843,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1538,6 +1543,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json index 1aee1ac65cf..621ad821692 100644 --- a/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__scheduling.k8s.io__v1_openapi.json @@ -436,6 +436,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1074,6 +1079,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json index f767836eac5..f6c625d7c2a 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1_openapi.json @@ -2275,6 +2275,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -2970,6 +2975,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json b/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json index 4ccfde61f3c..f43ed905b54 100644 --- a/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json +++ b/api/openapi-spec/v3/apis__storage.k8s.io__v1beta1_openapi.json @@ -462,6 +462,11 @@ "kind": "DeleteOptions", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "DeleteOptions", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "DeleteOptions", @@ -1157,6 +1162,11 @@ "kind": "WatchEvent", "version": "v1" }, + { + "group": "networking.k8s.io", + "kind": "WatchEvent", + "version": "v1alpha1" + }, { "group": "networking.k8s.io", "kind": "WatchEvent", diff --git a/cmd/cloud-controller-manager/.import-restrictions b/cmd/cloud-controller-manager/.import-restrictions index 151e8744804..695312b7312 100644 --- a/cmd/cloud-controller-manager/.import-restrictions +++ b/cmd/cloud-controller-manager/.import-restrictions @@ -39,4 +39,5 @@ rules: - k8s.io/kubernetes/pkg/util/taints - k8s.io/kubernetes/pkg/proxy/util - k8s.io/kubernetes/pkg/proxy/util/testing + - k8s.io/kubernetes/pkg/util/slice - k8s.io/kubernetes/pkg/util/sysctl \ No newline at end of file diff --git a/cmd/cloud-controller-manager/nodeipamcontroller.go b/cmd/cloud-controller-manager/nodeipamcontroller.go index b03a9cd4a09..bcd6211342f 100644 --- a/cmd/cloud-controller-manager/nodeipamcontroller.go +++ b/cmd/cloud-controller-manager/nodeipamcontroller.go @@ -26,6 +26,8 @@ import 
( "net" "strings" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers/networking/v1alpha1" cloudprovider "k8s.io/cloud-provider" "k8s.io/cloud-provider/app" cloudcontrollerconfig "k8s.io/cloud-provider/app/config" @@ -36,6 +38,7 @@ import ( nodeipamcontroller "k8s.io/kubernetes/pkg/controller/nodeipam" nodeipamconfig "k8s.io/kubernetes/pkg/controller/nodeipam/config" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" + "k8s.io/kubernetes/pkg/features" netutils "k8s.io/utils/net" ) @@ -120,8 +123,14 @@ func startNodeIpamController(initContext app.ControllerInitContext, ccmConfig *c return nil, false, err } + var clusterCIDRInformer v1alpha1.ClusterCIDRInformer + if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { + clusterCIDRInformer = ctx.InformerFactory.Networking().V1alpha1().ClusterCIDRs() + } + nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( ctx.InformerFactory.Core().V1().Nodes(), + clusterCIDRInformer, cloud, ctx.ClientBuilder.ClientOrDie(initContext.ClientName), clusterCIDRs, diff --git a/cmd/kube-apiserver/app/aggregator.go b/cmd/kube-apiserver/app/aggregator.go index 2466dcc21be..1ccf17581da 100644 --- a/cmd/kube-apiserver/app/aggregator.go +++ b/cmd/kube-apiserver/app/aggregator.go @@ -260,6 +260,7 @@ var apiVersionPriorities = map[schema.GroupVersion]priority{ {Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9}, {Group: "certificates.k8s.io", Version: "v1"}: {group: 17300, version: 15}, {Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15}, + {Group: "networking.k8s.io", Version: "v1alpha1"}: {group: 17200, version: 1}, {Group: "policy", Version: "v1"}: {group: 17100, version: 15}, {Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9}, {Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15}, diff --git a/cmd/kube-controller-manager/app/core.go b/cmd/kube-controller-manager/app/core.go index 7c0a5d40ac5..5bb31738d2d 100644 --- a/cmd/kube-controller-manager/app/core.go +++ b/cmd/kube-controller-manager/app/core.go @@ -27,7 +27,9 @@ import ( "strings" "time" + "k8s.io/client-go/informers/networking/v1alpha1" "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/features" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -153,8 +155,14 @@ func startNodeIpamController(ctx context.Context, controllerContext ControllerCo return nil, false, err } + var clusterCIDRInformer v1alpha1.ClusterCIDRInformer + if utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { + clusterCIDRInformer = controllerContext.InformerFactory.Networking().V1alpha1().ClusterCIDRs() + } + nodeIpamController, err := nodeipamcontroller.NewNodeIpamController( controllerContext.InformerFactory.Core().V1().Nodes(), + clusterCIDRInformer, controllerContext.Cloud, controllerContext.ClientBuilder.ClientOrDie("node-controller"), clusterCIDRs, diff --git a/hack/lib/init.sh b/hack/lib/init.sh index f31141b8757..a0ed9b050c3 100755 --- a/hack/lib/init.sh +++ b/hack/lib/init.sh @@ -92,6 +92,7 @@ events.k8s.io/v1 \ events.k8s.io/v1beta1 \ imagepolicy.k8s.io/v1alpha1 \ networking.k8s.io/v1 \ +networking.k8s.io/v1alpha1 \ networking.k8s.io/v1beta1 \ node.k8s.io/v1 \ node.k8s.io/v1alpha1 \ diff --git a/pkg/apis/core/v1/helper/helpers.go b/pkg/apis/core/v1/helper/helpers.go index 1a7aa4458f3..fbc733d09cc 100644 --- a/pkg/apis/core/v1/helper/helpers.go +++ b/pkg/apis/core/v1/helper/helpers.go @@ -370,3 +370,62 @@ func ScopedResourceSelectorRequirementsAsSelector(ssr 
v1.ScopedResourceSelectorR selector = selector.Add(*r) return selector, nil } + +// nodeSelectorRequirementsAsLabelRequirements converts the NodeSelectorRequirement +// type to a labels.Requirement type. +func nodeSelectorRequirementsAsLabelRequirements(nsr v1.NodeSelectorRequirement) (*labels.Requirement, error) { + var op selection.Operator + switch nsr.Operator { + case v1.NodeSelectorOpIn: + op = selection.In + case v1.NodeSelectorOpNotIn: + op = selection.NotIn + case v1.NodeSelectorOpExists: + op = selection.Exists + case v1.NodeSelectorOpDoesNotExist: + op = selection.DoesNotExist + case v1.NodeSelectorOpGt: + op = selection.GreaterThan + case v1.NodeSelectorOpLt: + op = selection.LessThan + default: + return nil, fmt.Errorf("%q is not a valid node selector operator", nsr.Operator) + } + return labels.NewRequirement(nsr.Key, op, nsr.Values) +} + +// NodeSelectorAsSelector converts the NodeSelector api type into a struct that +// implements labels.Selector +// Note: This function should be kept in sync with the selector methods in +// pkg/labels/selector.go +func NodeSelectorAsSelector(ns *v1.NodeSelector) (labels.Selector, error) { + if ns == nil { + return labels.Nothing(), nil + } + if len(ns.NodeSelectorTerms) == 0 { + return labels.Everything(), nil + } + var requirements []labels.Requirement + + for _, nsTerm := range ns.NodeSelectorTerms { + for _, expr := range nsTerm.MatchExpressions { + req, err := nodeSelectorRequirementsAsLabelRequirements(expr) + if err != nil { + return nil, err + } + requirements = append(requirements, *req) + } + + for _, field := range nsTerm.MatchFields { + req, err := nodeSelectorRequirementsAsLabelRequirements(field) + if err != nil { + return nil, err + } + requirements = append(requirements, *req) + } + } + + selector := labels.NewSelector() + selector = selector.Add(requirements...) 
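+ // Note that requirements gathered from all terms are ANDed into a single
+ // selector here. For example, a NodeSelector with one term containing the
+ // match expression {key: "kubernetes.io/os", operator: In, values: ["linux"]}
+ // yields a selector equivalent to labels.Parse("kubernetes.io/os in (linux)").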
+ return selector, nil +} diff --git a/pkg/apis/networking/install/install.go b/pkg/apis/networking/install/install.go index 6863a7f7af6..c29708f2cb2 100644 --- a/pkg/apis/networking/install/install.go +++ b/pkg/apis/networking/install/install.go @@ -24,6 +24,7 @@ import ( "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" "k8s.io/kubernetes/pkg/apis/networking/v1" + "k8s.io/kubernetes/pkg/apis/networking/v1alpha1" "k8s.io/kubernetes/pkg/apis/networking/v1beta1" ) @@ -36,5 +37,6 @@ func Install(scheme *runtime.Scheme) { utilruntime.Must(networking.AddToScheme(scheme)) utilruntime.Must(v1.AddToScheme(scheme)) utilruntime.Must(v1beta1.AddToScheme(scheme)) - utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion)) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion, v1beta1.SchemeGroupVersion, v1alpha1.SchemeGroupVersion)) } diff --git a/pkg/apis/networking/register.go b/pkg/apis/networking/register.go index 81dad49b1de..486bf834bfe 100644 --- a/pkg/apis/networking/register.go +++ b/pkg/apis/networking/register.go @@ -52,6 +52,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &IngressList{}, &IngressClass{}, &IngressClassList{}, + &ClusterCIDR{}, + &ClusterCIDRList{}, ) return nil } diff --git a/pkg/apis/networking/types.go b/pkg/apis/networking/types.go index 43d2d83645a..d6adda6399c 100644 --- a/pkg/apis/networking/types.go +++ b/pkg/apis/networking/types.go @@ -583,3 +583,67 @@ type ServiceBackendPort struct { // +optional Number int32 } + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +type ClusterCIDR struct { + metav1.TypeMeta + metav1.ObjectMeta + + Spec ClusterCIDRSpec +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +type ClusterCIDRSpec struct { + // NodeSelector defines which nodes the config is applicable to. + // An empty or nil NodeSelector selects all nodes. + // This field is immutable. + // +optional + NodeSelector *api.NodeSelector + + // PerNodeHostBits defines the number of host bits to be configured per node. + // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). + // This field is immutable. + // +required + PerNodeHostBits int32 + + // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of IPv4 and IPv6 must be specified. + // This field is immutable. + // +optional + IPv4 string + + // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64"). 
+ // At least one of IPv4 and IPv6 must be specified. + // This field is immutable. + // +optional + IPv6 string +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterCIDRList contains a list of ClusterCIDRs. +type ClusterCIDRList struct { + metav1.TypeMeta + + // +optional + metav1.ListMeta + + // Items is the list of ClusterCIDRs. + Items []ClusterCIDR +} diff --git a/pkg/apis/networking/v1alpha1/defaults.go b/pkg/apis/networking/v1alpha1/defaults.go new file mode 100644 index 00000000000..c0de82c4bb6 --- /dev/null +++ b/pkg/apis/networking/v1alpha1/defaults.go @@ -0,0 +1,25 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func addDefaultingFuncs(scheme *runtime.Scheme) error { + return RegisterDefaults(scheme) +} diff --git a/pkg/apis/networking/v1alpha1/doc.go b/pkg/apis/networking/v1alpha1/doc.go new file mode 100644 index 00000000000..8f7931ecefc --- /dev/null +++ b/pkg/apis/networking/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=k8s.io/kubernetes/pkg/apis/networking +// +k8s:conversion-gen-external-types=k8s.io/api/networking/v1alpha1 +// +k8s:defaulter-gen=TypeMeta +// +k8s:defaulter-gen-input=k8s.io/api/networking/v1alpha1 +// +groupName=networking.k8s.io + +package v1alpha1 // import "k8s.io/kubernetes/pkg/apis/networking/v1alpha1" diff --git a/pkg/apis/networking/v1alpha1/register.go b/pkg/apis/networking/v1alpha1/register.go new file mode 100644 index 00000000000..729909ba9a2 --- /dev/null +++ b/pkg/apis/networking/v1alpha1/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package. 
+const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + localSchemeBuilder = &networkingv1alpha1.SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addDefaultingFuncs) +} diff --git a/pkg/apis/networking/v1alpha1/zz_generated.conversion.go b/pkg/apis/networking/v1alpha1/zz_generated.conversion.go new file mode 100644 index 00000000000..01a5d2d1e89 --- /dev/null +++ b/pkg/apis/networking/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,147 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + v1alpha1 "k8s.io/api/networking/v1alpha1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + core "k8s.io/kubernetes/pkg/apis/core" + networking "k8s.io/kubernetes/pkg/apis/networking" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDR)(nil), (*networking.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(a.(*v1alpha1.ClusterCIDR), b.(*networking.ClusterCIDR), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDR)(nil), (*v1alpha1.ClusterCIDR)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(a.(*networking.ClusterCIDR), b.(*v1alpha1.ClusterCIDR), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRList)(nil), (*networking.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(a.(*v1alpha1.ClusterCIDRList), b.(*networking.ClusterCIDRList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRList)(nil), (*v1alpha1.ClusterCIDRList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(a.(*networking.ClusterCIDRList), b.(*v1alpha1.ClusterCIDRList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*v1alpha1.ClusterCIDRSpec)(nil), (*networking.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(a.(*v1alpha1.ClusterCIDRSpec), b.(*networking.ClusterCIDRSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*networking.ClusterCIDRSpec)(nil), (*v1alpha1.ClusterCIDRSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(a.(*networking.ClusterCIDRSpec), b.(*v1alpha1.ClusterCIDRSpec), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR is an autogenerated conversion function. +func Convert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in *v1alpha1.ClusterCIDR, out *networking.ClusterCIDR, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterCIDR_To_networking_ClusterCIDR(in, out, s) +} + +func autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + return nil +} + +// Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR is an autogenerated conversion function. 
+func Convert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in *networking.ClusterCIDR, out *v1alpha1.ClusterCIDR, s conversion.Scope) error { + return autoConvert_networking_ClusterCIDR_To_v1alpha1_ClusterCIDR(in, out, s) +} + +func autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]networking.ClusterCIDR)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList is an autogenerated conversion function. +func Convert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in *v1alpha1.ClusterCIDRList, out *networking.ClusterCIDRList, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterCIDRList_To_networking_ClusterCIDRList(in, out, s) +} + +func autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]v1alpha1.ClusterCIDR)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList is an autogenerated conversion function. +func Convert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in *networking.ClusterCIDRList, out *v1alpha1.ClusterCIDRList, s conversion.Scope) error { + return autoConvert_networking_ClusterCIDRList_To_v1alpha1_ClusterCIDRList(in, out, s) +} + +func autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error { + out.NodeSelector = (*core.NodeSelector)(unsafe.Pointer(in.NodeSelector)) + out.PerNodeHostBits = in.PerNodeHostBits + out.IPv4 = in.IPv4 + out.IPv6 = in.IPv6 + return nil +} + +// Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec is an autogenerated conversion function. +func Convert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in *v1alpha1.ClusterCIDRSpec, out *networking.ClusterCIDRSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ClusterCIDRSpec_To_networking_ClusterCIDRSpec(in, out, s) +} + +func autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error { + out.NodeSelector = (*v1.NodeSelector)(unsafe.Pointer(in.NodeSelector)) + out.PerNodeHostBits = in.PerNodeHostBits + out.IPv4 = in.IPv4 + out.IPv6 = in.IPv6 + return nil +} + +// Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec is an autogenerated conversion function. +func Convert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in *networking.ClusterCIDRSpec, out *v1alpha1.ClusterCIDRSpec, s conversion.Scope) error { + return autoConvert_networking_ClusterCIDRSpec_To_v1alpha1_ClusterCIDRSpec(in, out, s) +} diff --git a/pkg/apis/networking/v1alpha1/zz_generated.defaults.go b/pkg/apis/networking/v1alpha1/zz_generated.defaults.go new file mode 100644 index 00000000000..5070cb91b90 --- /dev/null +++ b/pkg/apis/networking/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,33 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/pkg/apis/networking/validation/validation.go b/pkg/apis/networking/validation/validation.go index 195b080b8a8..f0f7638ab49 100644 --- a/pkg/apis/networking/validation/validation.go +++ b/pkg/apis/networking/validation/validation.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + v1 "k8s.io/api/core/v1" apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation" pathvalidation "k8s.io/apimachinery/pkg/api/validation/path" unversionedvalidation "k8s.io/apimachinery/pkg/apis/meta/v1/validation" @@ -602,3 +603,89 @@ func allowInvalidWildcardHostRule(oldIngress *networking.Ingress) bool { } return false } + +// ValidateClusterCIDRName validates that the given name can be used as an +// ClusterCIDR name. +var ValidateClusterCIDRName = apimachineryvalidation.NameIsDNSLabel + +// ValidateClusterCIDR validates a ClusterCIDR. +func ValidateClusterCIDR(cc *networking.ClusterCIDR) field.ErrorList { + allErrs := apivalidation.ValidateObjectMeta(&cc.ObjectMeta, false, ValidateClusterCIDRName, field.NewPath("metadata")) + allErrs = append(allErrs, ValidateClusterCIDRSpec(&cc.Spec, field.NewPath("spec"))...) + return allErrs +} + +// ValidateClusterCIDRSpec validates ClusterCIDR Spec. +func ValidateClusterCIDRSpec(spec *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + if spec.NodeSelector != nil { + allErrs = append(allErrs, apivalidation.ValidateNodeSelector(spec.NodeSelector, fldPath.Child("nodeSelector"))...) + } + + // Validate if CIDR is specified for at least one IP Family(IPv4/IPv6). + if spec.IPv4 == "" && spec.IPv6 == "" { + allErrs = append(allErrs, field.Required(fldPath, "one or both of `ipv4` and `ipv6` must be specified")) + return allErrs + } + + // Validate specified IPv4 CIDR and PerNodeHostBits. + if spec.IPv4 != "" { + allErrs = append(allErrs, validateCIDRConfig(spec.IPv4, spec.PerNodeHostBits, 32, v1.IPv4Protocol, fldPath)...) + } + + // Validate specified IPv6 CIDR and PerNodeHostBits. + if spec.IPv6 != "" { + allErrs = append(allErrs, validateCIDRConfig(spec.IPv6, spec.PerNodeHostBits, 128, v1.IPv6Protocol, fldPath)...) 
+ } + + return allErrs +} + +func validateCIDRConfig(configCIDR string, perNodeHostBits, maxMaskSize int32, ipFamily v1.IPFamily, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + minPerNodeHostBits := int32(4) + + ip, ipNet, err := netutils.ParseCIDRSloppy(configCIDR) + if err != nil { + allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, fmt.Sprintf("must be a valid CIDR: %s", configCIDR))) + return allErrs + } + + if ipFamily == v1.IPv4Protocol && !netutils.IsIPv4(ip) { + allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv4 CIDR")) + } + if ipFamily == v1.IPv6Protocol && !netutils.IsIPv6(ip) { + allErrs = append(allErrs, field.Invalid(fldPath.Child(string(ipFamily)), configCIDR, "must be a valid IPv6 CIDR")) + } + + // Validate PerNodeHostBits + maskSize, _ := ipNet.Mask.Size() + maxPerNodeHostBits := maxMaskSize - int32(maskSize) + + if perNodeHostBits < minPerNodeHostBits { + allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be greater than or equal to %d", minPerNodeHostBits))) + } + if perNodeHostBits > maxPerNodeHostBits { + allErrs = append(allErrs, field.Invalid(fldPath.Child("perNodeHostBits"), perNodeHostBits, fmt.Sprintf("must be less than or equal to %d", maxPerNodeHostBits))) + } + return allErrs +} + +// ValidateClusterCIDRUpdate tests if an update to a ClusterCIDR is valid. +func ValidateClusterCIDRUpdate(update, old *networking.ClusterCIDR) field.ErrorList { + var allErrs field.ErrorList + allErrs = append(allErrs, apivalidation.ValidateObjectMetaUpdate(&update.ObjectMeta, &old.ObjectMeta, field.NewPath("metadata"))...) + allErrs = append(allErrs, validateClusterCIDRUpdateSpec(&update.Spec, &old.Spec, field.NewPath("spec"))...) + return allErrs +} + +func validateClusterCIDRUpdateSpec(update, old *networking.ClusterCIDRSpec, fldPath *field.Path) field.ErrorList { + var allErrs field.ErrorList + + allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.NodeSelector, old.NodeSelector, fldPath.Child("nodeSelector"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.PerNodeHostBits, old.PerNodeHostBits, fldPath.Child("perNodeHostBits"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv4, old.IPv4, fldPath.Child("ipv4"))...) + allErrs = append(allErrs, apivalidation.ValidateImmutableField(update.IPv6, old.IPv6, fldPath.Child("ipv6"))...) 
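+ // Every ClusterCIDRSpec field is immutable once set; changes to NodeSelector,
+ // PerNodeHostBits, IPv4 or IPv6 are all rejected by the checks above.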
+ + return allErrs +} diff --git a/pkg/apis/networking/validation/validation_test.go b/pkg/apis/networking/validation/validation_test.go index 6f1f55314c4..5399426c626 100644 --- a/pkg/apis/networking/validation/validation_test.go +++ b/pkg/apis/networking/validation/validation_test.go @@ -1982,3 +1982,216 @@ func TestValidateIngressStatusUpdate(t *testing.T) { } } } + +func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector { + return &api.NodeSelector{ + NodeSelectorTerms: []api.NodeSelectorTerm{ + { + MatchExpressions: []api.NodeSelectorRequirement{ + { + Key: key, + Operator: op, + Values: values, + }, + }, + }, + }, + } +} + +func makeClusterCIDR(perNodeHostBits int32, ipv4, ipv6 string, nodeSelector *api.NodeSelector) *networking.ClusterCIDR { + return &networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + ResourceVersion: "9", + }, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4, + IPv6: ipv6, + NodeSelector: nodeSelector, + }, + } +} + +func TestValidateClusterCIDR(t *testing.T) { + testCases := []struct { + name string + cc *networking.ClusterCIDR + expectErr bool + }{ + { + name: "valid SingleStack IPv4 ClusterCIDR", + cc: makeClusterCIDR(8, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits = maxPerNodeHostBits", + cc: makeClusterCIDR(16, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid SingleStack IPv4 ClusterCIDR, perNodeHostBits > minPerNodeHostBits", + cc: makeClusterCIDR(4, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid SingleStack IPv6 ClusterCIDR", + cc: makeClusterCIDR(8, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits = maxPerNodeHostBit", + cc: makeClusterCIDR(64, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid SingleStack IPv6 ClusterCIDR, perNodeHostBits > minPerNodeHostBit", + cc: makeClusterCIDR(4, "", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid SingleStack IPv6 ClusterCIDR perNodeHostBits=100", + cc: makeClusterCIDR(100, "", "fd00:1:1::/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid DualStack ClusterCIDR", + cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "valid DualStack ClusterCIDR, no NodeSelector", + cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", nil), + expectErr: false, + }, + // Failure cases. + { + name: "invalid ClusterCIDR, no IPv4 or IPv6 CIDR", + cc: makeClusterCIDR(8, "", "", nil), + expectErr: true, + }, + { + name: "invalid ClusterCIDR, invalid nodeSelector", + cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("NoUppercaseOrSpecialCharsLike=Equals", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + // IPv4 tests. 
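+ // For 10.1.0.0/16, the valid perNodeHostBits range is 4 to 16 (32 minus the prefix length).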
+ { + name: "invalid SingleStack IPv4 ClusterCIDR, invalid spec.IPv4", + cc: makeClusterCIDR(8, "test", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid Singlestack IPv4 ClusterCIDR, perNodeHostBits > maxPerNodeHostBits", + cc: makeClusterCIDR(100, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid SingleStack IPv4 ClusterCIDR, perNodeHostBits < minPerNodeHostBits", + cc: makeClusterCIDR(2, "10.1.0.0/16", "", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + // IPv6 tests. + { + name: "invalid SingleStack IPv6 ClusterCIDR, invalid spec.IPv6", + cc: makeClusterCIDR(8, "", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid SingleStack IPv6 ClusterCIDR, valid IPv4 CIDR in spec.IPv6", + cc: makeClusterCIDR(8, "", "10.2.0.0/16", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits", + cc: makeClusterCIDR(12, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid SingleStack IPv6 ClusterCIDR, invalid perNodeHostBits < minPerNodeHostBits", + cc: makeClusterCIDR(3, "", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + // DualStack tests + { + name: "invalid DualStack ClusterCIDR, valid spec.IPv4, invalid spec.IPv6", + cc: makeClusterCIDR(8, "10.1.0.0/16", "testv6", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid DualStack ClusterCIDR, valid spec.IPv6, invalid spec.IPv4", + cc: makeClusterCIDR(8, "testv4", "fd00::/120", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid DualStack ClusterCIDR, invalid perNodeHostBits > maxPerNodeHostBits", + cc: makeClusterCIDR(24, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "invalid DualStack ClusterCIDR, valid IPv6 CIDR in spec.IPv4", + cc: makeClusterCIDR(8, "fd00::/120", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + } + + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + err := ValidateClusterCIDR(testCase.cc) + if !testCase.expectErr && err != nil { + t.Errorf("ValidateClusterCIDR(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err) + } + if testCase.expectErr && err == nil { + t.Errorf("ValidateClusterCIDR(%+v) must return an error for test: %s, but got nil", testCase.cc, testCase.name) + } + }) + } +} + +func TestValidateClusterConfigUpdate(t *testing.T) { + oldCCC := makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})) + + testCases := []struct { + name string + cc *networking.ClusterCIDR + expectErr bool + }{ + { + name: "Successful update, no changes to ClusterCIDR.Spec", + cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: false, + }, + { + name: "Failed update, update spec.PerNodeHostBits", + cc: makeClusterCIDR(12, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", 
api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "Failed update, update spec.IPv4", + cc: makeClusterCIDR(8, "10.2.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "Failed update, update spec.IPv6", + cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:2:/112", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"})), + expectErr: true, + }, + { + name: "Failed update, update spec.NodeSelector", + cc: makeClusterCIDR(8, "10.1.0.0/16", "fd00:1:1::/64", makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar2"})), + expectErr: true, + }, + } + for _, testCase := range testCases { + t.Run(testCase.name, func(t *testing.T) { + err := ValidateClusterCIDRUpdate(testCase.cc, oldCCC) + if !testCase.expectErr && err != nil { + t.Errorf("ValidateClusterCIDRUpdate(%+v) must be successful for test '%s', got %v", testCase.cc, testCase.name, err) + } + if testCase.expectErr && err == nil { + t.Errorf("ValidateClusterCIDRUpdate(%+v) must return error for test: %s, but got nil", testCase.cc, testCase.name) + } + }) + } +} diff --git a/pkg/apis/networking/zz_generated.deepcopy.go b/pkg/apis/networking/zz_generated.deepcopy.go index 34df7bcacb8..bc5421027cd 100644 --- a/pkg/apis/networking/zz_generated.deepcopy.go +++ b/pkg/apis/networking/zz_generated.deepcopy.go @@ -28,6 +28,87 @@ import ( core "k8s.io/kubernetes/pkg/apis/core" ) +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. +func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { + if in == nil { + return nil + } + out := new(ClusterCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. +func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { + if in == nil { + return nil + } + out := new(ClusterCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
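+// NodeSelector is the only pointer field in ClusterCIDRSpec; PerNodeHostBits and the
+// CIDR strings are value types and are already copied by the initial *out = *in.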
+func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(core.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. +func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { + if in == nil { + return nil + } + out := new(ClusterCIDRSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in diff --git a/pkg/controller/nodeipam/ipam/cidr_allocator.go b/pkg/controller/nodeipam/ipam/cidr_allocator.go index 543a7797f13..4ca058eefcf 100644 --- a/pkg/controller/nodeipam/ipam/cidr_allocator.go +++ b/pkg/controller/nodeipam/ipam/cidr_allocator.go @@ -22,16 +22,18 @@ import ( "net" "time" - "k8s.io/klog/v2" - "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" informers "k8s.io/client-go/informers/core/v1" + networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" clientset "k8s.io/client-go/kubernetes" cloudprovider "k8s.io/cloud-provider" + "k8s.io/klog/v2" + "k8s.io/kubernetes/pkg/features" ) // CIDRAllocatorType is the type of the allocator to use. @@ -41,6 +43,9 @@ const ( // RangeAllocatorType is the allocator that uses an internal CIDR // range allocator to do node CIDR range allocations. RangeAllocatorType CIDRAllocatorType = "RangeAllocator" + // MultiCIDRRangeAllocatorType is the allocator that uses an internal CIDR + // range allocator to do node CIDR range allocations. + MultiCIDRRangeAllocatorType CIDRAllocatorType = "MultiCIDRRangeAllocator" // CloudAllocatorType is the allocator that uses cloud platform // support to do node CIDR range allocations. CloudAllocatorType CIDRAllocatorType = "CloudAllocator" @@ -87,7 +92,7 @@ type CIDRAllocator interface { // CIDR if it doesn't currently have one or mark the CIDR as used if // the node already have one. AllocateOrOccupyCIDR(node *v1.Node) error - // ReleaseCIDR releases the CIDR of the removed node + // ReleaseCIDR releases the CIDR of the removed node. ReleaseCIDR(node *v1.Node) error // Run starts all the working logic of the allocator. Run(stopCh <-chan struct{}) @@ -96,18 +101,25 @@ type CIDRAllocator interface { // CIDRAllocatorParams is parameters that's required for creating new // cidr range allocator. type CIDRAllocatorParams struct { - // ClusterCIDRs is list of cluster cidrs + // ClusterCIDRs is list of cluster cidrs. ClusterCIDRs []*net.IPNet - // ServiceCIDR is primary service cidr for cluster + // ServiceCIDR is primary service cidr for cluster. ServiceCIDR *net.IPNet - // SecondaryServiceCIDR is secondary service cidr for cluster + // SecondaryServiceCIDR is secondary service cidr for cluster. SecondaryServiceCIDR *net.IPNet - // NodeCIDRMaskSizes is list of node cidr mask sizes + // NodeCIDRMaskSizes is list of node cidr mask sizes. NodeCIDRMaskSizes []int } +// CIDRs are reserved, then node resource is patched with them. +// nodeReservedCIDRs holds the reservation info for a node. +type nodeReservedCIDRs struct { + allocatedCIDRs []*net.IPNet + nodeName string +} + // New creates a new CIDR range allocator. 
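+// The clusterCIDRInformer argument is consumed only by the MultiCIDRRangeAllocator;
+// the RangeAllocator and CloudAllocator types ignore it.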
-func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { +func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInformer informers.NodeInformer, clusterCIDRInformer networkinginformers.ClusterCIDRInformer, allocatorType CIDRAllocatorType, allocatorParams CIDRAllocatorParams) (CIDRAllocator, error) { nodeList, err := listNodes(kubeClient) if err != nil { return nil, err @@ -116,6 +128,12 @@ func New(kubeClient clientset.Interface, cloud cloudprovider.Interface, nodeInfo switch allocatorType { case RangeAllocatorType: return NewCIDRRangeAllocator(kubeClient, nodeInformer, allocatorParams, nodeList) + case MultiCIDRRangeAllocatorType: + if !utilfeature.DefaultFeatureGate.Enabled(features.MultiCIDRRangeAllocator) { + return nil, fmt.Errorf("invalid CIDR allocator type: %v, feature gate %v must be enabled", allocatorType, features.MultiCIDRRangeAllocator) + } + return NewMultiCIDRRangeAllocator(kubeClient, nodeInformer, clusterCIDRInformer, allocatorParams, nodeList, nil) + case CloudAllocatorType: return NewCloudCIDRAllocator(kubeClient, cloud, nodeInformer) default: @@ -144,3 +162,12 @@ func listNodes(kubeClient clientset.Interface) (*v1.NodeList, error) { } return nodeList, nil } + +// ipnetToStringList converts a slice of net.IPNet into a list of CIDR in string format +func ipnetToStringList(inCIDRs []*net.IPNet) []string { + outCIDRs := make([]string, len(inCIDRs)) + for idx, inCIDR := range inCIDRs { + outCIDRs[idx] = inCIDR.String() + } + return outCIDRs +} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go new file mode 100644 index 00000000000..1c3eedc7d17 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue.go @@ -0,0 +1,140 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "math" + + cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" +) + +// A PriorityQueue implementation based on https://pkg.go.dev/container/heap#example-package-PriorityQueue + +// An PriorityQueueItem is something we manage in a priority queue. +type PriorityQueueItem struct { + clusterCIDR *cidrset.ClusterCIDR + // labelMatchCount is the first determinant of priority. + labelMatchCount int + // selectorString is a string representation of the labelSelector associated with the cidrSet. + selectorString string + // index is needed by update and is maintained by the heap.Interface methods. + index int // The index of the item in the heap. +} + +// A PriorityQueue implements heap.Interface and holds PriorityQueueItems. +type PriorityQueue []*PriorityQueueItem + +func (pq PriorityQueue) Len() int { return len(pq) } + +// Less compares the priority queue items, to store in a min heap. +// Less(i,j) == true denotes i has higher priority than j. 
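+// Summary of the ordering (highest priority first): more matching node-selector labels,
+// then a CIDR set with fewer total allocatable node CIDRs, then smaller per-node CIDRs
+// (i.e. a larger per-node mask), then the lexically smaller selector string, and finally
+// the lexically smaller CIDR label.
+//
+// Typical use with container/heap (mirrors orderedMatchingClusterCIDRs and the tests):
+//
+//	pq := make(PriorityQueue, 0)
+//	heap.Push(&pq, item) // item is a *PriorityQueueItem built by the caller
+//	best := heap.Pop(&pq).(*PriorityQueueItem)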
+func (pq PriorityQueue) Less(i, j int) bool { + if pq[i].labelMatchCount != pq[j].labelMatchCount { + // P0: CidrSet with higher number of matching labels has the highest priority. + return pq[i].labelMatchCount > pq[j].labelMatchCount + } + + // If the count of matching labels is equal, compare the max allocatable pod CIDRs. + if pq[i].maxAllocatable() != pq[j].maxAllocatable() { + // P1: CidrSet with fewer allocatable pod CIDRs has higher priority. + return pq[i].maxAllocatable() < pq[j].maxAllocatable() + } + + // If the value of allocatable pod CIDRs is equal, compare the node mask size. + if pq[i].nodeMaskSize() != pq[j].nodeMaskSize() { + // P2: CidrSet with a PerNodeMaskSize having fewer IPs has higher priority. + // For example, `27` (32 IPs) picked before `25` (128 IPs). + return pq[i].nodeMaskSize() > pq[j].nodeMaskSize() + } + + // If the per node mask size are equal compare the CIDR labels. + if pq[i].selectorString != pq[j].selectorString { + // P3: CidrSet having label with lower alphanumeric value has higher priority. + return pq[i].selectorString < pq[j].selectorString + } + + // P4: CidrSet having an alpha-numerically smaller IP address value has a higher priority. + return pq[i].cidrLabel() < pq[j].cidrLabel() +} + +func (pq PriorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] + pq[i].index = i + pq[j].index = j +} + +func (pq *PriorityQueue) Push(x interface{}) { + n := len(*pq) + if item, ok := x.(*PriorityQueueItem); ok { + item.index = n + *pq = append(*pq, item) + } +} + +func (pq *PriorityQueue) Pop() interface{} { + old := *pq + n := len(old) + item := old[n-1] + old[n-1] = nil // avoid memory leak. + item.index = -1 // for safety. + *pq = old[0 : n-1] + return item +} + +// maxAllocatable computes the minimum value of the MaxCIDRs for a ClusterCIDR. +// It compares the MaxCIDRs for each CIDR family and returns the minimum. +// e.g. IPv4 - 10.0.0.0/16 PerNodeMaskSize: 24 MaxCIDRs = 256 +// IPv6 - ff:ff::/120 PerNodeMaskSize: 120 MaxCIDRs = 1 +// MaxAllocatable for this ClusterCIDR = 1 +func (pqi *PriorityQueueItem) maxAllocatable() int { + ipv4Allocatable := math.MaxInt + ipv6Allocatable := math.MaxInt + + if pqi.clusterCIDR.IPv4CIDRSet != nil { + ipv4Allocatable = pqi.clusterCIDR.IPv4CIDRSet.MaxCIDRs + } + + if pqi.clusterCIDR.IPv6CIDRSet != nil { + ipv6Allocatable = pqi.clusterCIDR.IPv6CIDRSet.MaxCIDRs + } + + if ipv4Allocatable < ipv6Allocatable { + return ipv4Allocatable + } + + return ipv6Allocatable +} + +// nodeMaskSize returns IPv4 NodeMaskSize if present, else returns IPv6 NodeMaskSize. +// Note the requirement: 32 - IPv4 NodeMaskSize == 128 - IPv6 NodeMaskSize +// Due to the above requirement it does not matter which NodeMaskSize we compare. +func (pqi *PriorityQueueItem) nodeMaskSize() int { + if pqi.clusterCIDR.IPv4CIDRSet != nil { + return pqi.clusterCIDR.IPv4CIDRSet.NodeMaskSize + } + + return pqi.clusterCIDR.IPv6CIDRSet.NodeMaskSize +} + +// cidrLabel returns IPv4 CIDR if present, else returns IPv6 CIDR. +func (pqi *PriorityQueueItem) cidrLabel() string { + if pqi.clusterCIDR.IPv4CIDRSet != nil { + return pqi.clusterCIDR.IPv4CIDRSet.Label + } + + return pqi.clusterCIDR.IPv6CIDRSet.Label +} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go new file mode 100644 index 00000000000..357592f6ba1 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multi_cidr_priority_queue_test.go @@ -0,0 +1,170 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "container/heap" + "testing" + + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" + utilnet "k8s.io/utils/net" +) + +func createTestPriorityQueueItem(name, cidr, selectorString string, labelMatchCount, perNodeHostBits int) *PriorityQueueItem { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr) + cidrSet, _ := multicidrset.NewMultiCIDRSet(clusterCIDR, perNodeHostBits) + + return &PriorityQueueItem{ + clusterCIDR: &multicidrset.ClusterCIDR{ + Name: name, + IPv4CIDRSet: cidrSet, + }, + labelMatchCount: labelMatchCount, + selectorString: selectorString, + } +} + +func TestPriorityQueue(t *testing.T) { + + pqi1 := createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8) + pqi2 := createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8) + pqi3 := createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8) + pqi4 := createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6) + pqi5 := createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6) + pqi6 := createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6) + + for _, testQueue := range []struct { + name string + items []*PriorityQueueItem + want *PriorityQueueItem + }{ + {"Test queue with single item", []*PriorityQueueItem{pqi1}, pqi1}, + {"Test queue with items having different labelMatchCount", []*PriorityQueueItem{pqi1, pqi2}, pqi2}, + {"Test queue with items having same labelMatchCount, different max Allocatable Pod CIDRs", []*PriorityQueueItem{pqi1, pqi2, pqi3}, pqi2}, + {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, different PerNodeMaskSize", []*PriorityQueueItem{pqi1, pqi2, pqi4}, pqi4}, + {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5}, pqi4}, + {"Test queue with items having same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses", []*PriorityQueueItem{pqi1, pqi2, pqi4, pqi5, pqi6}, pqi4}, + } { + pq := make(PriorityQueue, 0) + for _, pqi := range testQueue.items { + heap.Push(&pq, pqi) + } + + got := heap.Pop(&pq) + + if got != testQueue.want { + t.Errorf("Error, wanted: %+v, got: %+v", testQueue.want, got) + } + } +} + +func TestLess(t *testing.T) { + + for _, testQueue := range []struct { + name string + items []*PriorityQueueItem + want bool + }{ + { + name: "different labelMatchCount, i higher priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 2, 8), + createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 1, 8), + }, + want: true, + }, + { + name: "different labelMatchCount, i lower priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr1", "192.168.0.0/16", "foo=bar,name=test1", 1, 8), + 
createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), + }, + want: false, + }, + { + name: "same labelMatchCount, different max allocatable cidrs, i higher priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), + createTestPriorityQueueItem("cidr3", "172.16.0.0/16", "foo=bar,name=test3", 2, 8), + }, + want: true, + }, + { + name: "same labelMatchCount, different max allocatable cidrs, i lower priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr2", "10.1.0.0/16", "foo=bar,name=test2", 2, 8), + createTestPriorityQueueItem("cidr3", "172.16.0.0/24", "foo=bar,name=test3", 2, 8), + }, + want: false, + }, + { + name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i higher priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr2", "10.1.0.0/26", "foo=bar,name=test2", 2, 6), + createTestPriorityQueueItem("cidr4", "10.1.1.0/24", "abc=bar,name=test4", 2, 8), + }, + want: true, + }, + { + name: "same labelMatchCount, max allocatable cidrs, different PerNodeMaskSize i lower priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr2", "10.1.0.0/24", "foo=bar,name=test2", 2, 8), + createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), + }, + want: false, + }, + { + name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i higher priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), + createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6), + }, + want: true, + }, + { + name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, different labels i lower priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6), + createTestPriorityQueueItem("cidr5", "10.1.2.0/26", "foo=bar,name=test5", 2, 6), + }, + want: false, + }, + { + name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i higher priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "abc=bar,name=test4", 2, 6), + createTestPriorityQueueItem("cidr6", "10.1.3.0/26", "abc=bar,name=test4", 2, 6), + }, + want: true, + }, + { + name: "same labelMatchCount, max Allocatable Pod CIDRs, PerNodeMaskSize, labels, different IP addresses i lower priority than j", + items: []*PriorityQueueItem{ + createTestPriorityQueueItem("cidr4", "10.1.1.0/26", "xyz=bar,name=test4", 2, 6), + createTestPriorityQueueItem("cidr6", "10.0.3.0/26", "abc=bar,name=test4", 2, 6), + }, + want: false, + }, + } { + var pq PriorityQueue + pq = testQueue.items + got := pq.Less(0, 1) + if got != testQueue.want { + t.Errorf("Error, wanted: %v, got: %v\nTest %q \npq[0]: %+v \npq[1]: %+v ", testQueue.want, got, testQueue.name, pq[0], pq[1]) + } + } +} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go new file mode 100644 index 00000000000..5fb96887df2 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator.go @@ -0,0 +1,1205 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "container/heap" + "context" + "errors" + "fmt" + "math" + "math/rand" + "net" + "sync" + "time" + + "k8s.io/api/core/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + informers "k8s.io/client-go/informers/core/v1" + networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" + clientset "k8s.io/client-go/kubernetes" + "k8s.io/client-go/kubernetes/scheme" + v1core "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" + networkinglisters "k8s.io/client-go/listers/networking/v1alpha1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" + "k8s.io/component-base/metrics/prometheus/ratelimiter" + nodeutil "k8s.io/component-helpers/node/util" + "k8s.io/klog/v2" + v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper" + cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" + controllerutil "k8s.io/kubernetes/pkg/controller/util/node" + "k8s.io/kubernetes/pkg/util/slice" + netutil "k8s.io/utils/net" +) + +const ( + defaultClusterCIDRKey = "kubernetes.io/clusterCIDR" + defaultClusterCIDRValue = "default" + defaultClusterCIDRName = "default-cluster-cidr" + defaultClusterCIDRAPIVersion = "networking.k8s.io/v1alpha1" + clusterCIDRFinalizer = "networking.k8s.io/cluster-cidr-finalizer" + ipv4MaxCIDRMask = 32 + ipv6MaxCIDRMask = 128 + minPerNodeHostBits = 4 +) + +// CIDRs are reserved, then node resource is patched with them. +// multiCIDRNodeReservedCIDRs holds the reservation info for a node. +type multiCIDRNodeReservedCIDRs struct { + nodeReservedCIDRs + clusterCIDR *cidrset.ClusterCIDR +} + +// multiCIDRNodeProcessingInfo tracks information related to current nodes in processing +type multiCIDRNodeProcessingInfo struct { + retries int +} + +type multiCIDRRangeAllocator struct { + client clientset.Interface + // nodeLister is able to list/get nodes and is populated by the shared informer passed to controller. + nodeLister corelisters.NodeLister + // nodesSynced returns true if the node shared informer has been synced at least once. + nodesSynced cache.InformerSynced + // clusterCIDRLister is able to list/get clustercidrs and is populated by the shared informer passed to controller. + clusterCIDRLister networkinglisters.ClusterCIDRLister + // clusterCIDRSynced returns true if the clustercidr shared informer has been synced at least once. + clusterCIDRSynced cache.InformerSynced + // Channel that is used to pass updating Nodes and their reserved CIDRs to the background. + // This increases a throughput of CIDR assignment by not blocking on long operations. 
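+ // The channel is buffered (cidrUpdateQueueSize) and drained by the worker goroutines started in Run.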
+ nodeCIDRUpdateChannel chan multiCIDRNodeReservedCIDRs + recorder record.EventRecorder + // queue is where incoming work is placed to de-dup and to allow "easy" + // rate limited requeues on errors + queue workqueue.RateLimitingInterface + + // lock guards nodesInProcessing and cidrMap to avoid races in CIDR allocation. + lock *sync.Mutex + // nodesInProcessing is a set of nodes that are currently being processed. + nodesInProcessing map[string]*multiCIDRNodeProcessingInfo + // cidrMap maps ClusterCIDR labels to internal ClusterCIDR objects. + cidrMap map[string][]*cidrset.ClusterCIDR +} + +// NewMultiCIDRRangeAllocator returns a CIDRAllocator to allocate CIDRs for node (one for each ip family). +// Caller must always pass in a list of existing nodes to the new allocator. +// NodeList is only nil in testing. +func NewMultiCIDRRangeAllocator( + client clientset.Interface, + nodeInformer informers.NodeInformer, + clusterCIDRInformer networkinginformers.ClusterCIDRInformer, + allocatorParams CIDRAllocatorParams, + nodeList *v1.NodeList, + testCIDRMap map[string][]*cidrset.ClusterCIDR, +) (CIDRAllocator, error) { + if client == nil { + klog.Fatalf("client is nil") + } + + eventBroadcaster := record.NewBroadcaster() + eventSource := v1.EventSource{ + Component: "multiCIDRRangeAllocator", + } + recorder := eventBroadcaster.NewRecorder(scheme.Scheme, eventSource) + eventBroadcaster.StartStructuredLogging(0) + klog.V(0).Infof("Started sending events to API Server. (EventSource = %v)", eventSource) + + eventBroadcaster.StartRecordingToSink( + &v1core.EventSinkImpl{ + Interface: client.CoreV1().Events(""), + }) + + if client.CoreV1().RESTClient().GetRateLimiter() != nil { + ratelimiter.RegisterMetricAndTrackRateLimiterUsage("multi_cidr_range_allocator", client.CoreV1().RESTClient().GetRateLimiter()) + } + + ra := &multiCIDRRangeAllocator{ + client: client, + nodeLister: nodeInformer.Lister(), + nodesSynced: nodeInformer.Informer().HasSynced, + clusterCIDRLister: clusterCIDRInformer.Lister(), + clusterCIDRSynced: clusterCIDRInformer.Informer().HasSynced, + nodeCIDRUpdateChannel: make(chan multiCIDRNodeReservedCIDRs, cidrUpdateQueueSize), + recorder: recorder, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "multi_cidr_range_allocator"), + lock: &sync.Mutex{}, + nodesInProcessing: map[string]*multiCIDRNodeProcessingInfo{}, + cidrMap: make(map[string][]*cidrset.ClusterCIDR, 0), + } + + // testCIDRMap is only set for testing purposes. + if len(testCIDRMap) > 0 { + ra.cidrMap = testCIDRMap + klog.Warningf("testCIDRMap should only be set for testing purposes, if this is seen in production logs, it might be a misconfiguration or a bug.") + } + + ccList, err := listClusterCIDRs(client) + if err != nil { + return nil, err + } + + if ccList == nil { + ccList = &networkingv1alpha1.ClusterCIDRList{} + } + createDefaultClusterCIDR(ccList, allocatorParams) + + // Regenerate the cidrMaps from the existing ClusterCIDRs. + for _, clusterCIDR := range ccList.Items { + klog.Infof("Regenerating existing ClusterCIDR: %v", clusterCIDR) + // Create an event for invalid ClusterCIDRs, do not crash on failures. 
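+ // Errors here are logged and recorded as events rather than returned, so a single
+ // invalid ClusterCIDR does not prevent the allocator from starting.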
+ if err := ra.reconcileBootstrap(&clusterCIDR); err != nil { + klog.Errorf("Error while regenerating existing ClusterCIDR: %v", err) + ra.recorder.Event(&clusterCIDR, "Warning", "InvalidClusterCIDR encountered while regenerating ClusterCIDR during bootstrap.", err.Error()) + } + } + + clusterCIDRInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: createClusterCIDRHandler(ra.reconcileCreate), + DeleteFunc: createClusterCIDRHandler(ra.reconcileDelete), + }) + + if allocatorParams.ServiceCIDR != nil { + ra.filterOutServiceRange(allocatorParams.ServiceCIDR) + } else { + klog.V(0).Info("No Service CIDR provided. Skipping filtering out service addresses.") + } + + if allocatorParams.SecondaryServiceCIDR != nil { + ra.filterOutServiceRange(allocatorParams.SecondaryServiceCIDR) + } else { + klog.V(0).Info("No Secondary Service CIDR provided. Skipping filtering out secondary service addresses.") + } + + if nodeList != nil { + for _, node := range nodeList.Items { + if len(node.Spec.PodCIDRs) == 0 { + klog.V(4).Infof("Node %v has no CIDR, ignoring", node.Name) + continue + } + klog.V(0).Infof("Node %v has CIDR %s, occupying it in CIDR map", node.Name, node.Spec.PodCIDRs) + if err := ra.occupyCIDRs(&node); err != nil { + // This will happen if: + // 1. We find garbage in the podCIDRs field. Retrying is useless. + // 2. CIDR out of range: This means ClusterCIDR is not yet created + // This error will keep crashing controller-manager until the + // appropriate ClusterCIDR has been created + return nil, err + } + } + } + + nodeInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: controllerutil.CreateAddNodeHandler(ra.AllocateOrOccupyCIDR), + UpdateFunc: controllerutil.CreateUpdateNodeHandler(func(_, newNode *v1.Node) error { + // If the PodCIDRs list is not empty we either: + // - already processed a Node that already had CIDRs after NC restarted + // (cidr is marked as used), + // - already processed a Node successfully and allocated CIDRs for it + // (cidr is marked as used), + // - already processed a Node but we saw a "timeout" response and + // request eventually got through in this case we haven't released + // the allocated CIDRs (cidr is still marked as used). + // There's a possible error here: + // - NC sees a new Node and assigns CIDRs X,Y.. to it, + // - Update Node call fails with a timeout, + // - Node is updated by some other component, NC sees an update and + // assigns CIDRs A,B.. to the Node, + // - Both CIDR X,Y.. and CIDR A,B.. are marked as used in the local cache, + // even though Node sees only CIDR A,B.. + // The problem here is that in in-memory cache we see CIDR X,Y.. as marked, + // which prevents it from being assigned to any new node. The cluster + // state is correct. + // Restart of NC fixes the issue. + if len(newNode.Spec.PodCIDRs) == 0 { + return ra.AllocateOrOccupyCIDR(newNode) + } + return nil + }), + DeleteFunc: controllerutil.CreateDeleteNodeHandler(ra.ReleaseCIDR), + }) + + return ra, nil +} + +func (r *multiCIDRRangeAllocator) Run(stopCh <-chan struct{}) { + defer utilruntime.HandleCrash() + + klog.Infof("Starting Multi CIDR Range allocator") + defer klog.Infof("Shutting down Multi CIDR Range allocator") + + if !cache.WaitForNamedCacheSync("multi_cidr_range_allocator", stopCh, r.nodesSynced, r.clusterCIDRSynced) { + return + } + + // raWaitGroup is used to wait for the RangeAllocator to finish the goroutines. 
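+ // cidrUpdateWorkers worker goroutines are started below; Run returns only after they
+ // have all exited and stopCh has been closed.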
+ var raWaitGroup sync.WaitGroup + + for i := 0; i < cidrUpdateWorkers; i++ { + raWaitGroup.Add(1) + go func() { + defer raWaitGroup.Done() + r.worker(stopCh) + }() + } + + raWaitGroup.Wait() + + <-stopCh +} + +func (r *multiCIDRRangeAllocator) worker(stopChan <-chan struct{}) { + for { + select { + case workItem, ok := <-r.nodeCIDRUpdateChannel: + if !ok { + klog.Error("Channel nodeCIDRUpdateChannel was unexpectedly closed") + return + } + r.lock.Lock() + if err := r.updateCIDRsAllocation(workItem); err == nil { + klog.V(3).Infof("Updated CIDR for %q", workItem.nodeName) + } else { + klog.Errorf("Error updating CIDR for %q: %v", workItem.nodeName, err) + if canRetry, timeout := r.retryParams(workItem.nodeName); canRetry { + klog.V(2).Infof("Retrying update for %q after %v", workItem.nodeName, timeout) + time.AfterFunc(timeout, func() { + // Requeue the failed node for update again. + r.nodeCIDRUpdateChannel <- workItem + }) + continue + } + klog.Errorf("Exceeded retry count for %q, dropping from queue", workItem.nodeName) + } + r.removeNodeFromProcessing(workItem.nodeName) + r.lock.Unlock() + case <-stopChan: + klog.Infof("MultiCIDRRangeAllocator worker is stopping.") + return + } + } +} + +// createClusterCIDRHandler creates clusterCIDR handler. +func createClusterCIDRHandler(f func(ccc *networkingv1alpha1.ClusterCIDR) error) func(obj interface{}) { + return func(originalObj interface{}) { + ccc := originalObj.(*networkingv1alpha1.ClusterCIDR) + if err := f(ccc); err != nil { + utilruntime.HandleError(fmt.Errorf("error while processing ClusterCIDR Add/Delete: %w", err)) + } + } +} + +// needToAddFinalizer checks if a finalizer should be added to the object. +func needToAddFinalizer(obj metav1.Object, finalizer string) bool { + return obj.GetDeletionTimestamp() == nil && !slice.ContainsString(obj.GetFinalizers(), + finalizer, nil) +} + +func (r *multiCIDRRangeAllocator) syncClusterCIDR(key string) error { + startTime := time.Now() + defer func() { + klog.V(4).Infof("Finished syncing clusterCIDR request %q (%v)", key, time.Since(startTime)) + }() + + clusterCIDR, err := r.clusterCIDRLister.Get(key) + if apierrors.IsNotFound(err) { + klog.V(3).Infof("clusterCIDR has been deleted: %v", key) + return nil + } + + if err != nil { + return err + } + + // Check the DeletionTimestamp to determine if object is under deletion. 
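+ // A ClusterCIDR with a non-zero DeletionTimestamp is handed to reconcileDelete;
+ // otherwise it is (re)reconciled via reconcileCreate.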
+ if !clusterCIDR.DeletionTimestamp.IsZero() { + return r.reconcileDelete(clusterCIDR) + } + return r.reconcileCreate(clusterCIDR) +} + +func (r *multiCIDRRangeAllocator) insertNodeToProcessing(nodeName string) bool { + if _, found := r.nodesInProcessing[nodeName]; found { + return false + } + r.nodesInProcessing[nodeName] = &multiCIDRNodeProcessingInfo{} + return true +} + +func (r *multiCIDRRangeAllocator) removeNodeFromProcessing(nodeName string) { + klog.Infof("Removing node %q from processing", nodeName) + delete(r.nodesInProcessing, nodeName) +} + +func (r *multiCIDRRangeAllocator) retryParams(nodeName string) (bool, time.Duration) { + r.lock.Lock() + defer r.lock.Unlock() + + entry, ok := r.nodesInProcessing[nodeName] + if !ok { + klog.Errorf("Cannot get retryParams for %q as entry does not exist", nodeName) + return false, 0 + } + + count := entry.retries + 1 + if count > updateMaxRetries { + return false, 0 + } + r.nodesInProcessing[nodeName].retries = count + + return true, multiCIDRNodeUpdateRetryTimeout(count) +} + +func multiCIDRNodeUpdateRetryTimeout(count int) time.Duration { + timeout := updateRetryTimeout + for i := 0; i < count && timeout < maxUpdateRetryTimeout; i++ { + timeout *= 2 + } + if timeout > maxUpdateRetryTimeout { + timeout = maxUpdateRetryTimeout + } + return time.Duration(timeout.Nanoseconds()/2 + rand.Int63n(timeout.Nanoseconds())) +} + +// occupyCIDRs marks node.PodCIDRs[...] as used in allocator's tracked cidrSet. +func (r *multiCIDRRangeAllocator) occupyCIDRs(node *v1.Node) error { + + err := func(node *v1.Node) error { + + if len(node.Spec.PodCIDRs) == 0 { + return nil + } + + clusterCIDRList, err := r.orderedMatchingClusterCIDRs(node) + if err != nil { + return err + } + + for _, clusterCIDR := range clusterCIDRList { + occupiedCount := 0 + + for _, cidr := range node.Spec.PodCIDRs { + _, podCIDR, err := netutil.ParseCIDRSloppy(cidr) + if err != nil { + return fmt.Errorf("failed to parse CIDR %s on Node %v: %w", cidr, node.Name, err) + } + + klog.Infof("occupy CIDR %s for node: %s", cidr, node.Name) + + if err := r.Occupy(clusterCIDR, podCIDR); err != nil { + klog.V(3).Infof("Could not occupy cidr: %v, trying next range: %w", node.Spec.PodCIDRs, err) + break + } + + occupiedCount++ + } + + // Mark CIDRs as occupied only if the CCC is able to occupy all the node CIDRs. + if occupiedCount == len(node.Spec.PodCIDRs) { + clusterCIDR.AssociatedNodes[node.Name] = true + return nil + } + } + + return fmt.Errorf("could not occupy cidrs: %v, No matching ClusterCIDRs found", node.Spec.PodCIDRs) + }(node) + + r.removeNodeFromProcessing(node.Name) + return err +} + +// associatedCIDRSet returns the CIDRSet, based on the ip family of the CIDR. +func (r *multiCIDRRangeAllocator) associatedCIDRSet(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) (*cidrset.MultiCIDRSet, error) { + switch { + case netutil.IsIPv4CIDR(cidr): + return clusterCIDR.IPv4CIDRSet, nil + case netutil.IsIPv6CIDR(cidr): + return clusterCIDR.IPv6CIDRSet, nil + default: + return nil, fmt.Errorf("invalid cidr: %v", cidr) + } +} + +// Occupy marks the CIDR as occupied in the allocatedCIDRMap of the cidrSet. 
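+// The CIDR's IP family selects which of the ClusterCIDR's two per-family CIDR sets is updated.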
+func (r *multiCIDRRangeAllocator) Occupy(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) error { + currCIDRSet, err := r.associatedCIDRSet(clusterCIDR, cidr) + if err != nil { + return err + } + + if err := currCIDRSet.Occupy(cidr); err != nil { + return fmt.Errorf("unable to occupy cidr %v in cidrSet", cidr) + } + + return nil +} + +// Release marks the CIDR as free in the cidrSet used bitmap, +// Also removes the CIDR from the allocatedCIDRSet. +func (r *multiCIDRRangeAllocator) Release(clusterCIDR *cidrset.ClusterCIDR, cidr *net.IPNet) error { + currCIDRSet, err := r.associatedCIDRSet(clusterCIDR, cidr) + if err != nil { + return err + } + + if err := currCIDRSet.Release(cidr); err != nil { + klog.Infof("Unable to release cidr %v in cidrSet", cidr) + return err + } + + return nil +} + +// AllocateOrOccupyCIDR allocates a CIDR to the node if the node doesn't have a +// CIDR already allocated, occupies the CIDR and marks as used if the node +// already has a PodCIDR assigned. +// WARNING: If you're adding any return calls or defer any more work from this +// function you have to make sure to update nodesInProcessing properly with the +// disposition of the node when the work is done. +func (r *multiCIDRRangeAllocator) AllocateOrOccupyCIDR(node *v1.Node) error { + r.lock.Lock() + defer r.lock.Unlock() + + if node == nil { + return nil + } + + if !r.insertNodeToProcessing(node.Name) { + klog.Infof("Node %v is already in a process of CIDR assignment.", node.Name) + return nil + } + + if len(node.Spec.PodCIDRs) > 0 { + return r.occupyCIDRs(node) + } + + cidrs, clusterCIDR, err := r.prioritizedCIDRs(node) + if err != nil { + r.removeNodeFromProcessing(node.Name) + controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable") + return fmt.Errorf("failed to get cidrs for node %s", node.Name) + } + + if len(cidrs) == 0 { + r.removeNodeFromProcessing(node.Name) + controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRNotAvailable") + return fmt.Errorf("no cidrSets with matching labels found for node %s", node.Name) + } + + // allocate and queue the assignment. + allocated := multiCIDRNodeReservedCIDRs{ + nodeReservedCIDRs: nodeReservedCIDRs{ + nodeName: node.Name, + allocatedCIDRs: cidrs, + }, + clusterCIDR: clusterCIDR, + } + + return r.updateCIDRsAllocation(allocated) +} + +// ReleaseCIDR marks node.podCIDRs[...] as unused in our tracked cidrSets. +func (r *multiCIDRRangeAllocator) ReleaseCIDR(node *v1.Node) error { + r.lock.Lock() + defer r.lock.Unlock() + + if node == nil || len(node.Spec.PodCIDRs) == 0 { + return nil + } + + clusterCIDR, err := r.allocatedClusterCIDR(node) + if err != nil { + return err + } + + for _, cidr := range node.Spec.PodCIDRs { + _, podCIDR, err := netutil.ParseCIDRSloppy(cidr) + if err != nil { + return fmt.Errorf("failed to parse CIDR %q on Node %q: %w", cidr, node.Name, err) + } + + klog.Infof("release CIDR %s for node: %s", cidr, node.Name) + if err := r.Release(clusterCIDR, podCIDR); err != nil { + return fmt.Errorf("failed to release cidr %q from clusterCIDR %q for node %q: %w", cidr, clusterCIDR.Name, node.Name, err) + } + } + + // Remove the node from the ClusterCIDR AssociatedNodes. + delete(clusterCIDR.AssociatedNodes, node.Name) + + return nil +} + +// Marks all CIDRs with subNetMaskSize that belongs to serviceCIDR as used across all cidrs +// so that they won't be assignable. 
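+// Two ranges overlap when either network contains the other's address masked with its own
+// prefix; only ClusterCIDRs that actually overlap the service range occupy it (see occupyServiceCIDR).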
+func (r *multiCIDRRangeAllocator) filterOutServiceRange(serviceCIDR *net.IPNet) { + // Checks if service CIDR has a nonempty intersection with cluster + // CIDR. It is the case if either clusterCIDR contains serviceCIDR with + // clusterCIDR's Mask applied (this means that clusterCIDR contains + // serviceCIDR) or vice versa (which means that serviceCIDR contains + // clusterCIDR). + for _, clusterCIDRList := range r.cidrMap { + for _, clusterCIDR := range clusterCIDRList { + if err := r.occupyServiceCIDR(clusterCIDR, serviceCIDR); err != nil { + klog.Errorf("unable to occupy service CIDR: %w", err) + } + } + } +} + +func (r *multiCIDRRangeAllocator) occupyServiceCIDR(clusterCIDR *cidrset.ClusterCIDR, serviceCIDR *net.IPNet) error { + + cidrSet, err := r.associatedCIDRSet(clusterCIDR, serviceCIDR) + if err != nil { + return err + } + + cidr := cidrSet.ClusterCIDR + + // No need to occupy as Service CIDR doesn't intersect with the current ClusterCIDR. + if !cidr.Contains(serviceCIDR.IP.Mask(cidr.Mask)) && !serviceCIDR.Contains(cidr.IP.Mask(serviceCIDR.Mask)) { + return nil + } + + if err := r.Occupy(clusterCIDR, serviceCIDR); err != nil { + return fmt.Errorf("error filtering out service cidr %v from cluster cidr %v: %w", cidr, serviceCIDR, err) + } + + return nil +} + +// updateCIDRsAllocation assigns CIDR to Node and sends an update to the API server. +func (r *multiCIDRRangeAllocator) updateCIDRsAllocation(data multiCIDRNodeReservedCIDRs) error { + err := func(data multiCIDRNodeReservedCIDRs) error { + cidrsString := ipnetToStringList(data.allocatedCIDRs) + node, err := r.nodeLister.Get(data.nodeName) + if err != nil { + klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDRs: %v", data.nodeName, err) + return err + } + + // if cidr list matches the proposed, + // then we possibly updated this node + // and just failed to ack the success. + if len(node.Spec.PodCIDRs) == len(data.allocatedCIDRs) { + match := true + for idx, cidr := range cidrsString { + if node.Spec.PodCIDRs[idx] != cidr { + match = false + break + } + } + if match { + klog.V(4).Infof("Node %q already has allocated CIDR %q. It matches the proposed one.", node.Name, data.allocatedCIDRs) + return nil + } + } + + // node has cidrs allocated, release the reserved. + if len(node.Spec.PodCIDRs) != 0 { + klog.Errorf("Node %q already has a CIDR allocated %q. Releasing the new one.", node.Name, node.Spec.PodCIDRs) + for _, cidr := range data.allocatedCIDRs { + if err := r.Release(data.clusterCIDR, cidr); err != nil { + return fmt.Errorf("failed to release cidr %s from clusterCIDR %s for node: %s: %w", cidr, data.clusterCIDR.Name, node.Name, err) + } + } + return nil + } + + // If we reached here, it means that the node has no CIDR currently assigned. So we set it. + for i := 0; i < cidrUpdateRetries; i++ { + if err = nodeutil.PatchNodeCIDRs(r.client, types.NodeName(node.Name), cidrsString); err == nil { + data.clusterCIDR.AssociatedNodes[node.Name] = true + klog.Infof("Set node %q PodCIDR to %q", node.Name, cidrsString) + return nil + } + } + // failed release back to the pool. + klog.Errorf("Failed to update node %q PodCIDR to %q after %d attempts: %v", node.Name, cidrsString, cidrUpdateRetries, err) + controllerutil.RecordNodeStatusChange(r.recorder, node, "CIDRAssignmentFailed") + // We accept the fact that we may leak CIDRs here. This is safer than releasing + // them in case when we don't know if request went through. + // NodeController restart will return all falsely allocated CIDRs to the pool. 
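+ // A server timeout means the patch may still have been applied, so the reserved CIDRs
+ // are kept in that case; for any other error they are released immediately below.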
+ if !apierrors.IsServerTimeout(err) { + klog.Errorf("CIDR assignment for node %q failed: %v. Releasing allocated CIDR", node.Name, err) + for _, cidr := range data.allocatedCIDRs { + if err := r.Release(data.clusterCIDR, cidr); err != nil { + return fmt.Errorf("failed to release cidr %q from clusterCIDR %q for node: %q: %w", cidr, data.clusterCIDR.Name, node.Name, err) + } + } + } + return err + }(data) + + r.removeNodeFromProcessing(data.nodeName) + return err +} + +// defaultNodeSelector generates a label with defaultClusterCIDRKey as the key and +// defaultClusterCIDRValue as the value, it is an internal nodeSelector matching all +// nodes. Only used if no ClusterCIDR selects the node. +func defaultNodeSelector() ([]byte, error) { + nodeSelector := &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: defaultClusterCIDRKey, + Operator: v1.NodeSelectorOpIn, + Values: []string{defaultClusterCIDRValue}, + }, + }, + }, + }, + } + + marshalledSelector, err := nodeSelector.Marshal() + if err != nil { + return nil, err + } + + return marshalledSelector, nil +} + +// prioritizedCIDRs returns a list of CIDRs to be allocated to the node. +// Returns 1 CIDR if single stack. +// Returns 2 CIDRs , 1 from each ip family if dual stack. +func (r *multiCIDRRangeAllocator) prioritizedCIDRs(node *v1.Node) ([]*net.IPNet, *cidrset.ClusterCIDR, error) { + clusterCIDRList, err := r.orderedMatchingClusterCIDRs(node) + if err != nil { + return nil, nil, fmt.Errorf("unable to get a clusterCIDR for node %s: %w", node.Name, err) + } + + for _, clusterCIDR := range clusterCIDRList { + cidrs := make([]*net.IPNet, 0) + if clusterCIDR.IPv4CIDRSet != nil { + cidr, err := r.allocateCIDR(clusterCIDR, clusterCIDR.IPv4CIDRSet) + if err != nil { + klog.V(3).Infof("unable to allocate IPv4 CIDR, trying next range: %w", err) + continue + } + cidrs = append(cidrs, cidr) + } + + if clusterCIDR.IPv6CIDRSet != nil { + cidr, err := r.allocateCIDR(clusterCIDR, clusterCIDR.IPv6CIDRSet) + if err != nil { + klog.V(3).Infof("unable to allocate IPv6 CIDR, trying next range: %w", err) + continue + } + cidrs = append(cidrs, cidr) + } + + return cidrs, clusterCIDR, nil + } + return nil, nil, fmt.Errorf("unable to get a clusterCIDR for node %s, no available CIDRs", node.Name) +} + +func (r *multiCIDRRangeAllocator) allocateCIDR(clusterCIDR *cidrset.ClusterCIDR, cidrSet *cidrset.MultiCIDRSet) (*net.IPNet, error) { + + for evaluated := 0; evaluated < cidrSet.MaxCIDRs; evaluated++ { + candidate, lastEvaluated, err := cidrSet.NextCandidate() + if err != nil { + return nil, err + } + + evaluated += lastEvaluated + + if r.cidrInAllocatedList(candidate) { + continue + } + + // Deep Check. + if r.cidrOverlapWithAllocatedList(candidate) { + continue + } + + // Mark the CIDR as occupied in the map. + if err := r.Occupy(clusterCIDR, candidate); err != nil { + return nil, err + } + // Increment the evaluated count metric. 
+ cidrSet.UpdateEvaluatedCount(evaluated) + return candidate, nil + } + return nil, &cidrset.CIDRRangeNoCIDRsRemainingErr{ + CIDR: cidrSet.Label, + } +} + +func (r *multiCIDRRangeAllocator) cidrInAllocatedList(cidr *net.IPNet) bool { + for _, clusterCIDRList := range r.cidrMap { + for _, clusterCIDR := range clusterCIDRList { + cidrSet, _ := r.associatedCIDRSet(clusterCIDR, cidr) + if cidrSet != nil { + if ok := cidrSet.AllocatedCIDRMap[cidr.String()]; ok { + return true + } + } + } + } + return false +} + +func (r *multiCIDRRangeAllocator) cidrOverlapWithAllocatedList(cidr *net.IPNet) bool { + for _, clusterCIDRList := range r.cidrMap { + for _, clusterCIDR := range clusterCIDRList { + cidrSet, _ := r.associatedCIDRSet(clusterCIDR, cidr) + if cidrSet != nil { + for allocated := range cidrSet.AllocatedCIDRMap { + _, allocatedCIDR, _ := netutil.ParseCIDRSloppy(allocated) + if cidr.Contains(allocatedCIDR.IP.Mask(cidr.Mask)) || allocatedCIDR.Contains(cidr.IP.Mask(allocatedCIDR.Mask)) { + return true + } + } + } + } + } + return false +} + +// allocatedClusterCIDR returns the ClusterCIDR from which the node CIDRs were allocated. +func (r *multiCIDRRangeAllocator) allocatedClusterCIDR(node *v1.Node) (*cidrset.ClusterCIDR, error) { + clusterCIDRList, err := r.orderedMatchingClusterCIDRs(node) + if err != nil { + return nil, fmt.Errorf("unable to get a clusterCIDR for node %s: %w", node.Name, err) + } + + for _, clusterCIDR := range clusterCIDRList { + if ok := clusterCIDR.AssociatedNodes[node.Name]; ok { + return clusterCIDR, nil + } + } + return nil, fmt.Errorf("no clusterCIDR found associated with node: %s", node.Name) +} + +// orderedMatchingClusterCIDRs returns a list of all the ClusterCIDRs matching the node labels. +// The list is ordered with the following priority, which act as tie-breakers. +// P0: ClusterCIDR with higher number of matching labels has the highest priority. +// P1: ClusterCIDR having cidrSet with fewer allocatable Pod CIDRs has higher priority. +// P2: ClusterCIDR with a PerNodeMaskSize having fewer IPs has higher priority. +// P3: ClusterCIDR having label with lower alphanumeric value has higher priority. +// P4: ClusterCIDR with a cidrSet having a smaller IP address value has a higher priority. +func (r *multiCIDRRangeAllocator) orderedMatchingClusterCIDRs(node *v1.Node) ([]*cidrset.ClusterCIDR, error) { + matchingCIDRs := make([]*cidrset.ClusterCIDR, 0) + pq := make(PriorityQueue, 0) + + for label, clusterCIDRList := range r.cidrMap { + labelsMatch, matchCnt, err := r.matchCIDRLabels(node, []byte(label)) + if err != nil { + return nil, err + } + + if !labelsMatch { + continue + } + + for _, clusterCIDR := range clusterCIDRList { + pqItem := &PriorityQueueItem{ + clusterCIDR: clusterCIDR, + labelMatchCount: matchCnt, + selectorString: label, + } + + // Only push the CIDRsets which are not marked for termination. + if !clusterCIDR.Terminating { + heap.Push(&pq, pqItem) + } + } + } + + // Remove the ClusterCIDRs from the PriorityQueue. + // They arrive in descending order of matchCnt, + // if matchCnt is equal it is ordered in ascending order of labels. + for pq.Len() > 0 { + pqItem := heap.Pop(&pq).(*PriorityQueueItem) + matchingCIDRs = append(matchingCIDRs, pqItem.clusterCIDR) + } + + // Append the catch all CIDR config. + defaultSelector, err := defaultNodeSelector() + if err != nil { + return nil, err + } + if clusterCIDRList, ok := r.cidrMap[string(defaultSelector)]; ok { + matchingCIDRs = append(matchingCIDRs, clusterCIDRList...) 
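The P0–P4 ordering documented above is realized by the PriorityQueue pushed into here; its comparison function is defined elsewhere in this change, so the following is only a rough sketch of how such a multi-level tie-break reads. The field names and types are invented stand-ins for the real PriorityQueueItem, not the patch's actual definition.

    package main

    import (
        "fmt"
        "sort"
    )

    // queueItem is an illustrative stand-in for PriorityQueueItem.
    type queueItem struct {
        name             string
        labelMatchCount  int    // P0: more matching labels wins
        allocatableCIDRs int    // P1: fewer remaining per-node CIDRs wins
        perNodeIPs       int    // P2: fewer IPs per node wins
        selectorString   string // P3: lexicographically smaller selector wins
        networkAddr      uint32 // P4: numerically smaller network address wins
    }

    // less applies the P0-P4 tie-breakers in order, falling through to the
    // next criterion only when the previous one is equal.
    func less(a, b queueItem) bool {
        if a.labelMatchCount != b.labelMatchCount {
            return a.labelMatchCount > b.labelMatchCount
        }
        if a.allocatableCIDRs != b.allocatableCIDRs {
            return a.allocatableCIDRs < b.allocatableCIDRs
        }
        if a.perNodeIPs != b.perNodeIPs {
            return a.perNodeIPs < b.perNodeIPs
        }
        if a.selectorString != b.selectorString {
            return a.selectorString < b.selectorString
        }
        return a.networkAddr < b.networkAddr
    }

    func main() {
        items := []queueItem{
            {name: "b", labelMatchCount: 1, allocatableCIDRs: 4},
            {name: "a", labelMatchCount: 2, allocatableCIDRs: 16},
        }
        sort.Slice(items, func(i, j int) bool { return less(items[i], items[j]) })
        fmt.Println(items[0].name) // "a": the higher label match count wins despite more free CIDRs
    }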
+ } + return matchingCIDRs, nil +} + +// matchCIDRLabels Matches the Node labels to CIDR Configs. +// Returns true only if all the labels match, also returns the count of matching labels. +func (r *multiCIDRRangeAllocator) matchCIDRLabels(node *v1.Node, label []byte) (bool, int, error) { + var labelSet labels.Set + var matchCnt int + + labelsMatch := false + selector := &v1.NodeSelector{} + err := selector.Unmarshal(label) + if err != nil { + klog.Errorf("Unable to unmarshal node selector for label %v: %v", label, err) + return labelsMatch, 0, err + } + + ls, err := v1helper.NodeSelectorAsSelector(selector) + if err != nil { + klog.Errorf("Unable to convert NodeSelector to labels.Selector: %v", err) + return labelsMatch, 0, err + } + reqs, selectable := ls.Requirements() + + labelSet = node.ObjectMeta.Labels + if selectable { + matchCnt = 0 + for _, req := range reqs { + if req.Matches(labelSet) { + matchCnt += 1 + } + } + if matchCnt == len(reqs) { + labelsMatch = true + } + } + return labelsMatch, matchCnt, err +} + +// Methods for handling ClusterCIDRs. + +// createDefaultClusterCIDR creates a default ClusterCIDR if --cluster-cidr has +// been configured. It converts the --cluster-cidr and --per-node-mask-size* flags +// to appropriate ClusterCIDR fields. +func createDefaultClusterCIDR(existingConfigList *networkingv1alpha1.ClusterCIDRList, + allocatorParams CIDRAllocatorParams) { + // Create default ClusterCIDR only if --cluster-cidr has been configured + if len(allocatorParams.ClusterCIDRs) == 0 { + return + } + + for _, clusterCIDR := range existingConfigList.Items { + if clusterCIDR.Name == defaultClusterCIDRName { + // Default ClusterCIDR already exists, no further action required. + klog.V(3).Infof("Default ClusterCIDR %s already exists", defaultClusterCIDRName) + return + } + } + + // Create a default ClusterCIDR as it is not already created. + defaultCIDRConfig := &networkingv1alpha1.ClusterCIDR{ + TypeMeta: metav1.TypeMeta{ + APIVersion: defaultClusterCIDRAPIVersion, + Kind: "ClusterCIDR", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: defaultClusterCIDRName, + }, + Spec: networkingv1alpha1.ClusterCIDRSpec{ + PerNodeHostBits: minPerNodeHostBits, + }, + } + + ipv4PerNodeHostBits := int32(math.MinInt32) + ipv6PerNodeHostBits := int32(math.MinInt32) + isDualstack := false + if len(allocatorParams.ClusterCIDRs) == 2 { + isDualstack = true + } + + for i, cidr := range allocatorParams.ClusterCIDRs { + if netutil.IsIPv4CIDR(cidr) { + defaultCIDRConfig.Spec.IPv4 = cidr.String() + ipv4PerNodeHostBits = ipv4MaxCIDRMask - int32(allocatorParams.NodeCIDRMaskSizes[i]) + if !isDualstack && ipv4PerNodeHostBits > minPerNodeHostBits { + defaultCIDRConfig.Spec.PerNodeHostBits = ipv4PerNodeHostBits + } + } else if netutil.IsIPv6CIDR(cidr) { + defaultCIDRConfig.Spec.IPv6 = cidr.String() + ipv6PerNodeHostBits = ipv6MaxCIDRMask - int32(allocatorParams.NodeCIDRMaskSizes[i]) + if !isDualstack && ipv6PerNodeHostBits > minPerNodeHostBits { + defaultCIDRConfig.Spec.PerNodeHostBits = ipv6PerNodeHostBits + } + } + } + + if isDualstack { + // In case of dualstack CIDRs, currently the default values for PerNodeMaskSize are + // 24 for IPv4 (PerNodeHostBits=8) and 64 for IPv6(PerNodeHostBits=64), there is no + // requirement for the PerNodeHostBits to be equal for IPv4 and IPv6, However with + // the introduction of ClusterCIDRs, we enforce the requirement for a single + // PerNodeHostBits field, thus we choose the minimum PerNodeHostBits value, to avoid + // overflow for IPv4 CIDRs. 
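To make that choice concrete: with the defaults mentioned in the comment above, a /24 per-node mask for IPv4 and /64 for IPv6, the host-bit conversion and the dual-stack minimum work out as in this small sketch. The constants mirror ipv4MaxCIDRMask and ipv6MaxCIDRMask but are redefined locally so the snippet stands alone.

    package main

    import "fmt"

    const (
        ipv4MaxCIDRMask = 32  // mirrors the allocator's constant
        ipv6MaxCIDRMask = 128 // mirrors the allocator's constant
    )

    func main() {
        // Per-node mask size defaults described in the comment above.
        ipv4PerNodeHostBits := ipv4MaxCIDRMask - 24 // /24 per node -> 8 host bits (256 IPs)
        ipv6PerNodeHostBits := ipv6MaxCIDRMask - 64 // /64 per node -> 64 host bits

        // Dual stack enforces a single PerNodeHostBits, so the smaller value is
        // kept to avoid overflowing the IPv4 range.
        perNodeHostBits := ipv4PerNodeHostBits
        if ipv6PerNodeHostBits < perNodeHostBits {
            perNodeHostBits = ipv6PerNodeHostBits
        }
        fmt.Println(ipv4PerNodeHostBits, ipv6PerNodeHostBits, perNodeHostBits) // 8 64 8
    }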
+ if ipv4PerNodeHostBits >= minPerNodeHostBits && ipv4PerNodeHostBits <= ipv6PerNodeHostBits { + defaultCIDRConfig.Spec.PerNodeHostBits = ipv4PerNodeHostBits + } else if ipv6PerNodeHostBits >= minPerNodeHostBits && ipv6PerNodeHostBits <= ipv4MaxCIDRMask { + defaultCIDRConfig.Spec.PerNodeHostBits = ipv6PerNodeHostBits + } + } + + existingConfigList.Items = append(existingConfigList.Items, *defaultCIDRConfig) + + return +} + +// reconcileCreate handles create ClusterCIDR events. +func (r *multiCIDRRangeAllocator) reconcileCreate(clusterCIDR *networkingv1alpha1.ClusterCIDR) error { + r.lock.Lock() + defer r.lock.Unlock() + + if needToAddFinalizer(clusterCIDR, clusterCIDRFinalizer) { + klog.V(3).Infof("Creating ClusterCIDR %s", clusterCIDR.Name) + if err := r.createClusterCIDR(clusterCIDR, false); err != nil { + klog.Errorf("Unable to create ClusterCIDR %s : %v", clusterCIDR.Name, err) + return err + } + } + return nil +} + +// reconcileBootstrap handles creation of existing ClusterCIDRs. +// adds a finalizer if not already present. +func (r *multiCIDRRangeAllocator) reconcileBootstrap(clusterCIDR *networkingv1alpha1.ClusterCIDR) error { + r.lock.Lock() + defer r.lock.Unlock() + + terminating := false + // Create the ClusterCIDR only if the Spec has not been modified. + if clusterCIDR.Generation > 1 { + terminating = true + err := fmt.Errorf("CIDRs from ClusterCIDR %s will not be used for allocation as it was modified", clusterCIDR.Name) + klog.Errorf("ClusterCIDR Modified: %v", err) + } + + klog.V(2).Infof("Creating ClusterCIDR %s during bootstrap", clusterCIDR.Name) + if err := r.createClusterCIDR(clusterCIDR, terminating); err != nil { + klog.Errorf("Unable to create ClusterCIDR %s: %v", clusterCIDR.Name, err) + return err + } + + return nil +} + +// createClusterCIDR creates and maps the cidrSets in the cidrMap. +func (r *multiCIDRRangeAllocator) createClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, terminating bool) error { + nodeSelector, err := r.nodeSelectorKey(clusterCIDR) + if err != nil { + return fmt.Errorf("unable to get labelSelector key: %w", err) + } + + clusterCIDRSet, err := r.createClusterCIDRSet(clusterCIDR, terminating) + if err != nil { + return fmt.Errorf("invalid ClusterCIDR: %w", err) + } + + if clusterCIDRSet.IPv4CIDRSet == nil && clusterCIDRSet.IPv6CIDRSet == nil { + return errors.New("invalid ClusterCIDR: must provide IPv4 and/or IPv6 config") + } + + if err := r.mapClusterCIDRSet(r.cidrMap, nodeSelector, clusterCIDRSet); err != nil { + return fmt.Errorf("unable to map clusterCIDRSet: %w", err) + } + + // Make a copy so we don't mutate the shared informer cache. + updatedClusterCIDR := clusterCIDR.DeepCopy() + if needToAddFinalizer(clusterCIDR, clusterCIDRFinalizer) { + updatedClusterCIDR.ObjectMeta.Finalizers = append(clusterCIDR.ObjectMeta.Finalizers, clusterCIDRFinalizer) + } + + if updatedClusterCIDR.ResourceVersion == "" { + // Create is only used for creating default ClusterCIDR. + if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), updatedClusterCIDR, metav1.CreateOptions{}); err != nil { + klog.V(2).Infof("Error creating ClusterCIDR %s: %v", clusterCIDR.Name, err) + return err + } + } else { + // Update the ClusterCIDR object when called from reconcileCreate. 
+ if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Update(context.TODO(), updatedClusterCIDR, metav1.UpdateOptions{}); err != nil { + klog.V(2).Infof("Error creating ClusterCIDR %s: %v", clusterCIDR.Name, err) + return err + } + } + + return nil +} + +// createClusterCIDRSet creates and returns new cidrset.ClusterCIDR based on ClusterCIDR API object. +func (r *multiCIDRRangeAllocator) createClusterCIDRSet(clusterCIDR *networkingv1alpha1.ClusterCIDR, terminating bool) (*cidrset.ClusterCIDR, error) { + + clusterCIDRSet := &cidrset.ClusterCIDR{ + Name: clusterCIDR.Name, + AssociatedNodes: make(map[string]bool, 0), + Terminating: terminating, + } + + if clusterCIDR.Spec.IPv4 != "" { + _, ipv4CIDR, err := netutil.ParseCIDRSloppy(clusterCIDR.Spec.IPv4) + if err != nil { + return nil, fmt.Errorf("unable to parse provided IPv4 CIDR: %w", err) + } + clusterCIDRSet.IPv4CIDRSet, err = cidrset.NewMultiCIDRSet(ipv4CIDR, int(clusterCIDR.Spec.PerNodeHostBits)) + if err != nil { + return nil, fmt.Errorf("unable to create IPv4 cidrSet: %w", err) + } + } + + if clusterCIDR.Spec.IPv6 != "" { + _, ipv6CIDR, err := netutil.ParseCIDRSloppy(clusterCIDR.Spec.IPv6) + if err != nil { + return nil, fmt.Errorf("unable to parse provided IPv6 CIDR: %w", err) + } + clusterCIDRSet.IPv6CIDRSet, err = cidrset.NewMultiCIDRSet(ipv6CIDR, int(clusterCIDR.Spec.PerNodeHostBits)) + if err != nil { + return nil, fmt.Errorf("unable to create IPv6 cidrSet: %w", err) + } + } + + return clusterCIDRSet, nil +} + +// mapClusterCIDRSet maps the ClusterCIDRSet to the provided labelSelector in the cidrMap. +func (r *multiCIDRRangeAllocator) mapClusterCIDRSet(cidrMap map[string][]*cidrset.ClusterCIDR, nodeSelector string, clusterCIDRSet *cidrset.ClusterCIDR) error { + if clusterCIDRSet == nil { + return errors.New("invalid clusterCIDRSet, clusterCIDRSet cannot be nil") + } + + if clusterCIDRSetList, ok := cidrMap[nodeSelector]; ok { + cidrMap[nodeSelector] = append(clusterCIDRSetList, clusterCIDRSet) + } else { + cidrMap[nodeSelector] = []*cidrset.ClusterCIDR{clusterCIDRSet} + } + return nil +} + +// reconcileDelete deletes the ClusterCIDR object and removes the finalizer. +func (r *multiCIDRRangeAllocator) reconcileDelete(clusterCIDR *networkingv1alpha1.ClusterCIDR) error { + r.lock.Lock() + defer r.lock.Unlock() + + if slice.ContainsString(clusterCIDR.GetFinalizers(), clusterCIDRFinalizer, nil) { + if err := r.deleteClusterCIDR(clusterCIDR); err != nil { + return err + } + // Remove the finalizer as delete is successful. + cccCopy := clusterCIDR.DeepCopy() + cccCopy.ObjectMeta.Finalizers = slice.RemoveString(cccCopy.ObjectMeta.Finalizers, clusterCIDRFinalizer, nil) + if _, err := r.client.NetworkingV1alpha1().ClusterCIDRs().Update(context.TODO(), clusterCIDR, metav1.UpdateOptions{}); err != nil { + klog.V(2).Infof("Error removing finalizer for ClusterCIDR %s: %v", clusterCIDR.Name, err) + return err + } + klog.V(2).Infof("Removed finalizer for ClusterCIDR %s", clusterCIDR.Name) + } + return nil +} + +// deleteClusterCIDR Deletes and unmaps the ClusterCIDRs from the cidrMap. 
+func (r *multiCIDRRangeAllocator) deleteClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR) error { + + labelSelector, err := r.nodeSelectorKey(clusterCIDR) + if err != nil { + return fmt.Errorf("unable to delete cidr: %w", err) + } + + clusterCIDRSetList, ok := r.cidrMap[labelSelector] + if !ok { + klog.Infof("Label %s not found in CIDRMap, proceeding with delete", labelSelector) + return nil + } + + for i, clusterCIDRSet := range clusterCIDRSetList { + if clusterCIDRSet.Name != clusterCIDR.Name { + continue + } + + // Mark clusterCIDRSet as terminating. + clusterCIDRSet.Terminating = true + + // Allow deletion only if no nodes are associated with the ClusterCIDR. + if len(clusterCIDRSet.AssociatedNodes) > 0 { + return fmt.Errorf("ClusterCIDRSet %s marked as terminating, won't be deleted until all associated nodes are deleted", clusterCIDR.Name) + } + + // Remove the label from the map if this was the only clusterCIDR associated + // with it. + if len(clusterCIDRSetList) == 1 { + delete(r.cidrMap, labelSelector) + return nil + } + + clusterCIDRSetList = append(clusterCIDRSetList[:i], clusterCIDRSetList[i+1:]...) + r.cidrMap[labelSelector] = clusterCIDRSetList + return nil + } + klog.V(2).Info("clusterCIDR not found, proceeding with delete", "Name", clusterCIDR.Name, "label", labelSelector) + return nil +} + +func (r *multiCIDRRangeAllocator) nodeSelectorKey(clusterCIDR *networkingv1alpha1.ClusterCIDR) (string, error) { + var nodeSelector []byte + var err error + + if clusterCIDR.Spec.NodeSelector != nil { + nodeSelector, err = clusterCIDR.Spec.NodeSelector.Marshal() + } else { + nodeSelector, err = defaultNodeSelector() + } + + if err != nil { + return "", err + } + + return string(nodeSelector), nil +} + +func listClusterCIDRs(kubeClient clientset.Interface) (*networkingv1alpha1.ClusterCIDRList, error) { + var clusterCIDRList *networkingv1alpha1.ClusterCIDRList + // We must poll because apiserver might not be up. This error causes + // controller manager to restart. + startTimestamp := time.Now() + + // start with 2s, multiply the duration by 1.6 each step, 11 steps = 9.7 minutes + backoff := wait.Backoff{ + Duration: 2 * time.Second, + Factor: 1.6, + Steps: 11, + } + + if pollErr := wait.ExponentialBackoff(backoff, func() (bool, error) { + var err error + clusterCIDRList, err = kubeClient.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), metav1.ListOptions{ + FieldSelector: fields.Everything().String(), + LabelSelector: labels.Everything().String(), + }) + if err != nil { + klog.Errorf("Failed to list all clusterCIDRs: %v", err) + return false, nil + } + return true, nil + }); pollErr != nil { + klog.Errorf("Failed to list clusterCIDRs (after %v)", time.Now().Sub(startTimestamp)) + return nil, fmt.Errorf("failed to list all clusterCIDRs in %v, cannot proceed without updating CIDR map", + apiserverStartupGracePeriod) + } + return clusterCIDRList, nil +} diff --git a/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go new file mode 100644 index 00000000000..6569b0a5d13 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multi_cidr_range_allocator_test.go @@ -0,0 +1,1868 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ipam + +import ( + "context" + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + v1 "k8s.io/api/core/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/rand" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes/fake" + k8stesting "k8s.io/client-go/testing" + "k8s.io/client-go/tools/cache" + "k8s.io/kubernetes/pkg/controller" + cidrset "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/multicidrset" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" + "k8s.io/kubernetes/pkg/controller/testutil" + utilnet "k8s.io/utils/net" +) + +type testCaseMultiCIDR struct { + description string + fakeNodeHandler *testutil.FakeNodeHandler + allocatorParams CIDRAllocatorParams + testCIDRMap map[string][]*cidrset.ClusterCIDR + // key is index of the cidr allocated. + expectedAllocatedCIDR map[int]string + allocatedCIDRs map[int][]string + // should controller creation fail? + ctrlCreateFail bool +} + +type testClusterCIDR struct { + perNodeHostBits int32 + ipv4CIDR string + ipv6CIDR string + name string +} + +type testNodeSelectorRequirement struct { + key string + operator v1.NodeSelectorOperator + values []string +} + +func getTestNodeSelector(requirements []testNodeSelectorRequirement) string { + testNodeSelector := &v1.NodeSelector{} + + for _, nsr := range requirements { + nst := v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: nsr.key, + Operator: nsr.operator, + Values: nsr.values, + }, + }, + } + testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst) + } + + marshalledSelector, _ := testNodeSelector.Marshal() + return string(marshalledSelector) +} + +func getTestCidrMap(testClusterCIDRMap map[string][]*testClusterCIDR) map[string][]*cidrset.ClusterCIDR { + cidrMap := make(map[string][]*cidrset.ClusterCIDR, 0) + for labels, testClusterCIDRList := range testClusterCIDRMap { + clusterCIDRList := make([]*cidrset.ClusterCIDR, 0) + for _, testClusterCIDR := range testClusterCIDRList { + clusterCIDR := &cidrset.ClusterCIDR{ + Name: testClusterCIDR.name, + AssociatedNodes: make(map[string]bool, 0), + } + + if testClusterCIDR.ipv4CIDR != "" { + _, testCIDR, _ := utilnet.ParseCIDRSloppy(testClusterCIDR.ipv4CIDR) + testCIDRSet, _ := cidrset.NewMultiCIDRSet(testCIDR, int(testClusterCIDR.perNodeHostBits)) + clusterCIDR.IPv4CIDRSet = testCIDRSet + } + if testClusterCIDR.ipv6CIDR != "" { + _, testCIDR, _ := utilnet.ParseCIDRSloppy(testClusterCIDR.ipv6CIDR) + testCIDRSet, _ := cidrset.NewMultiCIDRSet(testCIDR, int(testClusterCIDR.perNodeHostBits)) + clusterCIDR.IPv6CIDRSet = testCIDRSet + } + clusterCIDRList = append(clusterCIDRList, clusterCIDR) + } + cidrMap[labels] = clusterCIDRList + } + return cidrMap +} + +func getClusterCIDRList(nodeName string, cidrMap map[string][]*cidrset.ClusterCIDR) ([]*cidrset.ClusterCIDR, error) { + labelSelector := getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: 
v1.NodeSelectorOpIn, + values: []string{nodeName}, + }, + }) + if clusterCIDRList, ok := cidrMap[labelSelector]; ok { + return clusterCIDRList, nil + } + return nil, fmt.Errorf("unable to get clusterCIDR for node: %s", nodeName) +} + +func TestMultiCIDROccupyPreExistingCIDR(t *testing.T) { + // all tests operate on a single node. + testCaseMultiCIDRs := []testCaseMultiCIDR{ + { + description: "success, single stack no node allocation", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "single-stack-cidr", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: false, + }, + { + description: "success, dual stack no node allocation", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: false, + }, + { + description: "success, single stack correct node allocation", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.0.1/24"}, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "single-stack-cidr-allocated", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: false, + }, + { + description: "success, dual stack both allocated correctly", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.0.1/24", "ace:cab:deca::1/120"}, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: 
v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr-allocated", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: false, + }, + // failure cases. + { + description: "fail, single stack incorrect node allocation", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"172.10.0.1/24"}, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "single-stack-cidr-allocate-fail", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: true, + }, + { + description: "fail, dualstack node allocating from non existing cidr", + + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.0.1/24", "a00::/86"}, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr-allocate-fail", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: true, + }, + { + description: "fail, dualstack node allocating bad v4", + + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"172.10.0.1/24", "ace:cab:deca::1/120"}, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr-bad-v4", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: true, + }, + { + description: "fail, dualstack node allocating bad v6", + + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.0.1/24", "cdd::/86"}, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: 
getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr-bad-v6", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + allocatedCIDRs: nil, + expectedAllocatedCIDR: nil, + ctrlCreateFail: true, + }, + } + + // test function + for _, tc := range testCaseMultiCIDRs { + t.Run(tc.description, func(t *testing.T) { + // Initialize the range allocator. + fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) + fakeClient := &fake.Clientset{} + fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) + fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() + nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) + + _, err := NewMultiCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, fakeClusterCIDRInformer, tc.allocatorParams, nodeList, tc.testCIDRMap) + if err == nil && tc.ctrlCreateFail { + t.Fatalf("creating range allocator was expected to fail, but it did not") + } + if err != nil && !tc.ctrlCreateFail { + t.Fatalf("creating range allocator was expected to succeed, but it did not") + } + }) + } +} + +func TestMultiCIDRAllocateOrOccupyCIDRSuccess(t *testing.T) { + // Non-parallel test (overrides global var). + oldNodePollInterval := nodePollInterval + nodePollInterval = test.NodePollInterval + defer func() { + nodePollInterval = oldNodePollInterval + }() + + // all tests operate on a single node. + testCaseMultiCIDRs := []testCaseMultiCIDR{ + { + description: "When there's no ServiceCIDR return first CIDR in range", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "single-stack-cidr", + perNodeHostBits: 2, + ipv4CIDR: "127.123.234.0/24", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "127.123.234.0/30", + }, + }, + { + description: "Correctly filter out ServiceCIDR", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{30}, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "single-stack-cidr", + perNodeHostBits: 2, + ipv4CIDR: "127.123.234.0/24", + }, + }, + }), + // it should return first /30 CIDR after service range. 
+ expectedAllocatedCIDR: map[int]string{ + 0: "127.123.234.64/30", + }, + }, + { + description: "Correctly ignore already allocated CIDRs", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "single-stack-cidr", + perNodeHostBits: 2, + ipv4CIDR: "127.123.234.0/24", + }, + }, + }), + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.64/30", "127.123.234.68/30", "127.123.234.72/30", "127.123.234.80/30"}, + }, + expectedAllocatedCIDR: map[int]string{ + 0: "127.123.234.76/30", + }, + }, + { + description: "Dualstack CIDRs, prioritize clusterCIDR with higher label match count", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/8", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 8, + ipv4CIDR: "127.123.234.0/8", + ipv6CIDR: "abc:def:deca::/112", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "127.0.0.0/24", + 1: "abc:def:deca::/120", + }, + }, + { + description: "Dualstack CIDRs, prioritize clusterCIDR with higher label match count, overlapping CIDRs", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/8", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + 
key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + allocatedCIDRs: map[int][]string{ + 0: {"10.0.0.0/24", "10.0.1.0/24", "10.0.2.0/24", "10.0.4.0/24"}, + 1: {"ace:cab:deca::/120"}, + }, + expectedAllocatedCIDR: map[int]string{ + 0: "10.0.3.0/24", + 1: "ace:cab:deca::100/120", + }, + }, + { + description: "Dualstack CIDRs, clusterCIDR with equal label match count, prioritize clusterCIDR with fewer allocatable pod CIDRs", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "127.123.234.0/8", + ipv6CIDR: "abc:def:deca::/112", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-2", + operator: v1.NodeSelectorOpIn, + values: []string{"label2"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/24", + ipv6CIDR: "ace:cab:deca::/120", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "10.0.0.0/24", + 1: "ace:cab:deca::/120", + }, + }, + { + description: "Dualstack CIDRs, clusterCIDR with equal label count, non comparable allocatable pod CIDRs, prioritize clusterCIDR with lower perNodeMaskSize", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "127.123.234.0/23", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-2", + operator: v1.NodeSelectorOpIn, + values: []string{"label2"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/16", + ipv6CIDR: 
"ace:cab:deca::/120", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "10.0.0.0/24", + 1: "ace:cab:deca::/120", + }, + }, + { + description: "Dualstack CIDRs, clusterCIDR with equal label count and allocatable pod CIDRs, prioritize clusterCIDR with lower perNodeMaskSize", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "127.123.234.0/24", + ipv6CIDR: "abc:def:deca::/120", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-2", + operator: v1.NodeSelectorOpIn, + values: []string{"label2"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 0, + ipv4CIDR: "10.0.0.0/32", + ipv6CIDR: "ace:cab:deca::/128", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "10.0.0.0/32", + 1: "ace:cab:deca::/128", + }, + }, + { + description: "Dualstack CIDRs, clusterCIDR with equal label count, allocatable pod CIDRs and allocatable IPs, prioritize clusterCIDR with lower alphanumeric label", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "127.123.234.0/16", + ipv6CIDR: "abc:def:deca::/112", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-2", + operator: v1.NodeSelectorOpIn, + values: []string{"label2"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "127.123.0.0/24", + 1: "abc:def:deca::/120", + }, + }, + { + description: "Dualstack CIDRs, clusterCIDR with equal label count, allocatable pod CIDRs, allocatable IPs and labels, prioritize clusterCIDR with smaller IP", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + 
ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + "testLabel-1": "label1", + "testLabel-2": "label2", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: func() *net.IPNet { + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("127.123.234.0/26") + return serviceCIDR + }(), + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-1", + perNodeHostBits: 8, + ipv4CIDR: "127.123.234.0/16", + ipv6CIDR: "abc:def:deca::/112", + }, + }, + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + { + key: "testLabel-1", + operator: v1.NodeSelectorOpIn, + values: []string{"label1"}, + }, + }): { + { + name: "dual-stack-cidr-2", + perNodeHostBits: 8, + ipv4CIDR: "10.0.0.0/16", + ipv6CIDR: "ace:cab:deca::/112", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "10.0.0.0/24", + 1: "ace:cab:deca::/120", + }, + }, + { + description: "no double counting", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "nodepool1", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.0.0/24"}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + Labels: map[string]string{ + "testLabel-0": "nodepool1", + }, + }, + Spec: v1.NodeSpec{ + PodCIDRs: []string{"10.10.2.0/24"}, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node2", + Labels: map[string]string{ + "testLabel-0": "nodepool1", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"nodepool1"}, + }, + }): { + { + name: "no-double-counting", + perNodeHostBits: 8, + ipv4CIDR: "10.10.0.0/22", + }, + }, + }), + expectedAllocatedCIDR: map[int]string{ + 0: "10.10.1.0/24", + }, + }, + } + + // test function + testFunc := func(tc testCaseMultiCIDR) { + nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) + // Initialize the range allocator. 
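Stepping back to the "no double counting" case above, a quick arithmetic check: 10.10.0.0/22 with perNodeHostBits 8 splits into exactly four /24 per-node CIDRs, and with 10.10.0.0/24 and 10.10.2.0/24 already occupied by node0 and node1, the first free candidate for node2 is 10.10.1.0/24, which is what the case expects. The standalone sketch below (not part of the test) enumerates those subnets.

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // 10.10.0.0/22 with PerNodeHostBits=8 yields /24 per-node CIDRs.
        _, block, _ := net.ParseCIDR("10.10.0.0/22")
        ones, bits := block.Mask.Size()    // 22, 32
        perNodeMask := bits - 8            // 32 - 8 host bits = /24
        count := 1 << (perNodeMask - ones) // 2^(24-22) = 4 per-node CIDRs

        base := block.IP.To4()
        for i := 0; i < count; i++ {
            ip := make(net.IP, len(base))
            copy(ip, base)
            ip[2] += byte(i) // stepping the third octet works because /24 is octet-aligned
            fmt.Printf("%s/%d\n", ip, perNodeMask)
        }
        // Prints 10.10.0.0/24, 10.10.1.0/24, 10.10.2.0/24, 10.10.3.0/24.
        // With the .0 and .2 subnets occupied, node2 is assigned 10.10.1.0/24.
    }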
+ + fakeClient := &fake.Clientset{} + fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) + fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() + allocator, err := NewMultiCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nodeList, tc.testCIDRMap) + if err != nil { + t.Errorf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) + return + } + rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) + if !ok { + t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) + return + } + rangeAllocator.nodesSynced = test.AlwaysReady + rangeAllocator.recorder = testutil.NewFakeRecorder() + + // this is a bit of white box testing + // pre allocate the CIDRs as per the test + for _, allocatedList := range tc.allocatedCIDRs { + for _, allocated := range allocatedList { + _, cidr, err := utilnet.ParseCIDRSloppy(allocated) + if err != nil { + t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) + } + + clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) + if err != nil { + t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) + } + + occupied := false + for _, clusterCIDR := range clusterCIDRList { + if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { + occupied = true + break + } + } + if !occupied { + t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) + } + } + } + + updateCount := 0 + for _, node := range tc.fakeNodeHandler.Existing { + if node.Spec.PodCIDRs == nil { + updateCount++ + } + if err := allocator.AllocateOrOccupyCIDR(node); err != nil { + t.Errorf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) + } + } + if updateCount != 1 { + t.Fatalf("test error: all tests must update exactly one node") + } + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { + t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) + } + + if len(tc.expectedAllocatedCIDR) == 0 { + // nothing further expected + return + } + for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { + if len(updatedNode.Spec.PodCIDRs) == 0 { + continue // not assigned yet + } + //match + for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { + if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { + t.Errorf("%v: Unable to find allocated CIDR %v, found updated Nodes with CIDRs: %v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) + break + } + } + } + } + + // run the test cases + for _, tc := range testCaseMultiCIDRs { + testFunc(tc) + } +} + +func TestMultiCIDRAllocateOrOccupyCIDRFailure(t *testing.T) { + testCaseMultiCIDRs := []testCaseMultiCIDR{ + { + description: "When there's no ServiceCIDR return first CIDR in range", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", 
+ operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "allocate-fail", + perNodeHostBits: 2, + ipv4CIDR: "127.123.234.0/28", + }, + }, + }), + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, + }, + }, + } + + testFunc := func(tc testCaseMultiCIDR) { + fakeClient := &fake.Clientset{} + fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) + fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() + + // Initialize the range allocator. + allocator, err := NewMultiCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nil, tc.testCIDRMap) + if err != nil { + t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) + } + rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) + if !ok { + t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) + return + } + rangeAllocator.nodesSynced = test.AlwaysReady + rangeAllocator.recorder = testutil.NewFakeRecorder() + + // this is a bit of white box testing + // pre allocate the CIDRs as per the test + for _, allocatedList := range tc.allocatedCIDRs { + for _, allocated := range allocatedList { + _, cidr, err := utilnet.ParseCIDRSloppy(allocated) + if err != nil { + t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) + } + + clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) + if err != nil { + t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) + } + + occupied := false + for _, clusterCIDR := range clusterCIDRList { + if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { + occupied = true + break + } + } + if !occupied { + t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) + } + } + } + + if err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err == nil { + t.Errorf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) + } + // We don't expect any updates, so just sleep for some time + time.Sleep(time.Second) + if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { + t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) + } + if len(tc.expectedAllocatedCIDR) == 0 { + // nothing further expected + return + } + for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { + if len(updatedNode.Spec.PodCIDRs) == 0 { + continue // not assigned yet + } + //match + for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDR { + if updatedNode.Spec.PodCIDRs[podCIDRIdx] == expectedPodCIDR { + t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) + break + } + } + } + } + for _, tc := range testCaseMultiCIDRs { + testFunc(tc) + } +} + +type releasetestCaseMultiCIDR struct { + description string + fakeNodeHandler *testutil.FakeNodeHandler + testCIDRMap map[string][]*cidrset.ClusterCIDR + allocatorParams CIDRAllocatorParams + expectedAllocatedCIDRFirstRound map[int]string + expectedAllocatedCIDRSecondRound map[int]string + allocatedCIDRs map[int][]string + cidrsToRelease [][]string +} + +func TestMultiCIDRReleaseCIDRSuccess(t *testing.T) { + // Non-parallel test (overrides global var) + 
oldNodePollInterval := nodePollInterval + nodePollInterval = test.NodePollInterval + defer func() { + nodePollInterval = oldNodePollInterval + }() + + testCaseMultiCIDRs := []releasetestCaseMultiCIDR{ + { + description: "Correctly release preallocated CIDR", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "cidr-release", + perNodeHostBits: 2, + ipv4CIDR: "127.123.234.0/28", + }, + }, + }), + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.0/30", "127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, + }, + expectedAllocatedCIDRFirstRound: nil, + cidrsToRelease: [][]string{ + {"127.123.234.4/30"}, + }, + expectedAllocatedCIDRSecondRound: map[int]string{ + 0: "127.123.234.4/30", + }, + }, + { + description: "Correctly recycle CIDR", + fakeNodeHandler: &testutil.FakeNodeHandler{ + Existing: []*v1.Node{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "node0", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + }, + }, + Clientset: fake.NewSimpleClientset(), + }, + allocatorParams: CIDRAllocatorParams{ + ServiceCIDR: nil, + SecondaryServiceCIDR: nil, + }, + testCIDRMap: getTestCidrMap( + map[string][]*testClusterCIDR{ + getTestNodeSelector([]testNodeSelectorRequirement{ + { + key: "testLabel-0", + operator: v1.NodeSelectorOpIn, + values: []string{"node0"}, + }, + }): { + { + name: "cidr-release", + perNodeHostBits: 2, + ipv4CIDR: "127.123.234.0/28", + }, + }, + }), + allocatedCIDRs: map[int][]string{ + 0: {"127.123.234.4/30", "127.123.234.8/30", "127.123.234.12/30"}, + }, + expectedAllocatedCIDRFirstRound: map[int]string{ + 0: "127.123.234.0/30", + }, + cidrsToRelease: [][]string{ + {"127.123.234.0/30"}, + }, + expectedAllocatedCIDRSecondRound: map[int]string{ + 0: "127.123.234.0/30", + }, + }, + } + + testFunc := func(tc releasetestCaseMultiCIDR) { + fakeClient := &fake.Clientset{} + fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) + fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() + // Initialize the range allocator. 
+ allocator, _ := NewMultiCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), fakeClusterCIDRInformer, tc.allocatorParams, nil, tc.testCIDRMap) + rangeAllocator, ok := allocator.(*multiCIDRRangeAllocator) + if !ok { + t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) + return + } + rangeAllocator.nodesSynced = test.AlwaysReady + rangeAllocator.recorder = testutil.NewFakeRecorder() + + // this is a bit of white box testing + for _, allocatedList := range tc.allocatedCIDRs { + for _, allocated := range allocatedList { + _, cidr, err := utilnet.ParseCIDRSloppy(allocated) + if err != nil { + t.Fatalf("%v: unexpected error when parsing CIDR %v: %v", tc.description, allocated, err) + } + + clusterCIDRList, err := getClusterCIDRList("node0", rangeAllocator.cidrMap) + if err != nil { + t.Fatalf("%v: unexpected error when getting associated clusterCIDR for node %v %v", tc.description, "node0", err) + } + + occupied := false + for _, clusterCIDR := range clusterCIDRList { + if err := rangeAllocator.Occupy(clusterCIDR, cidr); err == nil { + occupied = true + clusterCIDR.AssociatedNodes["fakeNode"] = true + break + } + } + if !occupied { + t.Fatalf("%v: unable to occupy CIDR %v", tc.description, allocated) + } + } + } + + err := allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]) + if len(tc.expectedAllocatedCIDRFirstRound) != 0 { + if err != nil { + t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) + } + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { + t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) + } + } else { + if err == nil { + t.Fatalf("%v: unexpected success in AllocateOrOccupyCIDR: %v", tc.description, err) + } + // We don't expect any updates here + time.Sleep(time.Second) + if len(tc.fakeNodeHandler.GetUpdatedNodesCopy()) != 0 { + t.Fatalf("%v: unexpected update of nodes: %v", tc.description, tc.fakeNodeHandler.GetUpdatedNodesCopy()) + } + } + + for _, cidrToRelease := range tc.cidrsToRelease { + + nodeToRelease := v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: "fakeNode", + Labels: map[string]string{ + "testLabel-0": "node0", + }, + }, + } + nodeToRelease.Spec.PodCIDRs = cidrToRelease + err = allocator.ReleaseCIDR(&nodeToRelease) + if err != nil { + t.Fatalf("%v: unexpected error in ReleaseCIDR: %v", tc.description, err) + } + } + if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil { + t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) + } + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { + t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) + } + + if len(tc.expectedAllocatedCIDRSecondRound) == 0 { + // nothing further expected + return + } + for _, updatedNode := range tc.fakeNodeHandler.GetUpdatedNodesCopy() { + if len(updatedNode.Spec.PodCIDRs) == 0 { + continue // not assigned yet + } + //match + for podCIDRIdx, expectedPodCIDR := range tc.expectedAllocatedCIDRSecondRound { + if updatedNode.Spec.PodCIDRs[podCIDRIdx] != expectedPodCIDR { + t.Errorf("%v: found cidr %v that should not be allocated on node with CIDRs:%v", tc.description, expectedPodCIDR, updatedNode.Spec.PodCIDRs) + break + } + } + } + } + + for _, tc := range testCaseMultiCIDRs { + testFunc(tc) + } +} + +// ClusterCIDR tests. 
+ +var alwaysReady = func() bool { return true } + +type clusterCIDRController struct { + *multiCIDRRangeAllocator + clusterCIDRStore cache.Store +} + +func newController() (*fake.Clientset, *clusterCIDRController) { + client := fake.NewSimpleClientset() + + informerFactory := informers.NewSharedInformerFactory(client, controller.NoResyncPeriodFunc()) + cccInformer := informerFactory.Networking().V1alpha1().ClusterCIDRs() + cccIndexer := cccInformer.Informer().GetIndexer() + + nodeInformer := informerFactory.Core().V1().Nodes() + + // These reactors are required to mock functionality that would be covered + // automatically if we weren't using the fake client. + client.PrependReactor("create", "clustercidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) { + clusterCIDR := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ClusterCIDR) + + if clusterCIDR.ObjectMeta.GenerateName != "" { + clusterCIDR.ObjectMeta.Name = fmt.Sprintf("%s-%s", clusterCIDR.ObjectMeta.GenerateName, rand.String(8)) + clusterCIDR.ObjectMeta.GenerateName = "" + } + clusterCIDR.Generation = 1 + cccIndexer.Add(clusterCIDR) + + return false, clusterCIDR, nil + })) + client.PrependReactor("update", "clustercidrs", k8stesting.ReactionFunc(func(action k8stesting.Action) (bool, runtime.Object, error) { + clusterCIDR := action.(k8stesting.CreateAction).GetObject().(*networkingv1alpha1.ClusterCIDR) + clusterCIDR.Generation++ + cccIndexer.Update(clusterCIDR) + + return false, clusterCIDR, nil + })) + + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy("192.168.0.0/16") + _, serviceCIDR, _ := utilnet.ParseCIDRSloppy("10.1.0.0/16") + + allocatorParams := CIDRAllocatorParams{ + ClusterCIDRs: []*net.IPNet{clusterCIDR}, + ServiceCIDR: serviceCIDR, + SecondaryServiceCIDR: nil, + NodeCIDRMaskSizes: []int{24}, + } + testCIDRMap := make(map[string][]*cidrset.ClusterCIDR, 0) + + // Initialize the range allocator. + ra, _ := NewMultiCIDRRangeAllocator(client, nodeInformer, cccInformer, allocatorParams, nil, testCIDRMap) + cccController := ra.(*multiCIDRRangeAllocator) + + cccController.clusterCIDRSynced = alwaysReady + + return client, &clusterCIDRController{ + cccController, + informerFactory.Networking().V1alpha1().ClusterCIDRs().Informer().GetStore(), + } +} + +// Ensure default ClusterCIDR is created during bootstrap. +func TestClusterCIDRDefault(t *testing.T) { + defaultCCC := makeClusterCIDR(defaultClusterCIDRName, "192.168.0.0/16", "", 8, nil) + + client, _ := newController() + createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), defaultClusterCIDRName, metav1.GetOptions{}) + assert.Nil(t, err, "Expected no error getting clustercidr objects") + assert.Equal(t, defaultCCC.Spec, createdCCC.Spec) +} + +// Ensure SyncClusterCIDR creates a new valid ClusterCIDR. 
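+// Each case below seeds the informer store with the ClusterCIDR, invokes
+// syncClusterCIDR directly, and, for valid specs, verifies that the created
+// object keeps the original spec and carries the ClusterCIDR finalizer.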
+func TestSyncClusterCIDRCreate(t *testing.T) { + tests := []struct { + name string + ccc *networkingv1alpha1.ClusterCIDR + wantErr bool + }{ + { + name: "valid IPv4 ClusterCIDR with no NodeSelector", + ccc: makeClusterCIDR("ipv4-ccc", "10.2.0.0/16", "", 8, nil), + wantErr: false, + }, + { + name: "valid IPv4 ClusterCIDR with NodeSelector", + ccc: makeClusterCIDR("ipv4-ccc-label", "10.3.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: false, + }, + { + name: "valid IPv4 ClusterCIDR with overlapping CIDRs", + ccc: makeClusterCIDR("ipv4-ccc-overlap", "10.2.0.0/24", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: false, + }, + { + name: "valid IPv6 ClusterCIDR with no NodeSelector", + ccc: makeClusterCIDR("ipv6-ccc", "", "fd00:1::/112", 8, nil), + wantErr: false, + }, + { + name: "valid IPv6 ClusterCIDR with NodeSelector", + ccc: makeClusterCIDR("ipv6-ccc-label", "", "fd00:2::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: false, + }, + { + name: "valid IPv6 ClusterCIDR with overlapping CIDRs", + ccc: makeClusterCIDR("ipv6-ccc-overlap", "", "fd00:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: false, + }, + { + name: "valid Dualstack ClusterCIDR with no NodeSelector", + ccc: makeClusterCIDR("dual-ccc", "10.2.0.0/16", "fd00:1::/112", 8, nil), + wantErr: false, + }, + { + name: "valid DualStack ClusterCIDR with NodeSelector", + ccc: makeClusterCIDR("dual-ccc-label", "10.3.0.0/16", "fd00:2::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: false, + }, + { + name: "valid Dualstack ClusterCIDR with overlapping CIDRs", + ccc: makeClusterCIDR("dual-ccc-overlap", "10.2.0.0/16", "fd00:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: false, + }, + // invalid ClusterCIDRs. + { + name: "invalid ClusterCIDR with both IPv4 and IPv6 CIDRs nil", + ccc: makeClusterCIDR("invalid-ccc", "", "", 0, nil), + wantErr: true, + }, + { + name: "invalid IPv4 ClusterCIDR", + ccc: makeClusterCIDR("invalid-ipv4-ccc", "1000.2.0.0/16", "", 8, nil), + wantErr: true, + }, + { + name: "invalid IPv6 ClusterCIDR", + ccc: makeClusterCIDR("invalid-ipv6-ccc", "", "aaaaa:1:1::/112", 8, nil), + wantErr: true, + }, + { + name: "invalid dualstack ClusterCIDR", + ccc: makeClusterCIDR("invalid-dual-ccc", "10.2.0.0/16", "aaaaa:1:1::/112", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})), + wantErr: true, + }, + } + + client, cccController := newController() + for _, tc := range tests { + cccController.clusterCIDRStore.Add(tc.ccc) + err := cccController.syncClusterCIDR(tc.ccc.Name) + if tc.wantErr { + assert.Error(t, err) + continue + } + assert.NoError(t, err) + expectActions(t, client.Actions(), 1, "create", "clustercidrs") + + createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), tc.ccc.Name, metav1.GetOptions{}) + assert.Nil(t, err, "Expected no error getting clustercidr object") + assert.Equal(t, tc.ccc.Spec, createdCCC.Spec) + assert.Equal(t, []string{clusterCIDRFinalizer}, createdCCC.Finalizers) + } +} + +// Ensure syncClusterCIDR for ClusterCIDR delete removes the ClusterCIDR. 
+func TestSyncClusterCIDRDelete(t *testing.T) { + _, cccController := newController() + + testCCC := makeClusterCIDR("testing-1", "10.1.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})) + + cccController.clusterCIDRStore.Add(testCCC) + err := cccController.syncClusterCIDR(testCCC.Name) + assert.NoError(t, err) + + deletionTimestamp := metav1.Now() + testCCC.DeletionTimestamp = &deletionTimestamp + cccController.clusterCIDRStore.Update(testCCC) + err = cccController.syncClusterCIDR(testCCC.Name) + assert.NoError(t, err) +} + +// Ensure syncClusterCIDR for ClusterCIDR delete does not remove ClusterCIDR +// if a node is associated with the ClusterCIDR. +func TestSyncClusterCIDRDeleteWithNodesAssociated(t *testing.T) { + client, cccController := newController() + + testCCC := makeClusterCIDR("testing-1", "10.1.0.0/16", "", 8, makeNodeSelector("foo", v1.NodeSelectorOpIn, []string{"bar"})) + + cccController.clusterCIDRStore.Add(testCCC) + err := cccController.syncClusterCIDR(testCCC.Name) + assert.NoError(t, err) + + // Mock the IPAM controller behavior associating node with ClusterCIDR. + nodeSelectorKey, _ := cccController.nodeSelectorKey(testCCC) + clusterCIDRs, _ := cccController.cidrMap[nodeSelectorKey] + clusterCIDRs[0].AssociatedNodes["test-node"] = true + + createdCCC, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), testCCC.Name, metav1.GetOptions{}) + assert.Nil(t, err, "Expected no error getting clustercidr object") + + deletionTimestamp := metav1.Now() + createdCCC.DeletionTimestamp = &deletionTimestamp + cccController.clusterCIDRStore.Update(createdCCC) + err = cccController.syncClusterCIDR(createdCCC.Name) + assert.Error(t, err, fmt.Sprintf("ClusterCIDR %s marked as terminating, won't be deleted until all associated nodes are deleted", createdCCC.Name)) +} + +func expectActions(t *testing.T, actions []k8stesting.Action, num int, verb, resource string) { + t.Helper() + // if actions are less, the below logic will panic. + if num > len(actions) { + t.Fatalf("len of actions %v is unexpected. Expected to be at least %v", len(actions), num+1) + } + + for i := 0; i < num; i++ { + relativePos := len(actions) - i - 1 + assert.Equal(t, verb, actions[relativePos].GetVerb(), "Expected action -%d verb to be %s", i, verb) + assert.Equal(t, resource, actions[relativePos].GetResource().Resource, "Expected action -%d resource to be %s", i, resource) + } +} + +func makeNodeSelector(key string, op v1.NodeSelectorOperator, values []string) *v1.NodeSelector { + return &v1.NodeSelector{ + NodeSelectorTerms: []v1.NodeSelectorTerm{ + { + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: key, + Operator: op, + Values: values, + }, + }, + }, + }, + } +} + +// makeClusterCIDR returns a mock ClusterCIDR object. 
+func makeClusterCIDR(cccName, ipv4CIDR, ipv6CIDR string, perNodeHostBits int32, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR { + testCCC := &networkingv1alpha1.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: cccName}, + Spec: networkingv1alpha1.ClusterCIDRSpec{}, + } + + testCCC.Spec.PerNodeHostBits = perNodeHostBits + + if ipv4CIDR != "" { + testCCC.Spec.IPv4 = ipv4CIDR + } + + if ipv6CIDR != "" { + testCCC.Spec.IPv6 = ipv6CIDR + } + + if nodeSelector != nil { + testCCC.Spec.NodeSelector = nodeSelector + } + + return testCCC +} diff --git a/pkg/controller/nodeipam/ipam/multicidrset/metrics.go b/pkg/controller/nodeipam/ipam/multicidrset/metrics.go new file mode 100644 index 00000000000..af7c5e2c6e3 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multicidrset/metrics.go @@ -0,0 +1,78 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multicidrset + +import ( + "sync" + + "k8s.io/component-base/metrics" + "k8s.io/component-base/metrics/legacyregistry" +) + +const nodeIpamSubsystem = "node_ipam_controller" + +var ( + cidrSetAllocations = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: nodeIpamSubsystem, + Name: "multicidrset_cidrs_allocations_total", + Help: "Counter measuring total number of CIDR allocations.", + StabilityLevel: metrics.ALPHA, + }, + []string{"clusterCIDR"}, + ) + cidrSetReleases = metrics.NewCounterVec( + &metrics.CounterOpts{ + Subsystem: nodeIpamSubsystem, + Name: "multicidrset_cidrs_releases_total", + Help: "Counter measuring total number of CIDR releases.", + StabilityLevel: metrics.ALPHA, + }, + []string{"clusterCIDR"}, + ) + cidrSetUsage = metrics.NewGaugeVec( + &metrics.GaugeOpts{ + Subsystem: nodeIpamSubsystem, + Name: "multicidrset_usage_cidrs", + Help: "Gauge measuring percentage of allocated CIDRs.", + StabilityLevel: metrics.ALPHA, + }, + []string{"clusterCIDR"}, + ) + cidrSetAllocationTriesPerRequest = metrics.NewHistogramVec( + &metrics.HistogramOpts{ + Subsystem: nodeIpamSubsystem, + Name: "multicidrset_allocation_tries_per_request", + Help: "Histogram measuring CIDR allocation tries per request.", + StabilityLevel: metrics.ALPHA, + Buckets: metrics.ExponentialBuckets(1, 5, 5), + }, + []string{"clusterCIDR"}, + ) +) + +var registerMetrics sync.Once + +// registerCidrsetMetrics the metrics that are to be monitored. +func registerCidrsetMetrics() { + registerMetrics.Do(func() { + legacyregistry.MustRegister(cidrSetAllocations) + legacyregistry.MustRegister(cidrSetReleases) + legacyregistry.MustRegister(cidrSetUsage) + legacyregistry.MustRegister(cidrSetAllocationTriesPerRequest) + }) +} diff --git a/pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set.go b/pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set.go new file mode 100644 index 00000000000..45527c80bd5 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set.go @@ -0,0 +1,361 @@ +/* +Copyright 2022 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package multicidrset + +import ( + "encoding/binary" + "fmt" + "math/big" + "math/bits" + "net" + "sync" + + netutils "k8s.io/utils/net" +) + +// MultiCIDRSet manages a set of CIDR ranges from which blocks of IPs can +// be allocated from. +type MultiCIDRSet struct { + sync.Mutex + // ClusterCIDR is the CIDR assigned to the cluster. + ClusterCIDR *net.IPNet + // NodeMaskSize is the mask size, in bits,assigned to the nodes + // caches the mask size to avoid the penalty of calling nodeMask.Size(). + NodeMaskSize int + // MaxCIDRs is the maximum number of CIDRs that can be allocated. + MaxCIDRs int + // Label stores the CIDR in a string, it is used to identify the metrics such + // as Number of allocations, Total number of CIDR releases, Percentage of + // allocated CIDRs, Tries required for allocating a CIDR for a particular CIDRSet. + Label string + // AllocatedCIDRMap stores all the allocated CIDRs from the current CIDRSet. + // Stores a mapping of the next candidate CIDR for allocation to it's + // allocation status. Next candidate is used only if allocation status is false. + AllocatedCIDRMap map[string]bool + + // clusterMaskSize is the mask size, in bits, assigned to the cluster. + // caches the mask size to avoid the penalty of calling clusterCIDR.Mask.Size(). + clusterMaskSize int + // nodeMask is the network mask assigned to the nodes. + nodeMask net.IPMask + // allocatedCIDRs counts the number of CIDRs allocated. + allocatedCIDRs int + // nextCandidate points to the next CIDR that should be free. + nextCandidate int +} + +// ClusterCIDR is an internal representation of the ClusterCIDR API object. +type ClusterCIDR struct { + // Name of the associated ClusterCIDR API object. + Name string + // IPv4CIDRSet is the MultiCIDRSet representation of ClusterCIDR.spec.ipv4 + // of the associated ClusterCIDR API object. + IPv4CIDRSet *MultiCIDRSet + // IPv6CIDRSet is the MultiCIDRSet representation of ClusterCIDR.spec.ipv6 + // of the associated ClusterCIDR API object. + IPv6CIDRSet *MultiCIDRSet + // AssociatedNodes is used to identify which nodes have CIDRs allocated from this ClusterCIDR. + // Stores a mapping of node name to association status. + AssociatedNodes map[string]bool + // Terminating is used to identify whether ClusterCIDR has been marked for termination. + Terminating bool +} + +const ( + // The subnet mask size cannot be greater than 16 more than the cluster mask size + // TODO: https://github.com/kubernetes/kubernetes/issues/44918 + // clusterSubnetMaxDiff limited to 16 due to the uncompressed bitmap. + // Due to this limitation the subnet mask for IPv6 cluster cidr needs to be >= 48 + // as default mask size for IPv6 is 64. + clusterSubnetMaxDiff = 16 + // halfIPv6Len is the half of the IPv6 length. + halfIPv6Len = net.IPv6len / 2 +) + +// CIDRRangeNoCIDRsRemainingErr is an error type used to denote there is no more +// space to allocate CIDR ranges from the given CIDR. 
+type CIDRRangeNoCIDRsRemainingErr struct { + // CIDR represents the CIDR which is exhausted. + CIDR string +} + +func (err *CIDRRangeNoCIDRsRemainingErr) Error() string { + return fmt.Sprintf("CIDR allocation failed; there are no remaining CIDRs left to allocate in the range %s", err.CIDR) +} + +// CIDRSetSubNetTooBigErr is an error type to denote that subnet mask size is too +// big compared to the CIDR mask size. +type CIDRSetSubNetTooBigErr struct { + cidr string + subnetMaskSize int + clusterMaskSize int +} + +func (err *CIDRSetSubNetTooBigErr) Error() string { + return fmt.Sprintf("Creation of New CIDR Set failed for %s. "+ + "PerNodeMaskSize %d is too big for CIDR Mask %d, Maximum difference allowed "+ + "is %d", err.cidr, err.subnetMaskSize, err.clusterMaskSize, clusterSubnetMaxDiff) +} + +// NewMultiCIDRSet creates a new MultiCIDRSet. +func NewMultiCIDRSet(cidrConfig *net.IPNet, perNodeHostBits int) (*MultiCIDRSet, error) { + clusterMask := cidrConfig.Mask + clusterMaskSize, bits := clusterMask.Size() + + var subNetMaskSize int + switch /*v4 or v6*/ { + case netutils.IsIPv4(cidrConfig.IP): + subNetMaskSize = 32 - perNodeHostBits + case netutils.IsIPv6(cidrConfig.IP): + subNetMaskSize = 128 - perNodeHostBits + } + + if netutils.IsIPv6(cidrConfig.IP) && (subNetMaskSize-clusterMaskSize > clusterSubnetMaxDiff) { + return nil, &CIDRSetSubNetTooBigErr{ + cidr: cidrConfig.String(), + subnetMaskSize: subNetMaskSize, + clusterMaskSize: clusterMaskSize, + } + } + + // Register MultiCIDRSet metrics. + registerCidrsetMetrics() + + return &MultiCIDRSet{ + ClusterCIDR: cidrConfig, + nodeMask: net.CIDRMask(subNetMaskSize, bits), + clusterMaskSize: clusterMaskSize, + MaxCIDRs: 1 << uint32(subNetMaskSize-clusterMaskSize), + NodeMaskSize: subNetMaskSize, + Label: cidrConfig.String(), + AllocatedCIDRMap: make(map[string]bool, 0), + }, nil +} + +func (s *MultiCIDRSet) indexToCIDRBlock(index int) (*net.IPNet, error) { + var ip []byte + switch /*v4 or v6*/ { + case netutils.IsIPv4(s.ClusterCIDR.IP): + j := uint32(index) << uint32(32-s.NodeMaskSize) + ipInt := (binary.BigEndian.Uint32(s.ClusterCIDR.IP)) | j + ip = make([]byte, net.IPv4len) + binary.BigEndian.PutUint32(ip, ipInt) + case netutils.IsIPv6(s.ClusterCIDR.IP): + // leftClusterIP | rightClusterIP + // 2001:0DB8:1234:0000:0000:0000:0000:0000 + const v6NBits = 128 + const halfV6NBits = v6NBits / 2 + leftClusterIP := binary.BigEndian.Uint64(s.ClusterCIDR.IP[:halfIPv6Len]) + rightClusterIP := binary.BigEndian.Uint64(s.ClusterCIDR.IP[halfIPv6Len:]) + + ip = make([]byte, net.IPv6len) + + if s.NodeMaskSize <= halfV6NBits { + // We only care about left side IP. + leftClusterIP |= uint64(index) << uint(halfV6NBits-s.NodeMaskSize) + } else { + if s.clusterMaskSize < halfV6NBits { + // see how many bits are needed to reach the left side. + btl := uint(s.NodeMaskSize - halfV6NBits) + indexMaxBit := uint(64 - bits.LeadingZeros64(uint64(index))) + if indexMaxBit > btl { + leftClusterIP |= uint64(index) >> btl + } + } + // the right side will be calculated the same way either the + // subNetMaskSize affects both left and right sides. 
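+ // In other words: once NodeMaskSize exceeds 64, the low-order index bits are
+ // shifted into the lower 64 bits of the address (by 128-NodeMaskSize), while
+ // any index bits beyond that were already folded into the upper half above.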
+ rightClusterIP |= uint64(index) << uint(v6NBits-s.NodeMaskSize) + } + binary.BigEndian.PutUint64(ip[:halfIPv6Len], leftClusterIP) + binary.BigEndian.PutUint64(ip[halfIPv6Len:], rightClusterIP) + default: + return nil, fmt.Errorf("invalid IP: %s", s.ClusterCIDR.IP) + } + return &net.IPNet{ + IP: ip, + Mask: s.nodeMask, + }, nil +} + +// NextCandidate returns the next candidate and the last evaluated index +// for the current cidrSet. Returns nil if the candidate is already allocated. +func (s *MultiCIDRSet) NextCandidate() (*net.IPNet, int, error) { + s.Lock() + defer s.Unlock() + + if s.allocatedCIDRs == s.MaxCIDRs { + return nil, 0, &CIDRRangeNoCIDRsRemainingErr{ + CIDR: s.Label, + } + } + + candidate := s.nextCandidate + for i := 0; i < s.MaxCIDRs; i++ { + nextCandidateCIDR, err := s.indexToCIDRBlock(candidate) + if err != nil { + return nil, i, err + } + // Check if the nextCandidate is not already allocated. + if _, ok := s.AllocatedCIDRMap[nextCandidateCIDR.String()]; !ok { + s.nextCandidate = (candidate + 1) % s.MaxCIDRs + return nextCandidateCIDR, i, nil + } + candidate = (candidate + 1) % s.MaxCIDRs + } + + return nil, s.MaxCIDRs, &CIDRRangeNoCIDRsRemainingErr{ + CIDR: s.Label, + } +} + +// getBeginningAndEndIndices returns the indices for the given CIDR, returned +// values are inclusive indices [beginning, end]. +func (s *MultiCIDRSet) getBeginningAndEndIndices(cidr *net.IPNet) (int, int, error) { + if cidr == nil { + return -1, -1, fmt.Errorf("error getting indices for cluster cidr %v, cidr is nil", s.ClusterCIDR) + } + begin, end := 0, s.MaxCIDRs-1 + cidrMask := cidr.Mask + maskSize, _ := cidrMask.Size() + var ipSize int + + if !s.ClusterCIDR.Contains(cidr.IP.Mask(s.ClusterCIDR.Mask)) && !cidr.Contains(s.ClusterCIDR.IP.Mask(cidr.Mask)) { + return -1, -1, fmt.Errorf("cidr %v is out the range of cluster cidr %v", cidr, s.ClusterCIDR) + } + + if s.clusterMaskSize < maskSize { + var err error + ipSize = net.IPv4len + if netutils.IsIPv6(cidr.IP) { + ipSize = net.IPv6len + } + begin, err = s.getIndexForCIDR(&net.IPNet{ + IP: cidr.IP.Mask(s.nodeMask), + Mask: s.nodeMask, + }) + if err != nil { + return -1, -1, err + } + ip := make([]byte, ipSize) + if netutils.IsIPv4(cidr.IP) { + ipInt := binary.BigEndian.Uint32(cidr.IP) | (^binary.BigEndian.Uint32(cidr.Mask)) + binary.BigEndian.PutUint32(ip, ipInt) + } else { + // ipIntLeft | ipIntRight + // 2001:0DB8:1234:0000:0000:0000:0000:0000 + ipIntLeft := binary.BigEndian.Uint64(cidr.IP[:net.IPv6len/2]) | (^binary.BigEndian.Uint64(cidr.Mask[:net.IPv6len/2])) + ipIntRight := binary.BigEndian.Uint64(cidr.IP[net.IPv6len/2:]) | (^binary.BigEndian.Uint64(cidr.Mask[net.IPv6len/2:])) + binary.BigEndian.PutUint64(ip[:net.IPv6len/2], ipIntLeft) + binary.BigEndian.PutUint64(ip[net.IPv6len/2:], ipIntRight) + } + end, err = s.getIndexForCIDR(&net.IPNet{ + IP: net.IP(ip).Mask(s.nodeMask), + Mask: s.nodeMask, + }) + if err != nil { + return -1, -1, err + } + } + return begin, end, nil +} + +// Release releases the given CIDR range. +func (s *MultiCIDRSet) Release(cidr *net.IPNet) error { + begin, end, err := s.getBeginningAndEndIndices(cidr) + if err != nil { + return err + } + s.Lock() + defer s.Unlock() + + for i := begin; i <= end; i++ { + // Remove from the allocated CIDR Map and decrement the counter only if currently + // marked allocated. Avoids double counting. 
+ currCIDR, err := s.indexToCIDRBlock(i) + if err != nil { + return err + } + if _, ok := s.AllocatedCIDRMap[currCIDR.String()]; ok { + delete(s.AllocatedCIDRMap, currCIDR.String()) + s.allocatedCIDRs-- + cidrSetReleases.WithLabelValues(s.Label).Inc() + } + } + + cidrSetUsage.WithLabelValues(s.Label).Set(float64(s.allocatedCIDRs) / float64(s.MaxCIDRs)) + + return nil +} + +// Occupy marks the given CIDR range as used. Occupy succeeds even if the CIDR +// range was previously used. +func (s *MultiCIDRSet) Occupy(cidr *net.IPNet) (err error) { + begin, end, err := s.getBeginningAndEndIndices(cidr) + if err != nil { + return err + } + s.Lock() + defer s.Unlock() + + for i := begin; i <= end; i++ { + // Add to the allocated CIDR Map and increment the counter only if not already + // marked allocated. Prevents double counting. + currCIDR, err := s.indexToCIDRBlock(i) + if err != nil { + return err + } + if _, ok := s.AllocatedCIDRMap[currCIDR.String()]; !ok { + s.AllocatedCIDRMap[currCIDR.String()] = true + cidrSetAllocations.WithLabelValues(s.Label).Inc() + s.allocatedCIDRs++ + } + } + cidrSetUsage.WithLabelValues(s.Label).Set(float64(s.allocatedCIDRs) / float64(s.MaxCIDRs)) + + return nil +} + +func (s *MultiCIDRSet) getIndexForCIDR(cidr *net.IPNet) (int, error) { + return s.getIndexForIP(cidr.IP) +} + +func (s *MultiCIDRSet) getIndexForIP(ip net.IP) (int, error) { + if ip.To4() != nil { + cidrIndex := (binary.BigEndian.Uint32(s.ClusterCIDR.IP) ^ binary.BigEndian.Uint32(ip.To4())) >> uint32(32-s.NodeMaskSize) + if cidrIndex >= uint32(s.MaxCIDRs) { + return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.NodeMaskSize) + } + return int(cidrIndex), nil + } + if netutils.IsIPv6(ip) { + bigIP := big.NewInt(0).SetBytes(s.ClusterCIDR.IP) + bigIP = bigIP.Xor(bigIP, big.NewInt(0).SetBytes(ip)) + cidrIndexBig := bigIP.Rsh(bigIP, uint(net.IPv6len*8-s.NodeMaskSize)) + cidrIndex := cidrIndexBig.Uint64() + if cidrIndex >= uint64(s.MaxCIDRs) { + return 0, fmt.Errorf("CIDR: %v/%v is out of the range of CIDR allocator", ip, s.NodeMaskSize) + } + return int(cidrIndex), nil + } + + return 0, fmt.Errorf("invalid IP: %v", ip) +} + +// UpdateEvaluatedCount increments the evaluated count. +func (s *MultiCIDRSet) UpdateEvaluatedCount(evaluated int) { + cidrSetAllocationTriesPerRequest.WithLabelValues(s.Label).Observe(float64(evaluated)) +} diff --git a/pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set_test.go b/pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set_test.go new file mode 100644 index 00000000000..b6dbf99ac95 --- /dev/null +++ b/pkg/controller/nodeipam/ipam/multicidrset/multi_cidr_set_test.go @@ -0,0 +1,874 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package multicidrset + +import ( + "net" + "reflect" + "testing" + + "k8s.io/component-base/metrics/testutil" + "k8s.io/klog/v2" + utilnet "k8s.io/utils/net" +) + +func allocateNext(s *MultiCIDRSet) (*net.IPNet, error) { + candidate, _, err := s.NextCandidate() + if err != nil { + return nil, err + } + + err = s.Occupy(candidate) + + return candidate, err +} + +func TestCIDRSetFullyAllocated(t *testing.T) { + cases := []struct { + clusterCIDRStr string + perNodeHostBits int + expectedCIDR string + description string + }{ + { + clusterCIDRStr: "127.123.234.0/28", + perNodeHostBits: 4, + expectedCIDR: "127.123.234.0/28", + description: "Fully allocated CIDR with IPv4", + }, + { + clusterCIDRStr: "beef:1234::/112", + perNodeHostBits: 16, + expectedCIDR: "beef:1234::/112", + description: "Fully allocated CIDR with IPv6", + }, + } + for _, tc := range cases { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr) + a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits) + if err != nil { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + p, err := allocateNext(a) + if err != nil { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + if p.String() != tc.expectedCIDR { + t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v", + p.String(), tc.expectedCIDR, tc.description) + } + + _, err = allocateNext(a) + if err == nil { + t.Fatalf("expected error because of fully-allocated range for %v", tc.description) + } + + a.Release(p) + + p, err = allocateNext(a) + if err != nil { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + if p.String() != tc.expectedCIDR { + t.Fatalf("unexpected allocated cidr: %v, expecting %v for %v", + p.String(), tc.expectedCIDR, tc.description) + } + _, err = allocateNext(a) + if err == nil { + t.Fatalf("expected error because of fully-allocated range for %v", tc.description) + } + } +} + +func TestIndexToCIDRBlock(t *testing.T) { + cases := []struct { + clusterCIDRStr string + perNodeHostBits int + index int + CIDRBlock string + description string + }{ + { + clusterCIDRStr: "127.123.3.0/16", + perNodeHostBits: 8, + index: 0, + CIDRBlock: "127.123.0.0/24", + description: "1st IP address indexed with IPv4", + }, + { + clusterCIDRStr: "127.123.0.0/16", + perNodeHostBits: 8, + index: 15, + CIDRBlock: "127.123.15.0/24", + description: "16th IP address indexed with IPv4", + }, + { + clusterCIDRStr: "192.168.5.219/28", + perNodeHostBits: 0, + index: 5, + CIDRBlock: "192.168.5.213/32", + description: "5th IP address indexed with IPv4", + }, + { + clusterCIDRStr: "2001:0db8:1234:3::/48", + perNodeHostBits: 64, + index: 0, + CIDRBlock: "2001:db8:1234::/64", + description: "1st IP address indexed with IPv6 /64", + }, + { + clusterCIDRStr: "2001:0db8:1234::/48", + perNodeHostBits: 64, + index: 15, + CIDRBlock: "2001:db8:1234:f::/64", + description: "16th IP address indexed with IPv6 /64", + }, + { + clusterCIDRStr: "2001:0db8:85a3::8a2e:0370:7334/50", + perNodeHostBits: 65, + index: 6425, + CIDRBlock: "2001:db8:85a3:3232::/63", + description: "6426th IP address indexed with IPv6 /63", + }, + { + clusterCIDRStr: "2001:0db8::/32", + perNodeHostBits: 80, + index: 0, + CIDRBlock: "2001:db8::/48", + description: "1st IP address indexed with IPv6 /48", + }, + { + clusterCIDRStr: "2001:0db8::/32", + perNodeHostBits: 80, + index: 15, + CIDRBlock: "2001:db8:f::/48", + description: "16th IP address indexed with IPv6 /48", + }, + { + clusterCIDRStr: "2001:0db8:85a3::8a2e:0370:7334/32", + perNodeHostBits: 80, + index: 
6425, + CIDRBlock: "2001:db8:1919::/48", + description: "6426th IP address indexed with IPv6 /48", + }, + { + clusterCIDRStr: "2001:0db8:1234:ff00::/56", + perNodeHostBits: 56, + index: 0, + CIDRBlock: "2001:db8:1234:ff00::/72", + description: "1st IP address indexed with IPv6 /72", + }, + { + clusterCIDRStr: "2001:0db8:1234:ff00::/56", + perNodeHostBits: 56, + index: 15, + CIDRBlock: "2001:db8:1234:ff00:f00::/72", + description: "16th IP address indexed with IPv6 /72", + }, + { + clusterCIDRStr: "2001:0db8:1234:ff00::0370:7334/56", + perNodeHostBits: 56, + index: 6425, + CIDRBlock: "2001:db8:1234:ff19:1900::/72", + description: "6426th IP address indexed with IPv6 /72", + }, + { + clusterCIDRStr: "2001:0db8:1234:0:1234::/80", + perNodeHostBits: 32, + index: 0, + CIDRBlock: "2001:db8:1234:0:1234::/96", + description: "1st IP address indexed with IPv6 /96", + }, + { + clusterCIDRStr: "2001:0db8:1234:0:1234::/80", + perNodeHostBits: 32, + index: 15, + CIDRBlock: "2001:db8:1234:0:1234:f::/96", + description: "16th IP address indexed with IPv6 /96", + }, + { + clusterCIDRStr: "2001:0db8:1234:ff00::0370:7334/80", + perNodeHostBits: 32, + index: 6425, + CIDRBlock: "2001:db8:1234:ff00:0:1919::/96", + description: "6426th IP address indexed with IPv6 /96", + }, + } + for _, tc := range cases { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr) + a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits) + if err != nil { + t.Fatalf("error for %v ", tc.description) + } + cidr, err := a.indexToCIDRBlock(tc.index) + if err != nil { + t.Fatalf("error for %v ", tc.description) + } + if cidr.String() != tc.CIDRBlock { + t.Fatalf("error for %v index %d %s", tc.description, tc.index, cidr.String()) + } + } +} + +func TestCIDRSet_RandomishAllocation(t *testing.T) { + cases := []struct { + clusterCIDRStr string + description string + }{ + { + clusterCIDRStr: "127.123.234.0/16", + description: "RandomishAllocation with IPv4", + }, + { + clusterCIDRStr: "beef:1234::/112", + description: "RandomishAllocation with IPv6", + }, + } + for _, tc := range cases { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr) + a, err := NewMultiCIDRSet(clusterCIDR, 8) + if err != nil { + t.Fatalf("Error allocating CIDRSet for %v", tc.description) + } + // allocate all the CIDRs. + var cidrs []*net.IPNet + + for i := 0; i < 256; i++ { + if c, err := allocateNext(a); err == nil { + cidrs = append(cidrs, c) + } else { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + } + + _, err = allocateNext(a) + if err == nil { + t.Fatalf("expected error because of fully-allocated range for %v", tc.description) + } + // release all the CIDRs. + for i := 0; i < len(cidrs); i++ { + a.Release(cidrs[i]) + } + + // allocate the CIDRs again. 
+ var rcidrs []*net.IPNet + for i := 0; i < 256; i++ { + if c, err := allocateNext(a); err == nil { + rcidrs = append(rcidrs, c) + } else { + t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description) + } + } + _, err = allocateNext(a) + if err == nil { + t.Fatalf("expected error because of fully-allocated range for %v", tc.description) + } + + if !reflect.DeepEqual(cidrs, rcidrs) { + t.Fatalf("expected re-allocated cidrs are the same collection for %v", tc.description) + } + } +} + +func TestCIDRSet_AllocationOccupied(t *testing.T) { + cases := []struct { + clusterCIDRStr string + description string + }{ + { + clusterCIDRStr: "127.123.234.0/16", + description: "AllocationOccupied with IPv4", + }, + { + clusterCIDRStr: "beef:1234::/112", + description: "AllocationOccupied with IPv6", + }, + } + for _, tc := range cases { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr) + a, err := NewMultiCIDRSet(clusterCIDR, 8) + if err != nil { + t.Fatalf("Error allocating CIDRSet for %v", tc.description) + } + // allocate all the CIDRs. + var cidrs []*net.IPNet + var numCIDRs = 256 + + for i := 0; i < numCIDRs; i++ { + if c, err := allocateNext(a); err == nil { + cidrs = append(cidrs, c) + } else { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + } + + _, err = allocateNext(a) + if err == nil { + t.Fatalf("expected error because of fully-allocated range for %v", tc.description) + } + // release all the CIDRs. + for i := 0; i < len(cidrs); i++ { + a.Release(cidrs[i]) + } + // occupy the last 128 CIDRs. + for i := numCIDRs / 2; i < numCIDRs; i++ { + a.Occupy(cidrs[i]) + } + // occupy the first of the last 128 again. + a.Occupy(cidrs[numCIDRs/2]) + + // allocate the first 128 CIDRs again. + var rcidrs []*net.IPNet + for i := 0; i < numCIDRs/2; i++ { + if c, err := allocateNext(a); err == nil { + rcidrs = append(rcidrs, c) + } else { + t.Fatalf("unexpected error: %d, %v for %v", i, err, tc.description) + } + } + _, err = allocateNext(a) + if err == nil { + t.Fatalf("expected error because of fully-allocated range for %v", tc.description) + } + + // check Occupy() works properly. + for i := numCIDRs / 2; i < numCIDRs; i++ { + rcidrs = append(rcidrs, cidrs[i]) + } + if !reflect.DeepEqual(cidrs, rcidrs) { + t.Fatalf("expected re-allocated cidrs are the same collection for %v", tc.description) + } + } +} + +func TestDoubleOccupyRelease(t *testing.T) { + // Run a sequence of operations and check the number of occupied CIDRs + // after each one. 
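+ // Occupy and Release act on whole per-node /24 blocks here (8 host bits per node),
+ // so a /22 spans four /24s and only blocks that actually change state move the count.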
+ clusterCIDRStr := "10.42.0.0/16"
+ operations := []struct {
+ cidrStr string
+ operation string
+ numOccupied int
+ }{
+ // Occupy 1 element: +1
+ {
+ cidrStr: "10.42.5.0/24",
+ operation: "occupy",
+ numOccupied: 1,
+ },
+ // Occupy 1 more element: +1
+ {
+ cidrStr: "10.42.9.0/24",
+ operation: "occupy",
+ numOccupied: 2,
+ },
+ // Occupy 4 elements overlapping with one from the above: +3
+ {
+ cidrStr: "10.42.8.0/22",
+ operation: "occupy",
+ numOccupied: 5,
+ },
+ // Occupy an already-occupied element: no change
+ {
+ cidrStr: "10.42.9.0/24",
+ operation: "occupy",
+ numOccupied: 5,
+ },
+ // Release an occupied element: -1
+ {
+ cidrStr: "10.42.9.0/24",
+ operation: "release",
+ numOccupied: 4,
+ },
+ // Release an unoccupied element: no change
+ {
+ cidrStr: "10.42.9.0/24",
+ operation: "release",
+ numOccupied: 4,
+ },
+ // Release 4 elements, only one of which is occupied: -1
+ {
+ cidrStr: "10.42.4.0/22",
+ operation: "release",
+ numOccupied: 3,
+ },
+ }
+ // Check that there are exactly that many allocatable CIDRs after all
+ // operations have been executed.
+ numAllocatable24s := (1 << 8) - 3
+
+ _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(clusterCIDRStr)
+ a, err := NewMultiCIDRSet(clusterCIDR, 8)
+ if err != nil {
+ t.Fatalf("Error allocating CIDRSet")
+ }
+
+ // Execute the operations.
+ for _, op := range operations {
+ _, cidr, _ := utilnet.ParseCIDRSloppy(op.cidrStr)
+ switch op.operation {
+ case "occupy":
+ a.Occupy(cidr)
+ case "release":
+ a.Release(cidr)
+ default:
+ t.Fatalf("test error: unknown operation %v", op.operation)
+ }
+ if a.allocatedCIDRs != op.numOccupied {
+ t.Fatalf("CIDR %v Expected %d occupied CIDRs, got %d", cidr, op.numOccupied, a.allocatedCIDRs)
+ }
+ }
+
+ // Make sure that we can allocate exactly `numAllocatable24s` elements. 
+ for i := 0; i < numAllocatable24s; i++ { + _, err := allocateNext(a) + if err != nil { + t.Fatalf("Expected to be able to allocate %d CIDRS, failed after %d", numAllocatable24s, i) + } + } + + _, err = allocateNext(a) + if err == nil { + t.Fatalf("Expected to be able to allocate exactly %d CIDRS, got one more", numAllocatable24s) + } +} + +func TestGetBitforCIDR(t *testing.T) { + cases := []struct { + clusterCIDRStr string + perNodeHostBits int + subNetCIDRStr string + expectedBit int + expectErr bool + description string + }{ + { + clusterCIDRStr: "127.0.0.0/8", + perNodeHostBits: 16, + subNetCIDRStr: "127.0.0.0/16", + expectedBit: 0, + expectErr: false, + description: "Get 0 Bit with IPv4", + }, + { + clusterCIDRStr: "be00::/8", + perNodeHostBits: 112, + subNetCIDRStr: "be00::/16", + expectedBit: 0, + expectErr: false, + description: "Get 0 Bit with IPv6", + }, + { + clusterCIDRStr: "127.0.0.0/8", + perNodeHostBits: 16, + subNetCIDRStr: "127.123.0.0/16", + expectedBit: 123, + expectErr: false, + description: "Get 123rd Bit with IPv4", + }, + { + clusterCIDRStr: "be00::/8", + perNodeHostBits: 112, + subNetCIDRStr: "beef::/16", + expectedBit: 0xef, + expectErr: false, + description: "Get xef Bit with IPv6", + }, + { + clusterCIDRStr: "127.0.0.0/8", + perNodeHostBits: 16, + subNetCIDRStr: "127.168.0.0/16", + expectedBit: 168, + expectErr: false, + description: "Get 168th Bit with IPv4", + }, + { + clusterCIDRStr: "be00::/8", + perNodeHostBits: 112, + subNetCIDRStr: "be68::/16", + expectedBit: 0x68, + expectErr: false, + description: "Get x68th Bit with IPv6", + }, + { + clusterCIDRStr: "127.0.0.0/8", + perNodeHostBits: 16, + subNetCIDRStr: "127.224.0.0/16", + expectedBit: 224, + expectErr: false, + description: "Get 224th Bit with IPv4", + }, + { + clusterCIDRStr: "be00::/8", + perNodeHostBits: 112, + subNetCIDRStr: "be24::/16", + expectedBit: 0x24, + expectErr: false, + description: "Get x24th Bit with IPv6", + }, + { + clusterCIDRStr: "192.168.0.0/16", + perNodeHostBits: 8, + subNetCIDRStr: "192.168.12.0/24", + expectedBit: 12, + expectErr: false, + description: "Get 12th Bit with IPv4", + }, + { + clusterCIDRStr: "beef::/16", + perNodeHostBits: 104, + subNetCIDRStr: "beef:1200::/24", + expectedBit: 0x12, + expectErr: false, + description: "Get x12th Bit with IPv6", + }, + { + clusterCIDRStr: "192.168.0.0/16", + perNodeHostBits: 8, + subNetCIDRStr: "192.168.151.0/24", + expectedBit: 151, + expectErr: false, + description: "Get 151st Bit with IPv4", + }, + { + clusterCIDRStr: "beef::/16", + perNodeHostBits: 104, + subNetCIDRStr: "beef:9700::/24", + expectedBit: 0x97, + expectErr: false, + description: "Get x97st Bit with IPv6", + }, + { + clusterCIDRStr: "192.168.0.0/16", + perNodeHostBits: 8, + subNetCIDRStr: "127.168.224.0/24", + expectErr: true, + description: "Get error with IPv4", + }, + { + clusterCIDRStr: "beef::/16", + perNodeHostBits: 104, + subNetCIDRStr: "2001:db00::/24", + expectErr: true, + description: "Get error with IPv6", + }, + } + + for _, tc := range cases { + _, clusterCIDR, err := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr) + if err != nil { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + + cs, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits) + if err != nil { + t.Fatalf("Error allocating CIDRSet for %v", tc.description) + } + _, subnetCIDR, err := utilnet.ParseCIDRSloppy(tc.subNetCIDRStr) + if err != nil { + t.Fatalf("unexpected error: %v for %v", err, tc.description) + } + + got, err := cs.getIndexForCIDR(subnetCIDR) + if err == nil && 
tc.expectErr { + klog.Errorf("expected error but got null for %v", tc.description) + continue + } + + if err != nil && !tc.expectErr { + klog.Errorf("unexpected error: %v for %v", err, tc.description) + continue + } + + if got != tc.expectedBit { + klog.Errorf("expected %v, but got %v for %v", tc.expectedBit, got, tc.description) + } + } +} + +func TestCIDRSetv6(t *testing.T) { + cases := []struct { + clusterCIDRStr string + perNodeHostBits int + expectedCIDR string + expectedCIDR2 string + expectErr bool + description string + }{ + { + clusterCIDRStr: "127.0.0.0/8", + perNodeHostBits: 0, + expectErr: false, + expectedCIDR: "127.0.0.0/32", + expectedCIDR2: "127.0.0.1/32", + description: "Max cluster subnet size with IPv4", + }, + { + clusterCIDRStr: "beef:1234::/32", + perNodeHostBits: 79, + expectErr: true, + description: "Max cluster subnet size with IPv6", + }, + { + clusterCIDRStr: "2001:beef:1234:369b::/60", + perNodeHostBits: 64, + expectedCIDR: "2001:beef:1234:3690::/64", + expectedCIDR2: "2001:beef:1234:3691::/64", + expectErr: false, + description: "Allocate a few IPv6", + }, + } + for _, tc := range cases { + t.Run(tc.description, func(t *testing.T) { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(tc.clusterCIDRStr) + a, err := NewMultiCIDRSet(clusterCIDR, tc.perNodeHostBits) + if gotErr := err != nil; gotErr != tc.expectErr { + t.Fatalf("NewMultiCIDRSet(%v, %v) = %v, %v; gotErr = %t, want %t", clusterCIDR, tc.perNodeHostBits, a, err, gotErr, tc.expectErr) + } + if a == nil { + return + } + p, err := allocateNext(a) + if err == nil && tc.expectErr { + t.Errorf("allocateNext(a) = nil, want error") + } + if err != nil && !tc.expectErr { + t.Errorf("allocateNext(a) = %+v, want no error", err) + } + if !tc.expectErr { + if p != nil && p.String() != tc.expectedCIDR { + t.Fatalf("allocateNext(a) got %+v, want %+v", p.String(), tc.expectedCIDR) + } + } + p2, err := allocateNext(a) + if err == nil && tc.expectErr { + t.Errorf("allocateNext(a) = nil, want error") + } + if err != nil && !tc.expectErr { + t.Errorf("allocateNext(a) = %+v, want no error", err) + } + if !tc.expectErr { + if p2 != nil && p2.String() != tc.expectedCIDR2 { + t.Fatalf("allocateNext(a) got %+v, want %+v", p2.String(), tc.expectedCIDR) + } + } + }) + } +} + +func TestMultiCIDRSetMetrics(t *testing.T) { + cidr := "10.0.0.0/16" + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr) + // We have 256 free cidrs + a, err := NewMultiCIDRSet(clusterCIDR, 8) + if err != nil { + t.Fatalf("unexpected error creating MultiCIDRSet: %v", err) + } + clearMetrics(map[string]string{"clusterCIDR": cidr}) + + // Allocate next all. + for i := 1; i <= 256; i++ { + _, err := allocateNext(a) + if err != nil { + t.Fatalf("unexpected error allocating a new CIDR: %v", err) + } + em := testMetrics{ + usage: float64(i) / float64(256), + allocs: float64(i), + releases: 0, + allocTries: 0, + } + expectMetrics(t, cidr, em) + } + // Release all CIDRs. + a.Release(clusterCIDR) + em := testMetrics{ + usage: 0, + allocs: 256, + releases: 256, + allocTries: 0, + } + expectMetrics(t, cidr, em) + + // Allocate all CIDRs. + a.Occupy(clusterCIDR) + em = testMetrics{ + usage: 1, + allocs: 512, + releases: 256, + allocTries: 0, + } + expectMetrics(t, cidr, em) + +} + +func TestMultiCIDRSetMetricsHistogram(t *testing.T) { + cidr := "10.0.0.0/16" + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr) + // We have 256 free cidrs. 
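+ // (a /16 cluster CIDR with 8 host bits per node yields /24 blocks: 2^(24-16) = 256)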
+ a, err := NewMultiCIDRSet(clusterCIDR, 8) + if err != nil { + t.Fatalf("unexpected error creating MultiCIDRSet: %v", err) + } + clearMetrics(map[string]string{"clusterCIDR": cidr}) + + // Allocate half of the range. + // Occupy does not update the nextCandidate. + _, halfClusterCIDR, _ := utilnet.ParseCIDRSloppy("10.0.0.0/17") + a.Occupy(halfClusterCIDR) + em := testMetrics{ + usage: 0.5, + allocs: 128, + releases: 0, + } + expectMetrics(t, cidr, em) + // Allocate next should iterate until the next free cidr + // that is exactly the same number we allocated previously. + _, err = allocateNext(a) + if err != nil { + t.Fatalf("unexpected error allocating a new CIDR: %v", err) + } + em = testMetrics{ + usage: float64(129) / float64(256), + allocs: 129, + releases: 0, + } + expectMetrics(t, cidr, em) +} + +func TestMultiCIDRSetMetricsDual(t *testing.T) { + // create IPv4 cidrSet. + cidrIPv4 := "10.0.0.0/16" + _, clusterCIDRv4, _ := utilnet.ParseCIDRSloppy(cidrIPv4) + a, err := NewMultiCIDRSet(clusterCIDRv4, 8) + if err != nil { + t.Fatalf("unexpected error creating MultiCIDRSet: %v", err) + } + clearMetrics(map[string]string{"clusterCIDR": cidrIPv4}) + // create IPv6 cidrSet. + cidrIPv6 := "2001:db8::/48" + _, clusterCIDRv6, _ := utilnet.ParseCIDRSloppy(cidrIPv6) + b, err := NewMultiCIDRSet(clusterCIDRv6, 64) + if err != nil { + t.Fatalf("unexpected error creating MultiCIDRSet: %v", err) + } + clearMetrics(map[string]string{"clusterCIDR": cidrIPv6}) + // Allocate all. + a.Occupy(clusterCIDRv4) + em := testMetrics{ + usage: 1, + allocs: 256, + releases: 0, + allocTries: 0, + } + expectMetrics(t, cidrIPv4, em) + + b.Occupy(clusterCIDRv6) + em = testMetrics{ + usage: 1, + allocs: 65536, + releases: 0, + allocTries: 0, + } + expectMetrics(t, cidrIPv6, em) + + // Release all. + a.Release(clusterCIDRv4) + em = testMetrics{ + usage: 0, + allocs: 256, + releases: 256, + allocTries: 0, + } + expectMetrics(t, cidrIPv4, em) + b.Release(clusterCIDRv6) + em = testMetrics{ + usage: 0, + allocs: 65536, + releases: 65536, + allocTries: 0, + } + expectMetrics(t, cidrIPv6, em) + +} + +// Metrics helpers. 
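+// The helpers below clear and read the per-clusterCIDR metric vectors through
+// k8s.io/component-base/metrics/testutil, e.g. (sketch):
+//   usage, err := testutil.GetGaugeMetricValue(cidrSetUsage.WithLabelValues("10.0.0.0/16"))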
+func clearMetrics(labels map[string]string) { + cidrSetAllocations.Delete(labels) + cidrSetReleases.Delete(labels) + cidrSetUsage.Delete(labels) + cidrSetAllocationTriesPerRequest.Delete(labels) +} + +type testMetrics struct { + usage float64 + allocs float64 + releases float64 + allocTries float64 +} + +func expectMetrics(t *testing.T, label string, em testMetrics) { + var m testMetrics + var err error + m.usage, err = testutil.GetGaugeMetricValue(cidrSetUsage.WithLabelValues(label)) + if err != nil { + t.Errorf("failed to get %s value, err: %v", cidrSetUsage.Name, err) + } + m.allocs, err = testutil.GetCounterMetricValue(cidrSetAllocations.WithLabelValues(label)) + if err != nil { + t.Errorf("failed to get %s value, err: %v", cidrSetAllocations.Name, err) + } + m.releases, err = testutil.GetCounterMetricValue(cidrSetReleases.WithLabelValues(label)) + if err != nil { + t.Errorf("failed to get %s value, err: %v", cidrSetReleases.Name, err) + } + m.allocTries, err = testutil.GetHistogramMetricValue(cidrSetAllocationTriesPerRequest.WithLabelValues(label)) + if err != nil { + t.Errorf("failed to get %s value, err: %v", cidrSetAllocationTriesPerRequest.Name, err) + } + + if m != em { + t.Fatalf("metrics error: expected %v, received %v", em, m) + } +} + +// Benchmarks +func benchmarkAllocateAllIPv6(cidr string, perNodeHostBits int, b *testing.B) { + _, clusterCIDR, _ := utilnet.ParseCIDRSloppy(cidr) + a, _ := NewMultiCIDRSet(clusterCIDR, perNodeHostBits) + for n := 0; n < b.N; n++ { + // Allocate the whole range + 1. + for i := 0; i <= a.MaxCIDRs; i++ { + allocateNext(a) + } + // Release all. + a.Release(clusterCIDR) + } +} + +func BenchmarkAllocateAll_48_52(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 52, b) } +func BenchmarkAllocateAll_48_56(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 56, b) } + +func BenchmarkAllocateAll_48_60(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 60, b) } +func BenchmarkAllocateAll_48_64(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/48", 64, b) } + +func BenchmarkAllocateAll_64_68(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 68, b) } + +func BenchmarkAllocateAll_64_72(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 72, b) } +func BenchmarkAllocateAll_64_76(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 76, b) } + +func BenchmarkAllocateAll_64_80(b *testing.B) { benchmarkAllocateAllIPv6("2001:db8::/64", 80, b) } diff --git a/pkg/controller/nodeipam/ipam/range_allocator.go b/pkg/controller/nodeipam/ipam/range_allocator.go index 3cf6794b228..54a0db9f51f 100644 --- a/pkg/controller/nodeipam/ipam/range_allocator.go +++ b/pkg/controller/nodeipam/ipam/range_allocator.go @@ -41,13 +41,6 @@ import ( controllerutil "k8s.io/kubernetes/pkg/controller/util/node" ) -// cidrs are reserved, then node resource is patched with them -// this type holds the reservation info for a node -type nodeReservedCIDRs struct { - allocatedCIDRs []*net.IPNet - nodeName string -} - type rangeAllocator struct { client clientset.Interface // cluster cidrs as passed in during controller creation @@ -333,7 +326,7 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error { var err error var node *v1.Node defer r.removeNodeFromProcessing(data.nodeName) - cidrsString := cidrsAsString(data.allocatedCIDRs) + cidrsString := ipnetToStringList(data.allocatedCIDRs) node, err = r.nodeLister.Get(data.nodeName) if err != nil { klog.Errorf("Failed while getting node %v for updating Node.Spec.PodCIDRs: %v", 
data.nodeName, err) @@ -391,12 +384,3 @@ func (r *rangeAllocator) updateCIDRsAllocation(data nodeReservedCIDRs) error { } return err } - -// converts a slice of cidrs into ,, -func cidrsAsString(inCIDRs []*net.IPNet) []string { - outCIDRs := make([]string, len(inCIDRs)) - for idx, inCIDR := range inCIDRs { - outCIDRs[idx] = inCIDR.String() - } - return outCIDRs -} diff --git a/pkg/controller/nodeipam/ipam/range_allocator_test.go b/pkg/controller/nodeipam/ipam/range_allocator_test.go index 0e7c452e01f..b0dd1c32f47 100644 --- a/pkg/controller/nodeipam/ipam/range_allocator_test.go +++ b/pkg/controller/nodeipam/ipam/range_allocator_test.go @@ -25,40 +25,12 @@ import ( v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/informers" - coreinformers "k8s.io/client-go/informers/core/v1" "k8s.io/client-go/kubernetes/fake" - "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test" "k8s.io/kubernetes/pkg/controller/testutil" netutils "k8s.io/utils/net" ) -const testNodePollInterval = 10 * time.Millisecond - -var alwaysReady = func() bool { return true } - -func waitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error { - return wait.Poll(nodePollInterval, timeout, func() (bool, error) { - if len(nodeHandler.GetUpdatedNodesCopy()) >= number { - return true, nil - } - return false, nil - }) -} - -// Creates a fakeNodeInformer using the provided fakeNodeHandler. -func getFakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer { - fakeClient := &fake.Clientset{} - fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) - fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes() - - for _, node := range fakeNodeHandler.Existing { - fakeNodeInformer.Informer().GetStore().Add(node) - } - - return fakeNodeInformer -} - type testCase struct { description string fakeNodeHandler *testutil.FakeNodeHandler @@ -305,7 +277,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) { for _, tc := range testCases { t.Run(tc.description, func(t *testing.T) { // Initialize the range allocator. - fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler) + fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) _, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) if err == nil && tc.ctrlCreateFail { @@ -321,7 +293,7 @@ func TestOccupyPreExistingCIDR(t *testing.T) { func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { // Non-parallel test (overrides global var) oldNodePollInterval := nodePollInterval - nodePollInterval = testNodePollInterval + nodePollInterval = test.NodePollInterval defer func() { nodePollInterval = oldNodePollInterval }() @@ -537,7 +509,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { // test function testFunc := func(tc testCase) { - fakeNodeInformer := getFakeNodeInformer(tc.fakeNodeHandler) + fakeNodeInformer := test.FakeNodeInformer(tc.fakeNodeHandler) nodeList, _ := tc.fakeNodeHandler.List(context.TODO(), metav1.ListOptions{}) // Initialize the range allocator. 
allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, fakeNodeInformer, tc.allocatorParams, nodeList) @@ -550,7 +522,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) return } - rangeAllocator.nodesSynced = alwaysReady + rangeAllocator.nodesSynced = test.AlwaysReady rangeAllocator.recorder = testutil.NewFakeRecorder() go allocator.Run(wait.NeverStop) @@ -580,7 +552,7 @@ func TestAllocateOrOccupyCIDRSuccess(t *testing.T) { if updateCount != 1 { t.Fatalf("test error: all tests must update exactly one node") } - if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, updateCount, wait.ForeverTestTimeout); err != nil { t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) } @@ -639,7 +611,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { testFunc := func(tc testCase) { // Initialize the range allocator. - allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) + allocator, err := NewCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) if err != nil { t.Logf("%v: failed to create CIDRRangeAllocator with error %v", tc.description, err) } @@ -648,7 +620,7 @@ func TestAllocateOrOccupyCIDRFailure(t *testing.T) { t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) return } - rangeAllocator.nodesSynced = alwaysReady + rangeAllocator.nodesSynced = test.AlwaysReady rangeAllocator.recorder = testutil.NewFakeRecorder() go allocator.Run(wait.NeverStop) @@ -708,7 +680,7 @@ type releaseTestCase struct { func TestReleaseCIDRSuccess(t *testing.T) { // Non-parallel test (overrides global var) oldNodePollInterval := nodePollInterval - nodePollInterval = testNodePollInterval + nodePollInterval = test.NodePollInterval defer func() { nodePollInterval = oldNodePollInterval }() @@ -784,13 +756,13 @@ func TestReleaseCIDRSuccess(t *testing.T) { testFunc := func(tc releaseTestCase) { // Initialize the range allocator. 
- allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, getFakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) + allocator, _ := NewCIDRRangeAllocator(tc.fakeNodeHandler, test.FakeNodeInformer(tc.fakeNodeHandler), tc.allocatorParams, nil) rangeAllocator, ok := allocator.(*rangeAllocator) if !ok { t.Logf("%v: found non-default implementation of CIDRAllocator, skipping white-box test...", tc.description) return } - rangeAllocator.nodesSynced = alwaysReady + rangeAllocator.nodesSynced = test.AlwaysReady rangeAllocator.recorder = testutil.NewFakeRecorder() go allocator.Run(wait.NeverStop) @@ -813,7 +785,7 @@ func TestReleaseCIDRSuccess(t *testing.T) { if err != nil { t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) } - if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) } } else { @@ -841,7 +813,7 @@ func TestReleaseCIDRSuccess(t *testing.T) { if err = allocator.AllocateOrOccupyCIDR(tc.fakeNodeHandler.Existing[0]); err != nil { t.Fatalf("%v: unexpected error in AllocateOrOccupyCIDR: %v", tc.description, err) } - if err := waitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { + if err := test.WaitForUpdatedNodeWithTimeout(tc.fakeNodeHandler, 1, wait.ForeverTestTimeout); err != nil { t.Fatalf("%v: timeout while waiting for Node update: %v", tc.description, err) } diff --git a/pkg/controller/nodeipam/ipam/test/utils.go b/pkg/controller/nodeipam/ipam/test/utils.go index 42242e1899b..2586484e975 100644 --- a/pkg/controller/nodeipam/ipam/test/utils.go +++ b/pkg/controller/nodeipam/ipam/test/utils.go @@ -18,10 +18,21 @@ package test import ( "net" + "time" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/informers" + coreinformers "k8s.io/client-go/informers/core/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/kubernetes/pkg/controller" + "k8s.io/kubernetes/pkg/controller/testutil" netutils "k8s.io/utils/net" ) +const NodePollInterval = 10 * time.Millisecond + +var AlwaysReady = func() bool { return true } + // MustParseCIDR returns the CIDR range parsed from s or panics if the string // cannot be parsed. func MustParseCIDR(s string) *net.IPNet { @@ -31,3 +42,25 @@ func MustParseCIDR(s string) *net.IPNet { } return ret } + +// FakeNodeInformer creates a fakeNodeInformer using the provided fakeNodeHandler. 
+func FakeNodeInformer(fakeNodeHandler *testutil.FakeNodeHandler) coreinformers.NodeInformer { + fakeClient := &fake.Clientset{} + fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) + fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes() + + for _, node := range fakeNodeHandler.Existing { + fakeNodeInformer.Informer().GetStore().Add(node) + } + + return fakeNodeInformer +} + +func WaitForUpdatedNodeWithTimeout(nodeHandler *testutil.FakeNodeHandler, number int, timeout time.Duration) error { + return wait.Poll(NodePollInterval, timeout, func() (bool, error) { + if len(nodeHandler.GetUpdatedNodesCopy()) >= number { + return true, nil + } + return false, nil + }) +} diff --git a/pkg/controller/nodeipam/node_ipam_controller.go b/pkg/controller/nodeipam/node_ipam_controller.go index 3a3c8ce1919..2cae14bf532 100644 --- a/pkg/controller/nodeipam/node_ipam_controller.go +++ b/pkg/controller/nodeipam/node_ipam_controller.go @@ -20,20 +20,18 @@ import ( "net" "time" - "k8s.io/klog/v2" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - + coreinformers "k8s.io/client-go/informers/core/v1" + networkinginformers "k8s.io/client-go/informers/networking/v1alpha1" + clientset "k8s.io/client-go/kubernetes" v1core "k8s.io/client-go/kubernetes/typed/core/v1" + corelisters "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" - - coreinformers "k8s.io/client-go/informers/core/v1" - clientset "k8s.io/client-go/kubernetes" - corelisters "k8s.io/client-go/listers/core/v1" cloudprovider "k8s.io/cloud-provider" controllersmetrics "k8s.io/component-base/metrics/prometheus/controllers" "k8s.io/component-base/metrics/prometheus/ratelimiter" + "k8s.io/klog/v2" "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" ) @@ -74,6 +72,7 @@ type Controller struct { // currently, this should be handled as a fatal error. 
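The helpers above (NodePollInterval, AlwaysReady, FakeNodeInformer, WaitForUpdatedNodeWithTimeout) are exported from pkg/controller/nodeipam/ipam/test so the existing range allocator tests and the new multi-CIDR allocator tests can share them. A minimal sketch of the intended usage, assuming the FakeNodeHandler literal below matches the existing testutil type (only its Existing field is relied on here):

package sketch

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam/test"
	"k8s.io/kubernetes/pkg/controller/testutil"
)

func TestFakeNodeInformerSketch(t *testing.T) {
	// Hypothetical fixture: one pre-existing node known to the fake handler.
	handler := &testutil.FakeNodeHandler{
		Existing: []*v1.Node{{ObjectMeta: metav1.ObjectMeta{Name: "node0"}}},
	}

	// FakeNodeInformer pre-populates the informer store with handler.Existing.
	informer := test.FakeNodeInformer(handler)
	if _, exists, err := informer.Informer().GetStore().GetByKey("node0"); err != nil || !exists {
		t.Fatalf("expected node0 in the fake informer store, exists=%v err=%v", exists, err)
	}

	// After driving an allocator, tests block on the expected number of node
	// updates with:
	//   test.WaitForUpdatedNodeWithTimeout(handler, 1, wait.ForeverTestTimeout)
	// which polls every test.NodePollInterval.
}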
func NewNodeIpamController( nodeInformer coreinformers.NodeInformer, + clusterCIDRInformer networkinginformers.ClusterCIDRInformer, cloud cloudprovider.Interface, kubeClient clientset.Interface, clusterCIDRs []*net.IPNet, @@ -136,7 +135,7 @@ func NewNodeIpamController( NodeCIDRMaskSizes: nodeCIDRMaskSizes, } - ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, ic.allocatorType, allocatorParams) + ic.cidrAllocator, err = ipam.New(kubeClient, cloud, nodeInformer, clusterCIDRInformer, ic.allocatorType, allocatorParams) if err != nil { return nil, err } diff --git a/pkg/controller/nodeipam/node_ipam_controller_test.go b/pkg/controller/nodeipam/node_ipam_controller_test.go index 48e850b9e78..ad4b433789f 100644 --- a/pkg/controller/nodeipam/node_ipam_controller_test.go +++ b/pkg/controller/nodeipam/node_ipam_controller_test.go @@ -48,6 +48,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, fakeClient := &fake.Clientset{} fakeInformerFactory := informers.NewSharedInformerFactory(fakeClient, controller.NoResyncPeriodFunc()) fakeNodeInformer := fakeInformerFactory.Core().V1().Nodes() + fakeClusterCIDRInformer := fakeInformerFactory.Networking().V1alpha1().ClusterCIDRs() for _, node := range fakeNodeHandler.Existing { fakeNodeInformer.Informer().GetStore().Add(node) @@ -55,7 +56,7 @@ func newTestNodeIpamController(clusterCIDR []*net.IPNet, serviceCIDR *net.IPNet, fakeGCE := gce.NewFakeGCECloud(gce.DefaultTestClusterValues()) return NewNodeIpamController( - fakeNodeInformer, fakeGCE, clientSet, + fakeNodeInformer, fakeClusterCIDRInformer, fakeGCE, clientSet, clusterCIDR, serviceCIDR, secondaryServiceCIDR, nodeCIDRMaskSizes, allocatorType, ) } @@ -78,6 +79,9 @@ func TestNewNodeIpamControllerWithCIDRMasks(t *testing.T) { {"valid_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.RangeAllocatorType, false}, {"valid_range_allocator_dualstack_dualstackservice", "10.0.0.0/21,2000::/10", "10.1.0.0/21", "3000::/10", []int{24, 98}, ipam.RangeAllocatorType, false}, + {"valid_multi_cidr_range_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.MultiCIDRRangeAllocatorType, false}, + {"valid_multi_cidr_range_allocator_dualstack", "10.0.0.0/21,2000::/10", "10.1.0.0/21", emptyServiceCIDR, []int{24, 98}, ipam.MultiCIDRRangeAllocatorType, false}, + {"valid_cloud_allocator", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.CloudAllocatorType, false}, {"valid_ipam_from_cluster", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromClusterAllocatorType, false}, {"valid_ipam_from_cloud", "10.0.0.0/21", "10.1.0.0/21", emptyServiceCIDR, []int{24}, ipam.IPAMFromCloudAllocatorType, false}, diff --git a/pkg/controlplane/instance.go b/pkg/controlplane/instance.go index 2eca86aba69..0718a061881 100644 --- a/pkg/controlplane/instance.go +++ b/pkg/controlplane/instance.go @@ -45,6 +45,7 @@ import ( eventsv1beta1 "k8s.io/api/events/v1beta1" flowcontrolv1alpha1 "k8s.io/api/flowcontrol/v1alpha1" networkingapiv1 "k8s.io/api/networking/v1" + networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1" nodev1 "k8s.io/api/node/v1" nodev1beta1 "k8s.io/api/node/v1beta1" policyapiv1 "k8s.io/api/policy/v1" @@ -689,6 +690,7 @@ var ( // alphaAPIGroupVersionsDisabledByDefault holds the alpha APIs we have. They are always disabled by default. 
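NewNodeIpamController and ipam.New now take the ClusterCIDR informer as an extra argument; callers obtain it from the shared informer factory exactly as the test above does for nodes. A minimal wiring sketch, assuming the constructor still returns (*Controller, error), that a nil cloud provider is acceptable for the range allocator types, and that the MultiCIDRRangeAllocator feature gate is enabled when the multi-CIDR allocator type is requested (none of this is shown in this hunk):

package sketch

import (
	"net"
	"time"

	"k8s.io/client-go/informers"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/kubernetes/pkg/controller/nodeipam"
	"k8s.io/kubernetes/pkg/controller/nodeipam/ipam"
)

func newMultiCIDRNodeIpamController(client clientset.Interface, clusterCIDRs []*net.IPNet, serviceCIDR *net.IPNet, maskSizes []int) (*nodeipam.Controller, error) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	return nodeipam.NewNodeIpamController(
		factory.Core().V1().Nodes(),
		factory.Networking().V1alpha1().ClusterCIDRs(), // new argument
		nil, // cloud provider, unused by the range allocators
		client,
		clusterCIDRs,
		serviceCIDR,
		nil,       // secondaryServiceCIDR
		maskSizes, // nodeCIDRMaskSizes
		ipam.MultiCIDRRangeAllocatorType,
	)
}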
alphaAPIGroupVersionsDisabledByDefault = []schema.GroupVersion{ apiserverinternalv1alpha1.SchemeGroupVersion, + networkingapiv1alpha1.SchemeGroupVersion, storageapiv1alpha1.SchemeGroupVersion, flowcontrolv1alpha1.SchemeGroupVersion, } diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go index dc8b8b0a467..5a0dbd7e81a 100644 --- a/pkg/features/kube_features.go +++ b/pkg/features/kube_features.go @@ -588,6 +588,13 @@ const ( // Enables the usage of different protocols in the same Service with type=LoadBalancer MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService" + // owner: @sarveshr7 + // kep: http://kep.k8s.io/2593 + // alpha: v1.25 + // + // Enables the MultiCIDR Range allocator. + MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator" + // owner: @rikatz // kep: http://kep.k8s.io/2079 // alpha: v1.21 @@ -1042,6 +1049,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS MixedProtocolLBService: {Default: true, PreRelease: featuregate.Beta}, + MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha}, + NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27 NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha}, diff --git a/pkg/generated/openapi/zz_generated.openapi.go b/pkg/generated/openapi/zz_generated.openapi.go index 16db8f7c453..b2816df6974 100644 --- a/pkg/generated/openapi/zz_generated.openapi.go +++ b/pkg/generated/openapi/zz_generated.openapi.go @@ -687,6 +687,9 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA "k8s.io/api/networking/v1.NetworkPolicySpec": schema_k8sio_api_networking_v1_NetworkPolicySpec(ref), "k8s.io/api/networking/v1.NetworkPolicyStatus": schema_k8sio_api_networking_v1_NetworkPolicyStatus(ref), "k8s.io/api/networking/v1.ServiceBackendPort": schema_k8sio_api_networking_v1_ServiceBackendPort(ref), + "k8s.io/api/networking/v1alpha1.ClusterCIDR": schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref), + "k8s.io/api/networking/v1alpha1.ClusterCIDRList": schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref), + "k8s.io/api/networking/v1alpha1.ClusterCIDRSpec": schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref), "k8s.io/api/networking/v1beta1.HTTPIngressPath": schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref), "k8s.io/api/networking/v1beta1.HTTPIngressRuleValue": schema_k8sio_api_networking_v1beta1_HTTPIngressRuleValue(ref), "k8s.io/api/networking/v1beta1.Ingress": schema_k8sio_api_networking_v1beta1_Ingress(ref), @@ -34339,6 +34342,146 @@ func schema_k8sio_api_networking_v1_ServiceBackendPort(ref common.ReferenceCallb } } +func schema_k8sio_api_networking_v1alpha1_ClusterCIDR(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. 
In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "spec": { + SchemaProps: spec.SchemaProps{ + Description: "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDRSpec"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/api/networking/v1alpha1.ClusterCIDRSpec", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"}, + } +} + +func schema_k8sio_api_networking_v1alpha1_ClusterCIDRList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterCIDRList contains a list of ClusterCIDR.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Description: "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + Default: map[string]interface{}{}, + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Description: "Items is the list of ClusterCIDRs.", + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: map[string]interface{}{}, + Ref: ref("k8s.io/api/networking/v1alpha1.ClusterCIDR"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/networking/v1alpha1.ClusterCIDR", "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"}, + } +} + +func schema_k8sio_api_networking_v1alpha1_ClusterCIDRSpec(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "ClusterCIDRSpec defines the desired state of ClusterCIDR.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "nodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.", + Ref: ref("k8s.io/api/core/v1.NodeSelector"), + }, + }, + "perNodeHostBits": { + SchemaProps: spec.SchemaProps{ + Description: "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + Default: 0, + Type: []string{"integer"}, + Format: "int32", + }, + }, + "ipv4": { + SchemaProps: spec.SchemaProps{ + Description: "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + "ipv6": { + SchemaProps: spec.SchemaProps{ + Description: "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"perNodeHostBits"}, + }, + }, + Dependencies: []string{ + "k8s.io/api/core/v1.NodeSelector"}, + } +} + func schema_k8sio_api_networking_v1beta1_HTTPIngressPath(ref common.ReferenceCallback) common.OpenAPIDefinition { return common.OpenAPIDefinition{ Schema: spec.Schema{ diff --git a/pkg/kubeapiserver/default_storage_factory_builder.go b/pkg/kubeapiserver/default_storage_factory_builder.go index 8c204a094b1..0d1ac4c8caf 100644 --- a/pkg/kubeapiserver/default_storage_factory_builder.go +++ b/pkg/kubeapiserver/default_storage_factory_builder.go @@ -71,6 +71,7 @@ func NewStorageFactoryConfig() *StorageFactoryConfig { // // TODO (https://github.com/kubernetes/kubernetes/issues/108451): remove the override in 1.25. 
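The ClusterCIDRSpec schema above is small: a node selector, the per-node host bits, and up to two CIDR blocks, with at least one of ipv4/ipv6 required and every field immutable. A sketch of a dual-stack object built with the new k8s.io/api/networking/v1alpha1 types (the node selector label key is illustrative only); with 8 host bits, each matching node is carved a /24 out of the IPv4 block and a /120 out of the IPv6 block, i.e. 256 pod addresses per node:

package sketch

import (
	v1 "k8s.io/api/core/v1"
	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleClusterCIDR builds a dual-stack ClusterCIDR that applies only to
// nodes carrying an illustrative worker label.
func exampleClusterCIDR() *networkingv1alpha1.ClusterCIDR {
	return &networkingv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "workers"},
		Spec: networkingv1alpha1.ClusterCIDRSpec{
			// 8 host bits per node: a /24 per node from the IPv4 block and a
			// /120 per node from the IPv6 block (256 addresses each).
			PerNodeHostBits: 8,
			IPv4:            "10.0.0.0/16",
			IPv6:            "fd00:1:1::/64",
			NodeSelector: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{{
					MatchExpressions: []v1.NodeSelectorRequirement{{
						Key:      "node-role.kubernetes.io/worker",
						Operator: v1.NodeSelectorOpExists,
					}},
				}},
			},
		},
	}
}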
// apisstorage.Resource("csistoragecapacities").WithVersion("v1beta1"), + networking.Resource("clustercidrs").WithVersion("v1alpha1"), } return &StorageFactoryConfig{ diff --git a/pkg/printers/internalversion/printers.go b/pkg/printers/internalversion/printers.go index 18d27440a35..bb7385482d2 100644 --- a/pkg/printers/internalversion/printers.go +++ b/pkg/printers/internalversion/printers.go @@ -37,6 +37,7 @@ import ( discoveryv1beta1 "k8s.io/api/discovery/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" schedulingv1 "k8s.io/api/scheduling/v1" @@ -591,6 +592,18 @@ func AddHandlers(h printers.PrintHandler) { {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, } h.TableHandler(scaleColumnDefinitions, printScale) + + clusterCIDRColumnDefinitions := []metav1.TableColumnDefinition{ + {Name: "Name", Type: "string", Format: "name", Description: metav1.ObjectMeta{}.SwaggerDoc()["name"]}, + {Name: "PerNodeHostBits", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["perNodeHostBits"]}, + {Name: "IPv4", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv4"]}, + {Name: "IPv6", Type: "string", Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["ipv6"]}, + {Name: "Age", Type: "string", Description: metav1.ObjectMeta{}.SwaggerDoc()["creationTimestamp"]}, + {Name: "NodeSelector", Type: "string", Priority: 1, Description: networkingv1alpha1.ClusterCIDRSpec{}.SwaggerDoc()["nodeSelector"]}, + } + + h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDR) + h.TableHandler(clusterCIDRColumnDefinitions, printClusterCIDRList) } // Pass ports=nil for all ports. @@ -2624,6 +2637,57 @@ func printPriorityLevelConfigurationList(list *flowcontrol.PriorityLevelConfigur return rows, nil } +func printClusterCIDR(obj *networking.ClusterCIDR, options printers.GenerateOptions) ([]metav1.TableRow, error) { + row := metav1.TableRow{ + Object: runtime.RawExtension{Object: obj}, + } + ipv4 := "" + ipv6 := "" + + if obj.Spec.IPv4 != "" { + ipv4 = obj.Spec.IPv4 + } + if obj.Spec.IPv6 != "" { + ipv6 = obj.Spec.IPv6 + } + + row.Cells = append(row.Cells, obj.Name, fmt.Sprint(obj.Spec.PerNodeHostBits), ipv4, ipv6, translateTimestampSince(obj.CreationTimestamp)) + if options.Wide { + nodeSelector := "" + if obj.Spec.NodeSelector != nil { + allTerms := make([]string, 0) + for _, term := range obj.Spec.NodeSelector.NodeSelectorTerms { + if len(term.MatchExpressions) > 0 { + matchExpressions := fmt.Sprintf("MatchExpressions: %v", term.MatchExpressions) + allTerms = append(allTerms, matchExpressions) + } + + if len(term.MatchFields) > 0 { + matchFields := fmt.Sprintf("MatchFields: %v", term.MatchFields) + allTerms = append(allTerms, matchFields) + } + } + nodeSelector = strings.Join(allTerms, ",") + } + + row.Cells = append(row.Cells, nodeSelector) + } + + return []metav1.TableRow{row}, nil +} + +func printClusterCIDRList(list *networking.ClusterCIDRList, options printers.GenerateOptions) ([]metav1.TableRow, error) { + rows := make([]metav1.TableRow, 0, len(list.Items)) + for i := range list.Items { + r, err := printClusterCIDR(&list.Items[i], options) + if err != nil { + return nil, err + } + rows = append(rows, r...) 
+ } + return rows, nil +} + func printScale(obj *autoscaling.Scale, options printers.GenerateOptions) ([]metav1.TableRow, error) { row := metav1.TableRow{ Object: runtime.RawExtension{Object: obj}, diff --git a/pkg/printers/internalversion/printers_test.go b/pkg/printers/internalversion/printers_test.go index 0301a0d27b8..d21da9bff15 100644 --- a/pkg/printers/internalversion/printers_test.go +++ b/pkg/printers/internalversion/printers_test.go @@ -6184,3 +6184,277 @@ func TestTableRowDeepCopyShouldNotPanic(t *testing.T) { }) } } + +func TestPrintClusterCIDR(t *testing.T) { + ipv4CIDR := "10.1.0.0/16" + perNodeHostBits := int32(8) + ipv6CIDR := "fd00:1:1::/64" + + tests := []struct { + ccc networking.ClusterCIDR + options printers.GenerateOptions + expected []metav1.TableRow + }{ + { + // Test name, IPv4 only with no node selector. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test1"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + }, + }, + options: printers.GenerateOptions{}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. + expected: []metav1.TableRow{{Cells: []interface{}{"test1", "8", ipv4CIDR, "", ""}}}, + }, + { + // Test name, IPv4 only with node selector, Not wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test2"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + // Does NOT get printed. + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + options: printers.GenerateOptions{}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. + expected: []metav1.TableRow{{Cells: []interface{}{"test2", "8", ipv4CIDR, "", ""}}}, + }, + { + // Test name, IPv4 only with no node selector, wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test3"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + }, + }, + options: printers.GenerateOptions{Wide: true}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . + expected: []metav1.TableRow{{Cells: []interface{}{"test3", "8", ipv4CIDR, "", "", ""}}}, + }, + { + // Test name, IPv4 only with node selector, wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test4"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + options: printers.GenerateOptions{Wide: true}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . + expected: []metav1.TableRow{{Cells: []interface{}{"test4", "8", ipv4CIDR, "", "", "MatchExpressions: [{foo In [bar]}]"}}}, + }, + { + // Test name, IPv6 only with no node selector. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test5"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv6: ipv6CIDR, + }, + }, + options: printers.GenerateOptions{}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age + expected: []metav1.TableRow{{Cells: []interface{}{"test5", "8", "", ipv6CIDR, ""}}}, + }, + { + // Test name, IPv6 only with node selector, Not wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test6"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv6: ipv6CIDR, + // Does NOT get printed. 
+ NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + options: printers.GenerateOptions{}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. + expected: []metav1.TableRow{{Cells: []interface{}{"test6", "8", "", ipv6CIDR, ""}}}, + }, + { + // Test name, IPv6 only with no node selector, wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test7"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv6: ipv6CIDR, + }, + }, + options: printers.GenerateOptions{Wide: true}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . + expected: []metav1.TableRow{{Cells: []interface{}{"test7", "8", "", ipv6CIDR, "", ""}}}, + }, + { + // Test name, IPv6 only with node selector, wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test8"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv6: ipv6CIDR, + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + options: printers.GenerateOptions{Wide: true}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . + expected: []metav1.TableRow{{Cells: []interface{}{"test8", "8", "", ipv6CIDR, "", "MatchExpressions: [{foo In [bar]}]"}}}, + }, + { + // Test name, DualStack with no node selector. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test9"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + IPv6: ipv6CIDR, + }, + }, + options: printers.GenerateOptions{}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. + expected: []metav1.TableRow{{Cells: []interface{}{"test9", "8", ipv4CIDR, ipv6CIDR, ""}}}, + }, + { + // Test name,DualStack with node selector, Not wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test10"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + IPv6: ipv6CIDR, + // Does NOT get printed. + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + options: printers.GenerateOptions{}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. + expected: []metav1.TableRow{{Cells: []interface{}{"test10", "8", ipv4CIDR, ipv6CIDR, ""}}}, + }, + { + // Test name, DualStack with no node selector, wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test11"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + IPv6: ipv6CIDR, + }, + }, + options: printers.GenerateOptions{Wide: true}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector. + expected: []metav1.TableRow{{Cells: []interface{}{"test11", "8", ipv4CIDR, ipv6CIDR, "", ""}}}, + }, + { + // Test name, DualStack with node selector, wide. + ccc: networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{Name: "test12"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: perNodeHostBits, + IPv4: ipv4CIDR, + IPv6: ipv6CIDR, + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + options: printers.GenerateOptions{Wide: true}, + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector . 
+ expected: []metav1.TableRow{{Cells: []interface{}{"test12", "8", ipv4CIDR, ipv6CIDR, "", "MatchExpressions: [{foo In [bar]}]"}}}, + }, + } + + for i, test := range tests { + rows, err := printClusterCIDR(&test.ccc, test.options) + if err != nil { + t.Fatal(err) + } + for i := range rows { + rows[i].Object.Object = nil + } + if !reflect.DeepEqual(test.expected, rows) { + t.Errorf("%d mismatch: %s", i, diff.ObjectReflectDiff(test.expected, rows)) + } + } +} + +func makeNodeSelector(key string, op api.NodeSelectorOperator, values []string) *api.NodeSelector { + return &api.NodeSelector{ + NodeSelectorTerms: []api.NodeSelectorTerm{ + { + MatchExpressions: []api.NodeSelectorRequirement{ + { + Key: key, + Operator: op, + Values: values, + }, + }, + }, + }, + } +} + +func TestPrintClusterCIDRList(t *testing.T) { + + cccList := networking.ClusterCIDRList{ + Items: []networking.ClusterCIDR{ + { + ObjectMeta: metav1.ObjectMeta{Name: "ccc1"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: int32(8), + IPv4: "10.1.0.0/16", + IPv6: "fd00:1:1::/64", + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "ccc2"}, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: int32(8), + IPv4: "10.2.0.0/16", + IPv6: "fd00:2:1::/64", + NodeSelector: makeNodeSelector("foo", api.NodeSelectorOpIn, []string{"bar"}), + }, + }, + }, + } + + tests := []struct { + options printers.GenerateOptions + expected []metav1.TableRow + }{ + { + // Test name, DualStack with node selector, wide. + options: printers.GenerateOptions{Wide: false}, + expected: []metav1.TableRow{ + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age. + {Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", ""}}, + {Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", ""}}, + }, + }, + { + // Test name, DualStack with node selector, wide. + options: printers.GenerateOptions{Wide: true}, + expected: []metav1.TableRow{ + // Columns: Name, PerNodeHostBits, IPv4, IPv6, Age, NodeSelector. + {Cells: []interface{}{"ccc1", "8", "10.1.0.0/16", "fd00:1:1::/64", "", "MatchExpressions: [{foo In [bar]}]"}}, + {Cells: []interface{}{"ccc2", "8", "10.2.0.0/16", "fd00:2:1::/64", "", "MatchExpressions: [{foo In [bar]}]"}}, + }, + }, + } + + for _, test := range tests { + rows, err := printClusterCIDRList(&cccList, test.options) + if err != nil { + t.Fatalf("Error printing service list: %#v", err) + } + for i := range rows { + rows[i].Object.Object = nil + } + if !reflect.DeepEqual(test.expected, rows) { + t.Errorf("mismatch: %s", diff.ObjectReflectDiff(test.expected, rows)) + } + } +} diff --git a/pkg/registry/networking/clustercidr/doc.go b/pkg/registry/networking/clustercidr/doc.go new file mode 100644 index 00000000000..ebd30f63304 --- /dev/null +++ b/pkg/registry/networking/clustercidr/doc.go @@ -0,0 +1,17 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clustercidr // import "k8s.io/kubernetes/pkg/registry/networking/clustercidr" diff --git a/pkg/registry/networking/clustercidr/storage/storage.go b/pkg/registry/networking/clustercidr/storage/storage.go new file mode 100644 index 00000000000..36e93efc069 --- /dev/null +++ b/pkg/registry/networking/clustercidr/storage/storage.go @@ -0,0 +1,63 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package storage + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + genericregistry "k8s.io/apiserver/pkg/registry/generic/registry" + "k8s.io/apiserver/pkg/registry/rest" + networkingapi "k8s.io/kubernetes/pkg/apis/networking" + "k8s.io/kubernetes/pkg/printers" + printersinternal "k8s.io/kubernetes/pkg/printers/internalversion" + printerstorage "k8s.io/kubernetes/pkg/printers/storage" + "k8s.io/kubernetes/pkg/registry/networking/clustercidr" +) + +// REST implements a RESTStorage for ClusterCIDRs against etcd. +type REST struct { + *genericregistry.Store +} + +// NewREST returns a RESTStorage object that will work against ClusterCIDRs. +func NewREST(optsGetter generic.RESTOptionsGetter) (*REST, error) { + store := &genericregistry.Store{ + NewFunc: func() runtime.Object { return &networkingapi.ClusterCIDR{} }, + NewListFunc: func() runtime.Object { return &networkingapi.ClusterCIDRList{} }, + DefaultQualifiedResource: networkingapi.Resource("clustercidrs"), + + CreateStrategy: clustercidr.Strategy, + UpdateStrategy: clustercidr.Strategy, + DeleteStrategy: clustercidr.Strategy, + + TableConvertor: printerstorage.TableConvertor{TableGenerator: printers.NewTableGenerator().With(printersinternal.AddHandlers)}, + } + options := &generic.StoreOptions{RESTOptions: optsGetter} + if err := store.CompleteWithOptions(options); err != nil { + return nil, err + } + + return &REST{store}, nil +} + +// Implement ShortNamesProvider. +var _ rest.ShortNamesProvider = &REST{} + +// ShortNames implements the ShortNamesProvider interface. Returns a list of short names for a resource. +func (r *REST) ShortNames() []string { + return []string{"cc"} +} diff --git a/pkg/registry/networking/clustercidr/storage/storage_test.go b/pkg/registry/networking/clustercidr/storage/storage_test.go new file mode 100644 index 00000000000..774ec59f54d --- /dev/null +++ b/pkg/registry/networking/clustercidr/storage/storage_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package storage + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apiserver/pkg/registry/generic" + genericregistrytest "k8s.io/apiserver/pkg/registry/generic/testing" + etcd3testing "k8s.io/apiserver/pkg/storage/etcd3/testing" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/networking" + _ "k8s.io/kubernetes/pkg/apis/networking/install" + "k8s.io/kubernetes/pkg/registry/registrytest" +) + +func newStorage(t *testing.T) (*REST, *etcd3testing.EtcdTestServer) { + etcdStorage, server := registrytest.NewEtcdStorageForResource(t, networking.Resource("clustercidrs")) + restOptions := generic.RESTOptions{ + StorageConfig: etcdStorage, + Decorator: generic.UndecoratedStorage, + DeleteCollectionWorkers: 1, + ResourcePrefix: "clustercidrs", + } + clusterCIDRStorage, err := NewREST(restOptions) + if err != nil { + t.Fatalf("unexpected error from REST storage: %v", err) + } + return clusterCIDRStorage, server +} + +var ( + namespace = metav1.NamespaceNone + name = "foo-clustercidr" +) + +func newClusterCIDR() *networking.ClusterCIDR { + return &networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: int32(8), + IPv4: "10.1.0.0/16", + IPv6: "fd00:1:1::/64", + NodeSelector: &api.NodeSelector{ + NodeSelectorTerms: []api.NodeSelectorTerm{ + { + MatchExpressions: []api.NodeSelectorRequirement{ + { + Key: "foo", + Operator: api.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + } +} + +func validClusterCIDR() *networking.ClusterCIDR { + return newClusterCIDR() +} + +func TestCreate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + + test := genericregistrytest.New(t, storage.Store) + test = test.ClusterScope() + validCC := validClusterCIDR() + noCIDRCC := validClusterCIDR() + noCIDRCC.Spec.IPv4 = "" + noCIDRCC.Spec.IPv6 = "" + invalidCCPerNodeHostBits := validClusterCIDR() + invalidCCPerNodeHostBits.Spec.PerNodeHostBits = 100 + invalidCCCIDR := validClusterCIDR() + invalidCCCIDR.Spec.IPv6 = "10.1.0.0/16" + + test.TestCreate( + // valid + validCC, + //invalid + noCIDRCC, + invalidCCPerNodeHostBits, + invalidCCCIDR, + ) +} + +func TestUpdate(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + test := genericregistrytest.New(t, storage.Store) + test = test.ClusterScope() + test.TestUpdate( + // valid + validClusterCIDR(), + // updateFunc + func(obj runtime.Object) runtime.Object { + object := obj.(*networking.ClusterCIDR) + object.Finalizers = []string{"test.k8s.io/test-finalizer"} + return object + }, + // invalid updateFunc: ObjectMeta is not to be tampered with. 
+ func(obj runtime.Object) runtime.Object { + object := obj.(*networking.ClusterCIDR) + object.Name = "" + return object + }, + ) +} + +func TestDelete(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + test := genericregistrytest.New(t, storage.Store) + test = test.ClusterScope() + test.TestDelete(validClusterCIDR()) +} + +func TestGet(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + test := genericregistrytest.New(t, storage.Store) + test = test.ClusterScope() + test.TestGet(validClusterCIDR()) +} + +func TestList(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + test := genericregistrytest.New(t, storage.Store) + test = test.ClusterScope() + test.TestList(validClusterCIDR()) +} + +func TestWatch(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + test := genericregistrytest.New(t, storage.Store) + test = test.ClusterScope() + test.TestWatch( + validClusterCIDR(), + // matching labels + []labels.Set{}, + // not matching labels + []labels.Set{ + {"a": "c"}, + {"foo": "bar"}, + }, + // matching fields + []fields.Set{ + {"metadata.name": name}, + }, + // not matching fields + []fields.Set{ + {"metadata.name": "bar"}, + {"name": name}, + }, + ) +} + +func TestShortNames(t *testing.T) { + storage, server := newStorage(t) + defer server.Terminate(t) + defer storage.Store.DestroyFunc() + expected := []string{"cc"} + registrytest.AssertShortNames(t, storage, expected) +} diff --git a/pkg/registry/networking/clustercidr/strategy.go b/pkg/registry/networking/clustercidr/strategy.go new file mode 100644 index 00000000000..a69a5f90413 --- /dev/null +++ b/pkg/registry/networking/clustercidr/strategy.go @@ -0,0 +1,82 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustercidr + +import ( + "context" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/validation/field" + "k8s.io/apiserver/pkg/storage/names" + "k8s.io/kubernetes/pkg/api/legacyscheme" + "k8s.io/kubernetes/pkg/apis/networking" + "k8s.io/kubernetes/pkg/apis/networking/validation" +) + +// clusterCIDRStrategy implements verification logic for ClusterCIDRs. +type clusterCIDRStrategy struct { + runtime.ObjectTyper + names.NameGenerator +} + +// Strategy is the default logic that applies when creating and updating clusterCIDR objects. +var Strategy = clusterCIDRStrategy{legacyscheme.Scheme, names.SimpleNameGenerator} + +// NamespaceScoped returns false because all clusterCIDRs do not need to be within a namespace. +func (clusterCIDRStrategy) NamespaceScoped() bool { + return false +} + +func (clusterCIDRStrategy) PrepareForCreate(ctx context.Context, obj runtime.Object) {} + +func (clusterCIDRStrategy) PrepareForUpdate(ctx context.Context, obj, old runtime.Object) {} + +// Validate validates a new ClusterCIDR. 
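Create and update validation is delegated to ValidateClusterCIDR and ValidateClusterCIDRUpdate from pkg/apis/networking/validation (see the Validate and ValidateUpdate methods just below; the validation package itself is not shown in this hunk). A sketch mirroring the invalid cases already exercised by TestCreate above, assuming the rules follow the field documentation (at least one of IPv4/IPv6 must be set, PerNodeHostBits must fit the address family):

package sketch

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	api "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/networking"
	"k8s.io/kubernetes/pkg/apis/networking/validation"
)

func TestValidateClusterCIDRSketch(t *testing.T) {
	valid := networking.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "sketch-cidr"},
		Spec: networking.ClusterCIDRSpec{
			PerNodeHostBits: 8,
			IPv4:            "10.1.0.0/16",
			NodeSelector: &api.NodeSelector{
				NodeSelectorTerms: []api.NodeSelectorTerm{{
					MatchExpressions: []api.NodeSelectorRequirement{{
						Key: "foo", Operator: api.NodeSelectorOpIn, Values: []string{"bar"},
					}},
				}},
			},
		},
	}
	if errs := validation.ValidateClusterCIDR(&valid); len(errs) != 0 {
		t.Errorf("expected a valid ClusterCIDR, got: %v", errs)
	}

	// Neither IPv4 nor IPv6 set: rejected (same case as noCIDRCC in TestCreate).
	noCIDR := valid
	noCIDR.Spec.IPv4 = ""
	if errs := validation.ValidateClusterCIDR(&noCIDR); len(errs) == 0 {
		t.Errorf("expected an error for a ClusterCIDR without any CIDR block")
	}

	// PerNodeHostBits larger than the IPv4 host space: rejected.
	badBits := valid
	badBits.Spec.PerNodeHostBits = 100
	if errs := validation.ValidateClusterCIDR(&badBits); len(errs) == 0 {
		t.Errorf("expected an error for PerNodeHostBits=100")
	}
}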
+func (clusterCIDRStrategy) Validate(ctx context.Context, obj runtime.Object) field.ErrorList { + clusterCIDR := obj.(*networking.ClusterCIDR) + return validation.ValidateClusterCIDR(clusterCIDR) +} + +// WarningsOnCreate returns warnings for the creation of the given object. +func (clusterCIDRStrategy) WarningsOnCreate(ctx context.Context, obj runtime.Object) []string { + return nil +} + +// Canonicalize normalizes the object after validation. +func (clusterCIDRStrategy) Canonicalize(obj runtime.Object) {} + +// AllowCreateOnUpdate is false for ClusterCIDR; this means POST is needed to create one. +func (clusterCIDRStrategy) AllowCreateOnUpdate() bool { + return false +} + +// ValidateUpdate is the default update validation for an end user. +func (clusterCIDRStrategy) ValidateUpdate(ctx context.Context, obj, old runtime.Object) field.ErrorList { + validationErrorList := validation.ValidateClusterCIDR(obj.(*networking.ClusterCIDR)) + updateErrorList := validation.ValidateClusterCIDRUpdate(obj.(*networking.ClusterCIDR), old.(*networking.ClusterCIDR)) + return append(validationErrorList, updateErrorList...) +} + +// WarningsOnUpdate returns warnings for the given update. +func (clusterCIDRStrategy) WarningsOnUpdate(ctx context.Context, obj, old runtime.Object) []string { + return nil +} + +// AllowUnconditionalUpdate is the default update policy for ClusterCIDR objects. +func (clusterCIDRStrategy) AllowUnconditionalUpdate() bool { + return true +} diff --git a/pkg/registry/networking/clustercidr/strategy_test.go b/pkg/registry/networking/clustercidr/strategy_test.go new file mode 100644 index 00000000000..f3225377666 --- /dev/null +++ b/pkg/registry/networking/clustercidr/strategy_test.go @@ -0,0 +1,86 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package clustercidr + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + genericapirequest "k8s.io/apiserver/pkg/endpoints/request" + api "k8s.io/kubernetes/pkg/apis/core" + "k8s.io/kubernetes/pkg/apis/networking" +) + +func newClusterCIDR() networking.ClusterCIDR { + return networking.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo", + }, + Spec: networking.ClusterCIDRSpec{ + PerNodeHostBits: int32(8), + IPv4: "10.1.0.0/16", + IPv6: "fd00:1:1::/64", + NodeSelector: &api.NodeSelector{ + NodeSelectorTerms: []api.NodeSelectorTerm{ + { + MatchExpressions: []api.NodeSelectorRequirement{ + { + Key: "foo", + Operator: api.NodeSelectorOpIn, + Values: []string{"bar"}, + }, + }, + }, + }, + }, + }, + } +} + +func TestClusterCIDRStrategy(t *testing.T) { + ctx := genericapirequest.NewDefaultContext() + apiRequest := genericapirequest.RequestInfo{APIGroup: "networking.k8s.io", + APIVersion: "v1alpha1", + Resource: "clustercidrs", + } + ctx = genericapirequest.WithRequestInfo(ctx, &apiRequest) + if Strategy.NamespaceScoped() { + t.Errorf("ClusterCIDRs must be cluster scoped") + } + if Strategy.AllowCreateOnUpdate() { + t.Errorf("ClusterCIDRs should not allow create on update") + } + + ccc := newClusterCIDR() + Strategy.PrepareForCreate(ctx, &ccc) + + errs := Strategy.Validate(ctx, &ccc) + if len(errs) != 0 { + t.Errorf("Unexpected error validating %v", errs) + } + invalidCCC := newClusterCIDR() + invalidCCC.ResourceVersion = "4" + invalidCCC.Spec = networking.ClusterCIDRSpec{} + Strategy.PrepareForUpdate(ctx, &invalidCCC, &ccc) + errs = Strategy.ValidateUpdate(ctx, &invalidCCC, &ccc) + if len(errs) == 0 { + t.Errorf("Expected a validation error") + } + if invalidCCC.ResourceVersion != "4" { + t.Errorf("Incoming resource version on update should not be mutated") + } +} diff --git a/pkg/registry/networking/rest/storage_settings.go b/pkg/registry/networking/rest/storage_settings.go index cc5a915cd4f..82d9d14a9a2 100644 --- a/pkg/registry/networking/rest/storage_settings.go +++ b/pkg/registry/networking/rest/storage_settings.go @@ -18,12 +18,14 @@ package rest import ( networkingapiv1 "k8s.io/api/networking/v1" + networkingapiv1alpha1 "k8s.io/api/networking/v1alpha1" "k8s.io/apiserver/pkg/registry/generic" "k8s.io/apiserver/pkg/registry/rest" genericapiserver "k8s.io/apiserver/pkg/server" serverstorage "k8s.io/apiserver/pkg/server/storage" "k8s.io/kubernetes/pkg/api/legacyscheme" "k8s.io/kubernetes/pkg/apis/networking" + clustercidrstore "k8s.io/kubernetes/pkg/registry/networking/clustercidr/storage" ingressstore "k8s.io/kubernetes/pkg/registry/networking/ingress/storage" ingressclassstore "k8s.io/kubernetes/pkg/registry/networking/ingressclass/storage" networkpolicystore "k8s.io/kubernetes/pkg/registry/networking/networkpolicy/storage" @@ -36,6 +38,12 @@ func (p RESTStorageProvider) NewRESTStorage(apiResourceConfigSource serverstorag // If you add a version here, be sure to add an entry in `k8s.io/kubernetes/cmd/kube-apiserver/app/aggregator.go with specific priorities. 
// TODO refactor the plumbing to provide the information in the APIGroupInfo + if storageMap, err := p.v1alpha1Storage(apiResourceConfigSource, restOptionsGetter); err != nil { + return genericapiserver.APIGroupInfo{}, err + } else if len(storageMap) > 0 { + apiGroupInfo.VersionedResourcesStorageMap[networkingapiv1alpha1.SchemeGroupVersion.Version] = storageMap + } + if storageMap, err := p.v1Storage(apiResourceConfigSource, restOptionsGetter); err != nil { return genericapiserver.APIGroupInfo{}, err } else if len(storageMap) > 0 { @@ -80,6 +88,20 @@ func (p RESTStorageProvider) v1Storage(apiResourceConfigSource serverstorage.API return storage, nil } +func (p RESTStorageProvider) v1alpha1Storage(apiResourceConfigSource serverstorage.APIResourceConfigSource, restOptionsGetter generic.RESTOptionsGetter) (map[string]rest.Storage, error) { + storage := map[string]rest.Storage{} + // clustercidrs + if resource := "clustercidrs"; apiResourceConfigSource.ResourceEnabled(networkingapiv1alpha1.SchemeGroupVersion.WithResource(resource)) { + clusterCIDRCStorage, err := clustercidrstore.NewREST(restOptionsGetter) + if err != nil { + return storage, err + } + storage[resource] = clusterCIDRCStorage + } + + return storage, nil +} + func (p RESTStorageProvider) GroupName() string { return networking.GroupName } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go index f5556335044..336244c7238 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/controller_policy.go @@ -250,6 +250,7 @@ func buildControllerRoles() ([]rbacv1.ClusterRole, []rbacv1.ClusterRoleBinding) // used for pod deletion rbacv1helpers.NewRule("update").Groups(legacyGroup).Resources("pods/status").RuleOrDie(), rbacv1helpers.NewRule("list", "delete").Groups(legacyGroup).Resources("pods").RuleOrDie(), + rbacv1helpers.NewRule("get", "list", "create", "update").Groups(networkingGroup).Resources("clustercidrs").RuleOrDie(), eventsRule(), }, } diff --git a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml index efc66ceb440..d1d6e37c08a 100644 --- a/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml +++ b/plugin/pkg/auth/authorizer/rbac/bootstrappolicy/testdata/controller-roles.yaml @@ -907,6 +907,15 @@ items: verbs: - delete - list + - apiGroups: + - networking.k8s.io + resources: + - clustercidrs + verbs: + - create + - get + - list + - update - apiGroups: - "" - events.k8s.io diff --git a/staging/src/k8s.io/api/networking/v1alpha1/doc.go b/staging/src/k8s.io/api/networking/v1alpha1/doc.go new file mode 100644 index 00000000000..3827b0418f9 --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +k8s:prerelease-lifecycle-gen=true +// +groupName=networking.k8s.io + +package v1alpha1 // import "k8s.io/api/networking/v1alpha1" diff --git a/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go b/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go new file mode 100644 index 00000000000..48d401db883 --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/generated.pb.go @@ -0,0 +1,913 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
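The hand-rolled marshalers below write ClusterCIDRSpec fields in reverse order with hard-coded tag bytes. As an illustrative cross-check (not part of the generated file), the bytes follow the standard protobuf encoding tag = fieldNumber<<3 | wireType, matching field numbers 1 through 4 for nodeSelector, perNodeHostBits, ipv4 and ipv6:

package main

import "fmt"

// Reproduces the tag bytes hard-coded by the generated MarshalToSizedBuffer
// for ClusterCIDRSpec: tag = fieldNumber<<3 | wireType.
func main() {
	const (
		wireVarint          = 0
		wireLengthDelimited = 2
	)
	fmt.Printf("nodeSelector    0x%02x\n", 1<<3|wireLengthDelimited) // 0x0a
	fmt.Printf("perNodeHostBits 0x%02x\n", 2<<3|wireVarint)          // 0x10
	fmt.Printf("ipv4            0x%02x\n", 3<<3|wireLengthDelimited) // 0x1a
	fmt.Printf("ipv6            0x%02x\n", 4<<3|wireLengthDelimited) // 0x22
}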
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ClusterCIDR) Reset() { *m = ClusterCIDR{} } +func (*ClusterCIDR) ProtoMessage() {} +func (*ClusterCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{0} +} +func (m *ClusterCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDR.Merge(m, src) +} +func (m *ClusterCIDR) XXX_Size() int { + return m.Size() +} +func (m *ClusterCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterCIDR proto.InternalMessageInfo + +func (m *ClusterCIDRList) Reset() { *m = ClusterCIDRList{} } +func (*ClusterCIDRList) ProtoMessage() {} +func (*ClusterCIDRList) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{1} +} +func (m *ClusterCIDRList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterCIDRList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterCIDRList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDRList.Merge(m, src) +} +func (m *ClusterCIDRList) XXX_Size() int { + return m.Size() +} +func (m *ClusterCIDRList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDRList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterCIDRList proto.InternalMessageInfo + +func (m *ClusterCIDRSpec) Reset() { *m = ClusterCIDRSpec{} } +func (*ClusterCIDRSpec) ProtoMessage() {} +func (*ClusterCIDRSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_c1b7ac8d7d97acec, []int{2} +} +func (m *ClusterCIDRSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterCIDRSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterCIDRSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterCIDRSpec.Merge(m, src) +} +func (m *ClusterCIDRSpec) XXX_Size() int { + return m.Size() +} +func (m *ClusterCIDRSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterCIDRSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterCIDRSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ClusterCIDR)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDR") + proto.RegisterType((*ClusterCIDRList)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRList") + proto.RegisterType((*ClusterCIDRSpec)(nil), "k8s.io.api.networking.v1alpha1.ClusterCIDRSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1alpha1/generated.proto", fileDescriptor_c1b7ac8d7d97acec) +} + +var fileDescriptor_c1b7ac8d7d97acec = []byte{ + // 506 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x93, 0x4f, 0x8f, 0xd2, 0x40, + 0x18, 0xc6, 0xe9, 0x2e, 0x24, 0x6b, 0xc1, 0xb0, 0xe9, 0x45, 0xc2, 0x61, 0x20, 0x9c, 0x48, 0x8c, + 0x33, 0xb2, 0x21, 0xc4, 0xab, 0xdd, 0x4d, 0x94, 0xc4, 0x3f, 0xd8, 0x4d, 0x3c, 0x18, 0x0f, 0x0e, + 0xe5, 0xb5, 0x8c, 0xd0, 0xce, 0x64, 0x66, 0xa8, 0xf1, 0xe6, 0x47, 0xf0, 0x2b, 0xe9, 0x89, 0xe3, + 0x1e, 0xf7, 0x44, 0xa4, 
0x7e, 0x01, 0x3f, 0x82, 0x99, 0xa1, 0xbb, 0x94, 0x45, 0x57, 0xbd, 0x75, + 0xde, 0xf9, 0x3d, 0xcf, 0xfb, 0x3e, 0x7d, 0x5b, 0xf7, 0xc9, 0xec, 0x91, 0xc2, 0x8c, 0x93, 0xd9, + 0x62, 0x0c, 0x32, 0x01, 0x0d, 0x8a, 0xa4, 0x90, 0x4c, 0xb8, 0x24, 0xf9, 0x05, 0x15, 0x8c, 0x24, + 0xa0, 0x3f, 0x72, 0x39, 0x63, 0x49, 0x44, 0xd2, 0x1e, 0x9d, 0x8b, 0x29, 0xed, 0x91, 0x08, 0x12, + 0x90, 0x54, 0xc3, 0x04, 0x0b, 0xc9, 0x35, 0xf7, 0xd0, 0x86, 0xc7, 0x54, 0x30, 0xbc, 0xe5, 0xf1, + 0x15, 0xdf, 0x7c, 0x10, 0x31, 0x3d, 0x5d, 0x8c, 0x71, 0xc8, 0x63, 0x12, 0xf1, 0x88, 0x13, 0x2b, + 0x1b, 0x2f, 0xde, 0xdb, 0x93, 0x3d, 0xd8, 0xa7, 0x8d, 0x5d, 0xb3, 0x53, 0x68, 0x1f, 0x72, 0x09, + 0x24, 0xdd, 0x6b, 0xd9, 0xec, 0x6f, 0x99, 0x98, 0x86, 0x53, 0x96, 0x80, 0xfc, 0x44, 0xc4, 0x2c, + 0x32, 0x05, 0x45, 0x62, 0xd0, 0xf4, 0x77, 0x2a, 0xf2, 0x27, 0x95, 0x5c, 0x24, 0x9a, 0xc5, 0xb0, + 0x27, 0x18, 0xfc, 0x4d, 0xa0, 0xc2, 0x29, 0xc4, 0xf4, 0xa6, 0xae, 0xf3, 0xcd, 0x71, 0xab, 0xa7, + 0xf3, 0x85, 0xd2, 0x20, 0x4f, 0x87, 0x67, 0x81, 0xf7, 0xce, 0x3d, 0x32, 0x33, 0x4d, 0xa8, 0xa6, + 0x0d, 0xa7, 0xed, 0x74, 0xab, 0x27, 0x0f, 0xf1, 0xf6, 0xa5, 0x5d, 0x5b, 0x63, 0x31, 0x8b, 0x4c, + 0x41, 0x61, 0x43, 0xe3, 0xb4, 0x87, 0x5f, 0x8e, 0x3f, 0x40, 0xa8, 0x9f, 0x83, 0xa6, 0xbe, 0xb7, + 0x5c, 0xb5, 0x4a, 0xd9, 0xaa, 0xe5, 0x6e, 0x6b, 0xc1, 0xb5, 0xab, 0xf7, 0xca, 0x2d, 0x2b, 0x01, + 0x61, 0xe3, 0xc0, 0xba, 0x13, 0x7c, 0xfb, 0x4a, 0x70, 0x61, 0xb8, 0x73, 0x01, 0xa1, 0x5f, 0xcb, + 0xcd, 0xcb, 0xe6, 0x14, 0x58, 0xab, 0xce, 0x57, 0xc7, 0xad, 0x17, 0xb8, 0x67, 0x4c, 0x69, 0xef, + 0xed, 0x5e, 0x10, 0xfc, 0x6f, 0x41, 0x8c, 0xda, 0xc6, 0x38, 0xce, 0x3b, 0x1d, 0x5d, 0x55, 0x0a, + 0x21, 0x46, 0x6e, 0x85, 0x69, 0x88, 0x55, 0xe3, 0xa0, 0x7d, 0xd8, 0xad, 0x9e, 0xdc, 0xff, 0x8f, + 0x14, 0xfe, 0xdd, 0xdc, 0xb7, 0x32, 0x34, 0x0e, 0xc1, 0xc6, 0xa8, 0xf3, 0x73, 0x37, 0x83, 0x49, + 0xe7, 0xbd, 0x76, 0x6b, 0x09, 0x9f, 0xc0, 0x39, 0xcc, 0x21, 0xd4, 0x5c, 0xe6, 0x39, 0xda, 0xc5, + 0x66, 0xe6, 0xb3, 0x33, 0x53, 0xbf, 0x28, 0x70, 0xfe, 0x71, 0xb6, 0x6a, 0xd5, 0x8a, 0x95, 0x60, + 0xc7, 0xc7, 0x7b, 0xec, 0xd6, 0x05, 0x48, 0x03, 0x3c, 0xe5, 0x4a, 0xfb, 0x4c, 0x2b, 0xbb, 0x8d, + 0x8a, 0x7f, 0x2f, 0x1f, 0xad, 0x3e, 0xda, 0xbd, 0x0e, 0x6e, 0xf2, 0x5e, 0xdb, 0x2d, 0x33, 0x91, + 0xf6, 0x1b, 0x87, 0x6d, 0xa7, 0x7b, 0x67, 0xbb, 0x94, 0xe1, 0x28, 0xed, 0x07, 0xf6, 0x26, 0x27, + 0x06, 0x8d, 0xf2, 0x1e, 0x31, 0xb0, 0xc4, 0xc0, 0x3f, 0x5b, 0xae, 0x51, 0xe9, 0x62, 0x8d, 0x4a, + 0x97, 0x6b, 0x54, 0xfa, 0x9c, 0x21, 0x67, 0x99, 0x21, 0xe7, 0x22, 0x43, 0xce, 0x65, 0x86, 0x9c, + 0xef, 0x19, 0x72, 0xbe, 0xfc, 0x40, 0xa5, 0x37, 0xe8, 0xf6, 0x7f, 0xfc, 0x57, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xdf, 0x1d, 0xe9, 0x86, 0x1d, 0x04, 0x00, 0x00, +} + +func (m *ClusterCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - 
i, nil +} + +func (m *ClusterCIDRList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCIDRList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterCIDRList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterCIDRSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterCIDRSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterCIDRSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IPv6) + copy(dAtA[i:], m.IPv6) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv6))) + i-- + dAtA[i] = 0x22 + i -= len(m.IPv4) + copy(dAtA[i:], m.IPv4) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IPv4))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.PerNodeHostBits)) + i-- + dAtA[i] = 0x10 + if m.NodeSelector != nil { + { + size, err := m.NodeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ClusterCIDR) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterCIDRList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterCIDRSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeSelector != nil { + l = m.NodeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.PerNodeHostBits)) + l = len(m.IPv4) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IPv6) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ClusterCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDR{`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ClusterCIDRSpec", "ClusterCIDRSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterCIDR{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterCIDR", "ClusterCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterCIDRList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterCIDRSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ClusterCIDRSpec{`, + `NodeSelector:` + strings.Replace(fmt.Sprintf("%v", this.NodeSelector), "NodeSelector", "v11.NodeSelector", 1) + `,`, + `PerNodeHostBits:` + fmt.Sprintf("%v", this.PerNodeHostBits) + `,`, + `IPv4:` + fmt.Sprintf("%v", this.IPv4) + `,`, + `IPv6:` + fmt.Sprintf("%v", this.IPv6) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ClusterCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterCIDR{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterCIDRSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterCIDRSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterCIDRSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = &v11.NodeSelector{} + } + if err := m.NodeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PerNodeHostBits", wireType) + } + m.PerNodeHostBits = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PerNodeHostBits |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv4", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv4 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPv6", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IPv6 = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/staging/src/k8s.io/api/networking/v1alpha1/generated.proto b/staging/src/k8s.io/api/networking/v1alpha1/generated.proto new file mode 100644 index 00000000000..51779369bed --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/generated.proto @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.api.networking.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/api/networking/v1alpha1"; + +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +message ClusterCIDR { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the ClusterCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ClusterCIDRSpec spec = 2; +} + +// ClusterCIDRList contains a list of ClusterCIDR. +message ClusterCIDRList { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ClusterCIDRs. + repeated ClusterCIDR items = 2; +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +message ClusterCIDRSpec { + // NodeSelector defines which nodes the config is applicable to. + // An empty or nil NodeSelector selects all nodes. + // This field is immutable. + // +optional + optional k8s.io.api.core.v1.NodeSelector nodeSelector = 1; + + // PerNodeHostBits defines the number of host bits to be configured per node. + // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). + // This field is immutable. + // +required + optional int32 perNodeHostBits = 2; + + // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of IPv4 and IPv6 must be specified. + // This field is immutable. + // +optional + optional string ipv4 = 3; + + // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64"). + // At least one of IPv4 and IPv6 must be specified. + // This field is immutable. + // +optional + optional string ipv6 = 4; +} + diff --git a/staging/src/k8s.io/api/networking/v1alpha1/register.go b/staging/src/k8s.io/api/networking/v1alpha1/register.go new file mode 100644 index 00000000000..12c0cf7bd46 --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package. +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects. +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource. +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder holds functions that add things to a scheme. + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types of this group into the given scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
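The SchemeBuilder/AddToScheme wiring above follows the standard k8s.io/api pattern. As a minimal sketch (not part of this patch) of how a consumer registers the new group-version, using only what register.go exposes:

package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme runs addKnownTypes below, adding ClusterCIDR and ClusterCIDRList.
	if err := networkingv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR")
	fmt.Println(scheme.Recognizes(gvk)) // expected: true
}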
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ClusterCIDR{}, + &ClusterCIDRList{}, + ) + // Add the watch version that applies. + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/staging/src/k8s.io/api/networking/v1alpha1/types.go b/staging/src/k8s.io/api/networking/v1alpha1/types.go new file mode 100644 index 00000000000..645b0890329 --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/types.go @@ -0,0 +1,95 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.25 + +// ClusterCIDR represents a single configuration for per-Node Pod CIDR +// allocations when the MultiCIDRRangeAllocator is enabled (see the config for +// kube-controller-manager). A cluster may have any number of ClusterCIDR +// resources, all of which will be considered when allocating a CIDR for a +// Node. A ClusterCIDR is eligible to be used for a given Node when the node +// selector matches the node in question and has free CIDRs to allocate. In +// case of multiple matching ClusterCIDR resources, the allocator will attempt +// to break ties using internal heuristics, but any ClusterCIDR whose node +// selector matches the Node may be used. +type ClusterCIDR struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the ClusterCIDR. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ClusterCIDRSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// ClusterCIDRSpec defines the desired state of ClusterCIDR. +type ClusterCIDRSpec struct { + // NodeSelector defines which nodes the config is applicable to. + // An empty or nil NodeSelector selects all nodes. + // This field is immutable. + // +optional + NodeSelector *v1.NodeSelector `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // PerNodeHostBits defines the number of host bits to be configured per node. + // A subnet mask determines how much of the address is used for network bits + // and host bits. For example an IPv4 address of 192.168.0.0/24, splits the + // address into 24 bits for the network portion and 8 bits for the host portion. + // To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). + // Minimum value is 4 (16 IPs). + // This field is immutable. 
+ // +required + PerNodeHostBits int32 `json:"perNodeHostBits" protobuf:"varint,2,opt,name=perNodeHostBits"` + + // IPv4 defines an IPv4 IP block in CIDR notation(e.g. "10.0.0.0/8"). + // At least one of IPv4 and IPv6 must be specified. + // This field is immutable. + // +optional + IPv4 string `json:"ipv4" protobuf:"bytes,3,opt,name=ipv4"` + + // IPv6 defines an IPv6 IP block in CIDR notation(e.g. "fd12:3456:789a:1::/64"). + // At least one of IPv4 and IPv6 must be specified. + // This field is immutable. + // +optional + IPv6 string `json:"ipv6" protobuf:"bytes,4,opt,name=ipv6"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:prerelease-lifecycle-gen:introduced=1.25 + +// ClusterCIDRList contains a list of ClusterCIDR. +type ClusterCIDRList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ClusterCIDRs. + Items []ClusterCIDR `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go b/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 00000000000..0e2213d9eda --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,62 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ClusterCIDR = map[string]string{ + "": "ClusterCIDR represents a single configuration for per-Node Pod CIDR allocations when the MultiCIDRRangeAllocator is enabled (see the config for kube-controller-manager). A cluster may have any number of ClusterCIDR resources, all of which will be considered when allocating a CIDR for a Node. A ClusterCIDR is eligible to be used for a given Node when the node selector matches the node in question and has free CIDRs to allocate. In case of multiple matching ClusterCIDR resources, the allocator will attempt to break ties using internal heuristics, but any ClusterCIDR whose node selector matches the Node may be used.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the ClusterCIDR. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ClusterCIDR) SwaggerDoc() map[string]string { + return map_ClusterCIDR +} + +var map_ClusterCIDRList = map[string]string{ + "": "ClusterCIDRList contains a list of ClusterCIDR.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of ClusterCIDRs.", +} + +func (ClusterCIDRList) SwaggerDoc() map[string]string { + return map_ClusterCIDRList +} + +var map_ClusterCIDRSpec = map[string]string{ + "": "ClusterCIDRSpec defines the desired state of ClusterCIDR.", + "nodeSelector": "NodeSelector defines which nodes the config is applicable to. An empty or nil NodeSelector selects all nodes. This field is immutable.", + "perNodeHostBits": "PerNodeHostBits defines the number of host bits to be configured per node. A subnet mask determines how much of the address is used for network bits and host bits. For example an IPv4 address of 192.168.0.0/24, splits the address into 24 bits for the network portion and 8 bits for the host portion. To allocate 256 IPs, set this field to 8 (a /24 mask for IPv4 or a /120 for IPv6). Minimum value is 4 (16 IPs). This field is immutable.", + "ipv4": "IPv4 defines an IPv4 IP block in CIDR notation(e.g. \"10.0.0.0/8\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", + "ipv6": "IPv6 defines an IPv6 IP block in CIDR notation(e.g. \"fd12:3456:789a:1::/64\"). At least one of IPv4 and IPv6 must be specified. This field is immutable.", +} + +func (ClusterCIDRSpec) SwaggerDoc() map[string]string { + return map_ClusterCIDRSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..e549f316634 --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,108 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDR) DeepCopyInto(out *ClusterCIDR) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDR. 
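To make the PerNodeHostBits arithmetic in the field documentation concrete, here is an illustrative object built from the new Go types (the name is hypothetical; the CIDR values are the examples used in the comments). With 8 host bits, each node is allocated a /24 from the IPv4 block (256 addresses) and a /120 from the IPv6 block:

package main

import (
	"fmt"

	networkingv1alpha1 "k8s.io/api/networking/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	cidr := networkingv1alpha1.ClusterCIDR{
		ObjectMeta: metav1.ObjectMeta{Name: "example-cidr"}, // hypothetical name
		Spec: networkingv1alpha1.ClusterCIDRSpec{
			// NodeSelector is left nil, which selects all nodes.
			PerNodeHostBits: 8,
			IPv4:            "10.0.0.0/8",
			IPv6:            "fd12:3456:789a:1::/64",
		},
	}
	fmt.Printf("per-node IPv4 prefix: /%d\n", 32-cidr.Spec.PerNodeHostBits)  // /24
	fmt.Printf("per-node IPv6 prefix: /%d\n", 128-cidr.Spec.PerNodeHostBits) // /120
}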
+func (in *ClusterCIDR) DeepCopy() *ClusterCIDR { + if in == nil { + return nil + } + out := new(ClusterCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCIDR) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDRList) DeepCopyInto(out *ClusterCIDRList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterCIDR, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRList. +func (in *ClusterCIDRList) DeepCopy() *ClusterCIDRList { + if in == nil { + return nil + } + out := new(ClusterCIDRList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterCIDRList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterCIDRSpec) DeepCopyInto(out *ClusterCIDRSpec) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = new(v1.NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterCIDRSpec. +func (in *ClusterCIDRSpec) DeepCopy() *ClusterCIDRSpec { + if in == nil { + return nil + } + out := new(ClusterCIDRSpec) + in.DeepCopyInto(out) + return out +} diff --git a/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go new file mode 100644 index 00000000000..dd6e3b26cb2 --- /dev/null +++ b/staging/src/k8s.io/api/networking/v1alpha1/zz_generated.prerelease-lifecycle.go @@ -0,0 +1,58 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by prerelease-lifecycle-gen. DO NOT EDIT. + +package v1alpha1 + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. 
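A quick sketch of what the generated deep-copy functions above guarantee (reusing the imports from the previous sketch): mutating a copy never leaks back into the original, because Spec and any pointer fields such as NodeSelector are copied recursively rather than aliased.

	original := networkingv1alpha1.ClusterCIDR{
		Spec: networkingv1alpha1.ClusterCIDRSpec{IPv4: "10.0.0.0/8", PerNodeHostBits: 8},
	}
	clone := original.DeepCopy() // allocates a new ClusterCIDR and calls DeepCopyInto
	clone.Spec.IPv4 = "192.168.0.0/16"
	fmt.Println(original.Spec.IPv4) // still "10.0.0.0/8"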
+func (in *ClusterCIDR) APILifecycleIntroduced() (major, minor int) { + return 1, 25 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterCIDR) APILifecycleDeprecated() (major, minor int) { + return 1, 28 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. +func (in *ClusterCIDR) APILifecycleRemoved() (major, minor int) { + return 1, 31 +} + +// APILifecycleIntroduced is an autogenerated function, returning the release in which the API struct was introduced as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:introduced" tags in types.go. +func (in *ClusterCIDRList) APILifecycleIntroduced() (major, minor int) { + return 1, 25 +} + +// APILifecycleDeprecated is an autogenerated function, returning the release in which the API struct was or will be deprecated as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:deprecated" tags in types.go or "k8s:prerelease-lifecycle-gen:introduced" plus three minor. +func (in *ClusterCIDRList) APILifecycleDeprecated() (major, minor int) { + return 1, 28 +} + +// APILifecycleRemoved is an autogenerated function, returning the release in which the API is no longer served as int versions of major and minor for comparison. +// It is controlled by "k8s:prerelease-lifecycle-gen:removed" tags in types.go or "k8s:prerelease-lifecycle-gen:deprecated" plus three minor. 
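The three lifecycle hooks encode the policy described in the comments: introduced in 1.25, treated as deprecated three minor releases later, and no longer served three minors after that. A small sketch of how tooling can query them (again reusing the imports above):

	var c networkingv1alpha1.ClusterCIDR
	major, minor := c.APILifecycleIntroduced() // 1, 25
	fmt.Println(major, minor)
	major, minor = c.APILifecycleDeprecated()  // 1, 28 (introduced + 3 minors)
	fmt.Println(major, minor)
	major, minor = c.APILifecycleRemoved()     // 1, 31 (deprecated + 3 minors)
	fmt.Println(major, minor)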
+func (in *ClusterCIDRList) APILifecycleRemoved() (major, minor int) { + return 1, 31 +} diff --git a/staging/src/k8s.io/api/roundtrip_test.go b/staging/src/k8s.io/api/roundtrip_test.go index e91ee93407f..6cdf8fcf243 100644 --- a/staging/src/k8s.io/api/roundtrip_test.go +++ b/staging/src/k8s.io/api/roundtrip_test.go @@ -52,6 +52,7 @@ import ( flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -110,6 +111,7 @@ var groups = []runtime.SchemeBuilder{ imagepolicyv1alpha1.SchemeBuilder, networkingv1.SchemeBuilder, networkingv1beta1.SchemeBuilder, + networkingv1alpha1.SchemeBuilder, nodev1.SchemeBuilder, nodev1alpha1.SchemeBuilder, nodev1beta1.SchemeBuilder, diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json new file mode 100644 index 00000000000..59fa006b52c --- /dev/null +++ b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.json @@ -0,0 +1,75 @@ +{ + "kind": "ClusterCIDR", + "apiVersion": "networking.k8s.io/v1alpha1", + "metadata": { + "name": "nameValue", + "generateName": "generateNameValue", + "namespace": "namespaceValue", + "selfLink": "selfLinkValue", + "uid": "uidValue", + "resourceVersion": "resourceVersionValue", + "generation": 7, + "creationTimestamp": "2008-01-01T01:01:01Z", + "deletionTimestamp": "2009-01-01T01:01:01Z", + "deletionGracePeriodSeconds": 10, + "labels": { + "labelsKey": "labelsValue" + }, + "annotations": { + "annotationsKey": "annotationsValue" + }, + "ownerReferences": [ + { + "apiVersion": "apiVersionValue", + "kind": "kindValue", + "name": "nameValue", + "uid": "uidValue", + "controller": true, + "blockOwnerDeletion": true + } + ], + "finalizers": [ + "finalizersValue" + ], + "managedFields": [ + { + "manager": "managerValue", + "operation": "operationValue", + "apiVersion": "apiVersionValue", + "time": "2004-01-01T01:01:01Z", + "fieldsType": "fieldsTypeValue", + "fieldsV1": {}, + "subresource": "subresourceValue" + } + ] + }, + "spec": { + "nodeSelector": { + "nodeSelectorTerms": [ + { + "matchExpressions": [ + { + "key": "keyValue", + "operator": "operatorValue", + "values": [ + "valuesValue" + ] + } + ], + "matchFields": [ + { + "key": "keyValue", + "operator": "operatorValue", + "values": [ + "valuesValue" + ] + } + ] + } + ] + }, + "perNodeHostBits": 2, + "ipv4": "ipv4Value", + "ipv6": "ipv6Value" + } +} \ No newline at end of file diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb new file mode 100644 index 00000000000..a4e9113897a Binary files /dev/null and b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.pb differ diff --git a/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml new file mode 100644 index 00000000000..fe7a1341fe1 --- /dev/null +++ b/staging/src/k8s.io/api/testdata/HEAD/networking.k8s.io.v1alpha1.ClusterCIDR.yaml @@ -0,0 +1,50 @@ +apiVersion: networking.k8s.io/v1alpha1 +kind: ClusterCIDR +metadata: + annotations: + annotationsKey: annotationsValue + creationTimestamp: 
"2008-01-01T01:01:01Z" + deletionGracePeriodSeconds: 10 + deletionTimestamp: "2009-01-01T01:01:01Z" + finalizers: + - finalizersValue + generateName: generateNameValue + generation: 7 + labels: + labelsKey: labelsValue + managedFields: + - apiVersion: apiVersionValue + fieldsType: fieldsTypeValue + fieldsV1: {} + manager: managerValue + operation: operationValue + subresource: subresourceValue + time: "2004-01-01T01:01:01Z" + name: nameValue + namespace: namespaceValue + ownerReferences: + - apiVersion: apiVersionValue + blockOwnerDeletion: true + controller: true + kind: kindValue + name: nameValue + uid: uidValue + resourceVersion: resourceVersionValue + selfLink: selfLinkValue + uid: uidValue +spec: + ipv4: ipv4Value + ipv6: ipv6Value + nodeSelector: + nodeSelectorTerms: + - matchExpressions: + - key: keyValue + operator: operatorValue + values: + - valuesValue + matchFields: + - key: keyValue + operator: operatorValue + values: + - valuesValue + perNodeHostBits: 2 diff --git a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go index 7f2d309092a..6fa25411cfd 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/internal/internal.go @@ -9593,6 +9593,41 @@ var schemaYAML = typed.YAMLObject(`types: - name: number type: scalar: numeric +- name: io.k8s.api.networking.v1alpha1.ClusterCIDR + map: + fields: + - name: apiVersion + type: + scalar: string + - name: kind + type: + scalar: string + - name: metadata + type: + namedType: io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta + default: {} + - name: spec + type: + namedType: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec + default: {} +- name: io.k8s.api.networking.v1alpha1.ClusterCIDRSpec + map: + fields: + - name: ipv4 + type: + scalar: string + default: "" + - name: ipv6 + type: + scalar: string + default: "" + - name: nodeSelector + type: + namedType: io.k8s.api.core.v1.NodeSelector + - name: perNodeHostBits + type: + scalar: numeric + default: 0 - name: io.k8s.api.networking.v1beta1.HTTPIngressPath map: fields: diff --git a/staging/src/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go b/staging/src/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go new file mode 100644 index 00000000000..5cadee3353f --- /dev/null +++ b/staging/src/k8s.io/client-go/applyconfigurations/meta/v1/listmeta.go @@ -0,0 +1,66 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1 + +// ListMetaApplyConfiguration represents an declarative configuration of the ListMeta type for use +// with apply. 
+type ListMetaApplyConfiguration struct { + SelfLink *string `json:"selfLink,omitempty"` + ResourceVersion *string `json:"resourceVersion,omitempty"` + Continue *string `json:"continue,omitempty"` + RemainingItemCount *int64 `json:"remainingItemCount,omitempty"` +} + +// ListMetaApplyConfiguration constructs an declarative configuration of the ListMeta type for use with +// apply. +func ListMeta() *ListMetaApplyConfiguration { + return &ListMetaApplyConfiguration{} +} + +// WithSelfLink sets the SelfLink field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the SelfLink field is set to the value of the last call. +func (b *ListMetaApplyConfiguration) WithSelfLink(value string) *ListMetaApplyConfiguration { + b.SelfLink = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ListMetaApplyConfiguration) WithResourceVersion(value string) *ListMetaApplyConfiguration { + b.ResourceVersion = &value + return b +} + +// WithContinue sets the Continue field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Continue field is set to the value of the last call. +func (b *ListMetaApplyConfiguration) WithContinue(value string) *ListMetaApplyConfiguration { + b.Continue = &value + return b +} + +// WithRemainingItemCount sets the RemainingItemCount field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the RemainingItemCount field is set to the value of the last call. +func (b *ListMetaApplyConfiguration) WithRemainingItemCount(value int64) *ListMetaApplyConfiguration { + b.RemainingItemCount = &value + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go new file mode 100644 index 00000000000..ad0eae9198e --- /dev/null +++ b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidr.go @@ -0,0 +1,247 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. 
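The ListMeta apply configuration follows the same pointer-field-plus-With-setter pattern as the other builders in this patch; a minimal sketch with made-up values:

package main

import (
	"fmt"

	metav1ac "k8s.io/client-go/applyconfigurations/meta/v1"
)

func main() {
	lm := metav1ac.ListMeta().
		WithResourceVersion("12345"). // hypothetical values
		WithRemainingItemCount(7)
	fmt.Println(*lm.ResourceVersion, *lm.RemainingItemCount)
}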
+ +package v1alpha1 + +import ( + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + managedfields "k8s.io/apimachinery/pkg/util/managedfields" + internal "k8s.io/client-go/applyconfigurations/internal" + v1 "k8s.io/client-go/applyconfigurations/meta/v1" +) + +// ClusterCIDRApplyConfiguration represents an declarative configuration of the ClusterCIDR type for use +// with apply. +type ClusterCIDRApplyConfiguration struct { + v1.TypeMetaApplyConfiguration `json:",inline"` + *v1.ObjectMetaApplyConfiguration `json:"metadata,omitempty"` + Spec *ClusterCIDRSpecApplyConfiguration `json:"spec,omitempty"` +} + +// ClusterCIDR constructs an declarative configuration of the ClusterCIDR type for use with +// apply. +func ClusterCIDR(name string) *ClusterCIDRApplyConfiguration { + b := &ClusterCIDRApplyConfiguration{} + b.WithName(name) + b.WithKind("ClusterCIDR") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b +} + +// ExtractClusterCIDR extracts the applied configuration owned by fieldManager from +// clusterCIDR. If no managedFields are found in clusterCIDR for fieldManager, a +// ClusterCIDRApplyConfiguration is returned with only the Name, Namespace (if applicable), +// APIVersion and Kind populated. It is possible that no managed fields were found for because other +// field managers have taken ownership of all the fields previously owned by fieldManager, or because +// the fieldManager never owned fields any fields. +// clusterCIDR must be a unmodified ClusterCIDR API object that was retrieved from the Kubernetes API. +// ExtractClusterCIDR provides a way to perform a extract/modify-in-place/apply workflow. +// Note that an extracted apply configuration will contain fewer fields than what the fieldManager previously +// applied if another fieldManager has updated or force applied any of the previously applied fields. +// Experimental! +func ExtractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { + return extractClusterCIDR(clusterCIDR, fieldManager, "") +} + +// ExtractClusterCIDRStatus is the same as ExtractClusterCIDR except +// that it extracts the status subresource applied configuration. +// Experimental! +func ExtractClusterCIDRStatus(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string) (*ClusterCIDRApplyConfiguration, error) { + return extractClusterCIDR(clusterCIDR, fieldManager, "status") +} + +func extractClusterCIDR(clusterCIDR *networkingv1alpha1.ClusterCIDR, fieldManager string, subresource string) (*ClusterCIDRApplyConfiguration, error) { + b := &ClusterCIDRApplyConfiguration{} + err := managedfields.ExtractInto(clusterCIDR, internal.Parser().Type("io.k8s.api.networking.v1alpha1.ClusterCIDR"), fieldManager, b, subresource) + if err != nil { + return nil, err + } + b.WithName(clusterCIDR.Name) + + b.WithKind("ClusterCIDR") + b.WithAPIVersion("networking.k8s.io/v1alpha1") + return b, nil +} + +// WithKind sets the Kind field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Kind field is set to the value of the last call. 
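Putting the builders together (the ClusterCIDRSpec builders appear further down in this patch), a declarative configuration looks like the sketch below. The commented-out Apply call assumes the generated clientset exposes the conventional NetworkingV1alpha1().ClusterCIDRs().Apply method, which is not part of this hunk.

package main

import (
	"fmt"

	networkingv1alpha1ac "k8s.io/client-go/applyconfigurations/networking/v1alpha1"
)

func main() {
	ac := networkingv1alpha1ac.ClusterCIDR("example-cidr"). // hypothetical object name
		WithLabels(map[string]string{"example": "true"}).
		WithSpec(networkingv1alpha1ac.ClusterCIDRSpec().
			WithPerNodeHostBits(8).
			WithIPv4("10.0.0.0/8"))
	fmt.Println(*ac.Name, *ac.Spec.IPv4)

	// With a clientset in hand, the configuration would typically be sent with
	// server-side apply, e.g. (assumed API surface, not shown in this excerpt):
	//   client.NetworkingV1alpha1().ClusterCIDRs().Apply(ctx, ac, metav1.ApplyOptions{FieldManager: "example-manager"})
}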
+func (b *ClusterCIDRApplyConfiguration) WithKind(value string) *ClusterCIDRApplyConfiguration { + b.Kind = &value + return b +} + +// WithAPIVersion sets the APIVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the APIVersion field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithAPIVersion(value string) *ClusterCIDRApplyConfiguration { + b.APIVersion = &value + return b +} + +// WithName sets the Name field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Name field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithName(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Name = &value + return b +} + +// WithGenerateName sets the GenerateName field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the GenerateName field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithGenerateName(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.GenerateName = &value + return b +} + +// WithNamespace sets the Namespace field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Namespace field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithNamespace(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Namespace = &value + return b +} + +// WithUID sets the UID field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the UID field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithUID(value types.UID) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.UID = &value + return b +} + +// WithResourceVersion sets the ResourceVersion field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the ResourceVersion field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithResourceVersion(value string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.ResourceVersion = &value + return b +} + +// WithGeneration sets the Generation field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Generation field is set to the value of the last call. 
+func (b *ClusterCIDRApplyConfiguration) WithGeneration(value int64) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.Generation = &value + return b +} + +// WithCreationTimestamp sets the CreationTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the CreationTimestamp field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithCreationTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.CreationTimestamp = &value + return b +} + +// WithDeletionTimestamp sets the DeletionTimestamp field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionTimestamp field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithDeletionTimestamp(value metav1.Time) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionTimestamp = &value + return b +} + +// WithDeletionGracePeriodSeconds sets the DeletionGracePeriodSeconds field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the DeletionGracePeriodSeconds field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithDeletionGracePeriodSeconds(value int64) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + b.DeletionGracePeriodSeconds = &value + return b +} + +// WithLabels puts the entries into the Labels field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Labels field, +// overwriting an existing map entries in Labels field with the same key. +func (b *ClusterCIDRApplyConfiguration) WithLabels(entries map[string]string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Labels == nil && len(entries) > 0 { + b.Labels = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Labels[k] = v + } + return b +} + +// WithAnnotations puts the entries into the Annotations field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, the entries provided by each call will be put on the Annotations field, +// overwriting an existing map entries in Annotations field with the same key. +func (b *ClusterCIDRApplyConfiguration) WithAnnotations(entries map[string]string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + if b.Annotations == nil && len(entries) > 0 { + b.Annotations = make(map[string]string, len(entries)) + } + for k, v := range entries { + b.Annotations[k] = v + } + return b +} + +// WithOwnerReferences adds the given value to the OwnerReferences field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the OwnerReferences field. 
+func (b *ClusterCIDRApplyConfiguration) WithOwnerReferences(values ...*v1.OwnerReferenceApplyConfiguration) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + if values[i] == nil { + panic("nil value passed to WithOwnerReferences") + } + b.OwnerReferences = append(b.OwnerReferences, *values[i]) + } + return b +} + +// WithFinalizers adds the given value to the Finalizers field in the declarative configuration +// and returns the receiver, so that objects can be build by chaining "With" function invocations. +// If called multiple times, values provided by each call will be appended to the Finalizers field. +func (b *ClusterCIDRApplyConfiguration) WithFinalizers(values ...string) *ClusterCIDRApplyConfiguration { + b.ensureObjectMetaApplyConfigurationExists() + for i := range values { + b.Finalizers = append(b.Finalizers, values[i]) + } + return b +} + +func (b *ClusterCIDRApplyConfiguration) ensureObjectMetaApplyConfigurationExists() { + if b.ObjectMetaApplyConfiguration == nil { + b.ObjectMetaApplyConfiguration = &v1.ObjectMetaApplyConfiguration{} + } +} + +// WithSpec sets the Spec field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the Spec field is set to the value of the last call. +func (b *ClusterCIDRApplyConfiguration) WithSpec(value *ClusterCIDRSpecApplyConfiguration) *ClusterCIDRApplyConfiguration { + b.Spec = value + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go new file mode 100644 index 00000000000..8d5fa406b09 --- /dev/null +++ b/staging/src/k8s.io/client-go/applyconfigurations/networking/v1alpha1/clustercidrspec.go @@ -0,0 +1,70 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by applyconfiguration-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/client-go/applyconfigurations/core/v1" +) + +// ClusterCIDRSpecApplyConfiguration represents an declarative configuration of the ClusterCIDRSpec type for use +// with apply. +type ClusterCIDRSpecApplyConfiguration struct { + NodeSelector *v1.NodeSelectorApplyConfiguration `json:"nodeSelector,omitempty"` + PerNodeHostBits *int32 `json:"perNodeHostBits,omitempty"` + IPv4 *string `json:"ipv4,omitempty"` + IPv6 *string `json:"ipv6,omitempty"` +} + +// ClusterCIDRSpecApplyConfiguration constructs an declarative configuration of the ClusterCIDRSpec type for use with +// apply. +func ClusterCIDRSpec() *ClusterCIDRSpecApplyConfiguration { + return &ClusterCIDRSpecApplyConfiguration{} +} + +// WithNodeSelector sets the NodeSelector field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. 
+// If called multiple times, the NodeSelector field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithNodeSelector(value *v1.NodeSelectorApplyConfiguration) *ClusterCIDRSpecApplyConfiguration { + b.NodeSelector = value + return b +} + +// WithPerNodeHostBits sets the PerNodeHostBits field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the PerNodeHostBits field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithPerNodeHostBits(value int32) *ClusterCIDRSpecApplyConfiguration { + b.PerNodeHostBits = &value + return b +} + +// WithIPv4 sets the IPv4 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv4 field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithIPv4(value string) *ClusterCIDRSpecApplyConfiguration { + b.IPv4 = &value + return b +} + +// WithIPv6 sets the IPv6 field in the declarative configuration to the given value +// and returns the receiver, so that objects can be built by chaining "With" function invocations. +// If called multiple times, the IPv6 field is set to the value of the last call. +func (b *ClusterCIDRSpecApplyConfiguration) WithIPv6(value string) *ClusterCIDRSpecApplyConfiguration { + b.IPv6 = &value + return b +} diff --git a/staging/src/k8s.io/client-go/applyconfigurations/utils.go b/staging/src/k8s.io/client-go/applyconfigurations/utils.go index fb88f847032..5a5763a78c2 100644 --- a/staging/src/k8s.io/client-go/applyconfigurations/utils.go +++ b/staging/src/k8s.io/client-go/applyconfigurations/utils.go @@ -46,6 +46,7 @@ import ( flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" imagepolicyv1alpha1 "k8s.io/api/imagepolicy/v1alpha1" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -91,6 +92,7 @@ import ( applyconfigurationsimagepolicyv1alpha1 "k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1" applyconfigurationsmetav1 "k8s.io/client-go/applyconfigurations/meta/v1" applyconfigurationsnetworkingv1 "k8s.io/client-go/applyconfigurations/networking/v1" + applyconfigurationsnetworkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" applyconfigurationsnetworkingv1beta1 "k8s.io/client-go/applyconfigurations/networking/v1beta1" applyconfigurationsnodev1 "k8s.io/client-go/applyconfigurations/node/v1" applyconfigurationsnodev1alpha1 "k8s.io/client-go/applyconfigurations/node/v1alpha1" @@ -1206,6 +1208,12 @@ func ForKind(kind schema.GroupVersionKind) interface{} { case networkingv1.SchemeGroupVersion.WithKind("ServiceBackendPort"): return &applyconfigurationsnetworkingv1.ServiceBackendPortApplyConfiguration{} + // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDR"): + return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRApplyConfiguration{} + case networkingv1alpha1.SchemeGroupVersion.WithKind("ClusterCIDRSpec"): + return &applyconfigurationsnetworkingv1alpha1.ClusterCIDRSpecApplyConfiguration{} + // Group=networking.k8s.io, Version=v1beta1 case networkingv1beta1.SchemeGroupVersion.WithKind("HTTPIngressPath"): return 
&applyconfigurationsnetworkingv1beta1.HTTPIngressPathApplyConfiguration{} diff --git a/staging/src/k8s.io/client-go/informers/generic.go b/staging/src/k8s.io/client-go/informers/generic.go index 4c2e53c2575..92eda45a79d 100644 --- a/staging/src/k8s.io/client-go/informers/generic.go +++ b/staging/src/k8s.io/client-go/informers/generic.go @@ -47,6 +47,7 @@ import ( flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -272,6 +273,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case networkingv1.SchemeGroupVersion.WithResource("networkpolicies"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1().NetworkPolicies().Informer()}, nil + // Group=networking.k8s.io, Version=v1alpha1 + case networkingv1alpha1.SchemeGroupVersion.WithResource("clustercidrs"): + return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1alpha1().ClusterCIDRs().Informer()}, nil + // Group=networking.k8s.io, Version=v1beta1 case networkingv1beta1.SchemeGroupVersion.WithResource("ingresses"): return &genericInformer{resource: resource.GroupResource(), informer: f.Networking().V1beta1().Ingresses().Informer()}, nil diff --git a/staging/src/k8s.io/client-go/informers/networking/interface.go b/staging/src/k8s.io/client-go/informers/networking/interface.go index 4a028d5d10e..1c775c465b6 100644 --- a/staging/src/k8s.io/client-go/informers/networking/interface.go +++ b/staging/src/k8s.io/client-go/informers/networking/interface.go @@ -21,6 +21,7 @@ package networking import ( internalinterfaces "k8s.io/client-go/informers/internalinterfaces" v1 "k8s.io/client-go/informers/networking/v1" + v1alpha1 "k8s.io/client-go/informers/networking/v1alpha1" v1beta1 "k8s.io/client-go/informers/networking/v1beta1" ) @@ -28,6 +29,8 @@ import ( type Interface interface { // V1 provides access to shared informers for resources in V1. V1() v1.Interface + // V1alpha1 provides access to shared informers for resources in V1alpha1. + V1alpha1() v1alpha1.Interface // V1beta1 provides access to shared informers for resources in V1beta1. V1beta1() v1beta1.Interface } @@ -48,6 +51,11 @@ func (g *group) V1() v1.Interface { return v1.New(g.factory, g.namespace, g.tweakListOptions) } +// V1alpha1 returns a new v1alpha1.Interface. +func (g *group) V1alpha1() v1alpha1.Interface { + return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions) +} + // V1beta1 returns a new v1beta1.Interface. func (g *group) V1beta1() v1beta1.Interface { return v1beta1.New(g.factory, g.namespace, g.tweakListOptions) diff --git a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go new file mode 100644 index 00000000000..cefd0f8a1ee --- /dev/null +++ b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/clustercidr.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + time "time" + + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" + kubernetes "k8s.io/client-go/kubernetes" + v1alpha1 "k8s.io/client-go/listers/networking/v1alpha1" + cache "k8s.io/client-go/tools/cache" +) + +// ClusterCIDRInformer provides access to a shared informer and lister for +// ClusterCIDRs. +type ClusterCIDRInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1alpha1.ClusterCIDRLister +} + +type clusterCIDRInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// NewClusterCIDRInformer constructs a new informer for ClusterCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredClusterCIDRInformer(client, resyncPeriod, indexers, nil) +} + +// NewFilteredClusterCIDRInformer constructs a new informer for ClusterCIDR type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
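+//
+// Editorial sketch, not part of the generated file: when the shared factory cannot be used, a
+// filtered informer may be constructed directly. This assumes `cs` is a kubernetes.Interface,
+// `stopCh` is a stop channel, and metav1 aliases k8s.io/apimachinery/pkg/apis/meta/v1:
+//
+//	inf := NewFilteredClusterCIDRInformer(cs, 30*time.Second, cache.Indexers{},
+//		func(opts *metav1.ListOptions) { opts.LabelSelector = "example.com/tier=system" })
+//	go inf.Run(stopCh)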
+func NewFilteredClusterCIDRInformer(client kubernetes.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().ClusterCIDRs().List(context.TODO(), options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.NetworkingV1alpha1().ClusterCIDRs().Watch(context.TODO(), options) + }, + }, + &networkingv1alpha1.ClusterCIDR{}, + resyncPeriod, + indexers, + ) +} + +func (f *clusterCIDRInformer) defaultInformer(client kubernetes.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredClusterCIDRInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *clusterCIDRInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&networkingv1alpha1.ClusterCIDR{}, f.defaultInformer) +} + +func (f *clusterCIDRInformer) Lister() v1alpha1.ClusterCIDRLister { + return v1alpha1.NewClusterCIDRLister(f.Informer().GetIndexer()) +} diff --git a/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go new file mode 100644 index 00000000000..c51b748801f --- /dev/null +++ b/staging/src/k8s.io/client-go/informers/networking/v1alpha1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + internalinterfaces "k8s.io/client-go/informers/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // ClusterCIDRs returns a ClusterCIDRInformer. + ClusterCIDRs() ClusterCIDRInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// ClusterCIDRs returns a ClusterCIDRInformer. 
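+//
+// Editorial sketch, not part of the generated file: typical wiring through the shared informer
+// factory, assuming `cs` is a kubernetes.Interface and `stopCh` is a stop channel:
+//
+//	factory := informers.NewSharedInformerFactory(cs, 10*time.Minute)
+//	ccInformer := factory.Networking().V1alpha1().ClusterCIDRs()
+//	lister := ccInformer.Lister()
+//	factory.Start(stopCh)
+//	factory.WaitForCacheSync(stopCh)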
+func (v *version) ClusterCIDRs() ClusterCIDRInformer { + return &clusterCIDRInformer{factory: v.factory, tweakListOptions: v.tweakListOptions} +} diff --git a/staging/src/k8s.io/client-go/kubernetes/clientset.go b/staging/src/k8s.io/client-go/kubernetes/clientset.go index e46c0537f78..0ea0c3c4cd6 100644 --- a/staging/src/k8s.io/client-go/kubernetes/clientset.go +++ b/staging/src/k8s.io/client-go/kubernetes/clientset.go @@ -53,6 +53,7 @@ import ( flowcontrolv1beta1 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" @@ -104,6 +105,7 @@ type Interface interface { FlowcontrolV1beta1() flowcontrolv1beta1.FlowcontrolV1beta1Interface FlowcontrolV1beta2() flowcontrolv1beta2.FlowcontrolV1beta2Interface NetworkingV1() networkingv1.NetworkingV1Interface + NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface NodeV1() nodev1.NodeV1Interface NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface @@ -155,6 +157,7 @@ type Clientset struct { flowcontrolV1beta1 *flowcontrolv1beta1.FlowcontrolV1beta1Client flowcontrolV1beta2 *flowcontrolv1beta2.FlowcontrolV1beta2Client networkingV1 *networkingv1.NetworkingV1Client + networkingV1alpha1 *networkingv1alpha1.NetworkingV1alpha1Client networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client nodeV1 *nodev1.NodeV1Client nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client @@ -322,6 +325,11 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } +// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client +func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { + return c.networkingV1alpha1 +} + // NetworkingV1beta1 retrieves the NetworkingV1beta1Client func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { return c.networkingV1beta1 @@ -561,6 +569,10 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, if err != nil { return nil, err } + cs.networkingV1alpha1, err = networkingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } cs.networkingV1beta1, err = networkingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) if err != nil { return nil, err @@ -672,6 +684,7 @@ func New(c rest.Interface) *Clientset { cs.flowcontrolV1beta1 = flowcontrolv1beta1.New(c) cs.flowcontrolV1beta2 = flowcontrolv1beta2.New(c) cs.networkingV1 = networkingv1.New(c) + cs.networkingV1alpha1 = networkingv1alpha1.New(c) cs.networkingV1beta1 = networkingv1beta1.New(c) cs.nodeV1 = nodev1.New(c) cs.nodeV1alpha1 = nodev1alpha1.New(c) diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go index 9ab84ff5dc3..3e468bf9058 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go @@ -84,6 +84,8 @@ import ( fakeflowcontrolv1beta2 "k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake" networkingv1 
"k8s.io/client-go/kubernetes/typed/networking/v1" fakenetworkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1/fake" + networkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" + fakenetworkingv1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake" networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" fakenetworkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake" nodev1 "k8s.io/client-go/kubernetes/typed/node/v1" @@ -317,6 +319,11 @@ func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return &fakenetworkingv1.FakeNetworkingV1{Fake: &c.Fake} } +// NetworkingV1alpha1 retrieves the NetworkingV1alpha1Client +func (c *Clientset) NetworkingV1alpha1() networkingv1alpha1.NetworkingV1alpha1Interface { + return &fakenetworkingv1alpha1.FakeNetworkingV1alpha1{Fake: &c.Fake} +} + // NetworkingV1beta1 retrieves the NetworkingV1beta1Client func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { return &fakenetworkingv1beta1.FakeNetworkingV1beta1{Fake: &c.Fake} diff --git a/staging/src/k8s.io/client-go/kubernetes/fake/register.go b/staging/src/k8s.io/client-go/kubernetes/fake/register.go index 7c2f1c11a9d..751d43aaa73 100644 --- a/staging/src/k8s.io/client-go/kubernetes/fake/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/fake/register.go @@ -49,6 +49,7 @@ import ( flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -105,6 +106,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, networkingv1.AddToScheme, + networkingv1alpha1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, nodev1alpha1.AddToScheme, diff --git a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go index 24046233b20..ea01840ef7b 100644 --- a/staging/src/k8s.io/client-go/kubernetes/scheme/register.go +++ b/staging/src/k8s.io/client-go/kubernetes/scheme/register.go @@ -49,6 +49,7 @@ import ( flowcontrolv1beta1 "k8s.io/api/flowcontrol/v1beta1" flowcontrolv1beta2 "k8s.io/api/flowcontrol/v1beta2" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" nodev1 "k8s.io/api/node/v1" nodev1alpha1 "k8s.io/api/node/v1alpha1" @@ -105,6 +106,7 @@ var localSchemeBuilder = runtime.SchemeBuilder{ flowcontrolv1beta1.AddToScheme, flowcontrolv1beta2.AddToScheme, networkingv1.AddToScheme, + networkingv1alpha1.AddToScheme, networkingv1beta1.AddToScheme, nodev1.AddToScheme, nodev1alpha1.AddToScheme, diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go new file mode 100644 index 00000000000..9df76351db8 --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/clustercidr.go @@ -0,0 +1,197 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + json "encoding/json" + "fmt" + "time" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterCIDRsGetter has a method to return a ClusterCIDRInterface. +// A group's client should implement this interface. +type ClusterCIDRsGetter interface { + ClusterCIDRs() ClusterCIDRInterface +} + +// ClusterCIDRInterface has methods to work with ClusterCIDR resources. +type ClusterCIDRInterface interface { + Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (*v1alpha1.ClusterCIDR, error) + Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (*v1alpha1.ClusterCIDR, error) + Delete(ctx context.Context, name string, opts v1.DeleteOptions) error + DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.ClusterCIDR, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.ClusterCIDRList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) + Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) + ClusterCIDRExpansion +} + +// clusterCIDRs implements ClusterCIDRInterface +type clusterCIDRs struct { + client rest.Interface +} + +// newClusterCIDRs returns a ClusterCIDRs +func newClusterCIDRs(c *NetworkingV1alpha1Client) *clusterCIDRs { + return &clusterCIDRs{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. +func (c *clusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Get(). + Resource("clustercidrs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. +func (c *clusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterCIDRList{} + err = c.client.Get(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterCIDRs. 
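+//
+// Editorial sketch, not part of the generated file: consuming the watch, assuming `cs` is a
+// configured kubernetes.Interface and networkingv1alpha1 aliases k8s.io/api/networking/v1alpha1:
+//
+//	w, err := cs.NetworkingV1alpha1().ClusterCIDRs().Watch(ctx, metav1.ListOptions{})
+//	if err != nil {
+//		return err
+//	}
+//	defer w.Stop()
+//	for ev := range w.ResultChan() {
+//		if cc, ok := ev.Object.(*networkingv1alpha1.ClusterCIDR); ok {
+//			fmt.Printf("%s %s\n", ev.Type, cc.Name)
+//		}
+//	}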
+func (c *clusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} + +// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *clusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Post(). + Resource("clustercidrs"). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCIDR). + Do(ctx). + Into(result) + return +} + +// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *clusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Put(). + Resource("clustercidrs"). + Name(clusterCIDR.Name). + VersionedParams(&opts, scheme.ParameterCodec). + Body(clusterCIDR). + Do(ctx). + Into(result) + return +} + +// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. +func (c *clusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clustercidrs"). + Name(name). + Body(&opts). + Do(ctx). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + var timeout time.Duration + if listOpts.TimeoutSeconds != nil { + timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clustercidrs"). + VersionedParams(&listOpts, scheme.ParameterCodec). + Timeout(timeout). + Body(&opts). + Do(ctx). + Error() +} + +// Patch applies the patch and returns the patched clusterCIDR. +func (c *clusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { + result = &v1alpha1.ClusterCIDR{} + err = c.client.Patch(pt). + Resource("clustercidrs"). + Name(name). + SubResource(subresources...). + VersionedParams(&opts, scheme.ParameterCodec). + Body(data). + Do(ctx). + Into(result) + return +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. +func (c *clusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { + if clusterCIDR == nil { + return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") + } + patchOpts := opts.ToPatchOptions() + data, err := json.Marshal(clusterCIDR) + if err != nil { + return nil, err + } + name := clusterCIDR.Name + if name == nil { + return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") + } + result = &v1alpha1.ClusterCIDR{} + err = c.client.Patch(types.ApplyPatchType). + Resource("clustercidrs"). + Name(*name). + VersionedParams(&patchOpts, scheme.ParameterCodec). + Body(data). + Do(ctx). 
+ Into(result) + return +} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go new file mode 100644 index 00000000000..df51baa4d4c --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go new file mode 100644 index 00000000000..16f44399065 --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go new file mode 100644 index 00000000000..ca0352d390c --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_clustercidr.go @@ -0,0 +1,146 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
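+
+// Editorial sketch, not part of the generated file: unit tests can seed the fake clientset from
+// k8s.io/client-go/kubernetes/fake and exercise ClusterCIDRs through the same typed interface:
+//
+//	client := fake.NewSimpleClientset(&networkingv1alpha1.ClusterCIDR{
+//		ObjectMeta: metav1.ObjectMeta{Name: "cc-1"},
+//	})
+//	cc, err := client.NetworkingV1alpha1().ClusterCIDRs().Get(ctx, "cc-1", metav1.GetOptions{})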
+ +package fake + +import ( + "context" + json "encoding/json" + "fmt" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + networkingv1alpha1 "k8s.io/client-go/applyconfigurations/networking/v1alpha1" + testing "k8s.io/client-go/testing" +) + +// FakeClusterCIDRs implements ClusterCIDRInterface +type FakeClusterCIDRs struct { + Fake *FakeNetworkingV1alpha1 +} + +var clustercidrsResource = schema.GroupVersionResource{Group: "networking.k8s.io", Version: "v1alpha1", Resource: "clustercidrs"} + +var clustercidrsKind = schema.GroupVersionKind{Group: "networking.k8s.io", Version: "v1alpha1", Kind: "ClusterCIDR"} + +// Get takes name of the clusterCIDR, and returns the corresponding clusterCIDR object, and an error if there is any. +func (c *FakeClusterCIDRs) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.ClusterCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootGetAction(clustercidrsResource, name), &v1alpha1.ClusterCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCIDR), err +} + +// List takes label and field selectors, and returns the list of ClusterCIDRs that match those selectors. +func (c *FakeClusterCIDRs) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.ClusterCIDRList, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootListAction(clustercidrsResource, clustercidrsKind, opts), &v1alpha1.ClusterCIDRList{}) + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &v1alpha1.ClusterCIDRList{ListMeta: obj.(*v1alpha1.ClusterCIDRList).ListMeta} + for _, item := range obj.(*v1alpha1.ClusterCIDRList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested clusterCIDRs. +func (c *FakeClusterCIDRs) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewRootWatchAction(clustercidrsResource, opts)) +} + +// Create takes the representation of a clusterCIDR and creates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *FakeClusterCIDRs) Create(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.CreateOptions) (result *v1alpha1.ClusterCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootCreateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCIDR), err +} + +// Update takes the representation of a clusterCIDR and updates it. Returns the server's representation of the clusterCIDR, and an error, if there is any. +func (c *FakeClusterCIDRs) Update(ctx context.Context, clusterCIDR *v1alpha1.ClusterCIDR, opts v1.UpdateOptions) (result *v1alpha1.ClusterCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootUpdateAction(clustercidrsResource, clusterCIDR), &v1alpha1.ClusterCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCIDR), err +} + +// Delete takes name of the clusterCIDR and deletes it. Returns an error if one occurs. 
+func (c *FakeClusterCIDRs) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewRootDeleteActionWithOptions(clustercidrsResource, name, opts), &v1alpha1.ClusterCIDR{}) + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeClusterCIDRs) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error { + action := testing.NewRootDeleteCollectionAction(clustercidrsResource, listOpts) + + _, err := c.Fake.Invokes(action, &v1alpha1.ClusterCIDRList{}) + return err +} + +// Patch applies the patch and returns the patched clusterCIDR. +func (c *FakeClusterCIDRs) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.ClusterCIDR, err error) { + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, name, pt, data, subresources...), &v1alpha1.ClusterCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCIDR), err +} + +// Apply takes the given apply declarative configuration, applies it and returns the applied clusterCIDR. +func (c *FakeClusterCIDRs) Apply(ctx context.Context, clusterCIDR *networkingv1alpha1.ClusterCIDRApplyConfiguration, opts v1.ApplyOptions) (result *v1alpha1.ClusterCIDR, err error) { + if clusterCIDR == nil { + return nil, fmt.Errorf("clusterCIDR provided to Apply must not be nil") + } + data, err := json.Marshal(clusterCIDR) + if err != nil { + return nil, err + } + name := clusterCIDR.Name + if name == nil { + return nil, fmt.Errorf("clusterCIDR.Name must be provided to Apply") + } + obj, err := c.Fake. + Invokes(testing.NewRootPatchSubresourceAction(clustercidrsResource, *name, types.ApplyPatchType, data), &v1alpha1.ClusterCIDR{}) + if obj == nil { + return nil, err + } + return obj.(*v1alpha1.ClusterCIDR), err +} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go new file mode 100644 index 00000000000..96979aa881d --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake/fake_networking_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1alpha1 "k8s.io/client-go/kubernetes/typed/networking/v1alpha1" + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" +) + +type FakeNetworkingV1alpha1 struct { + *testing.Fake +} + +func (c *FakeNetworkingV1alpha1) ClusterCIDRs() v1alpha1.ClusterCIDRInterface { + return &FakeClusterCIDRs{c} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeNetworkingV1alpha1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go new file mode 100644 index 00000000000..ab41abb7d01 --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterCIDRExpansion interface{} diff --git a/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go new file mode 100644 index 00000000000..ccb5933163c --- /dev/null +++ b/staging/src/k8s.io/client-go/kubernetes/typed/networking/v1alpha1/networking_client.go @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + v1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterCIDRsGetter +} + +// NetworkingV1alpha1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1alpha1Client) ClusterCIDRs() ClusterCIDRInterface { + return newClusterCIDRs(c) +} + +// NewForConfig creates a new NetworkingV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*NetworkingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new NetworkingV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
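+//
+// Editorial sketch, not part of the generated file: sharing one *http.Client across group
+// clients, assuming `config` is a *rest.Config and `ctx` is a context.Context:
+//
+//	httpClient, err := rest.HTTPClientFor(config)
+//	if err != nil {
+//		return err
+//	}
+//	nc, err := NewForConfigAndClient(config, httpClient)
+//	if err != nil {
+//		return err
+//	}
+//	ccs, err := nc.ClusterCIDRs().List(ctx, metav1.ListOptions{})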
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*NetworkingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &NetworkingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NetworkingV1alpha1Client { + return &NetworkingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go new file mode 100644 index 00000000000..dca9d7bf0cb --- /dev/null +++ b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/clustercidr.go @@ -0,0 +1,68 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" +) + +// ClusterCIDRLister helps list ClusterCIDRs. +// All objects returned here must be treated as read-only. +type ClusterCIDRLister interface { + // List lists all ClusterCIDRs in the indexer. + // Objects returned here must be treated as read-only. + List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) + // Get retrieves the ClusterCIDR from the index for a given name. + // Objects returned here must be treated as read-only. + Get(name string) (*v1alpha1.ClusterCIDR, error) + ClusterCIDRListerExpansion +} + +// clusterCIDRLister implements the ClusterCIDRLister interface. +type clusterCIDRLister struct { + indexer cache.Indexer +} + +// NewClusterCIDRLister returns a new ClusterCIDRLister. +func NewClusterCIDRLister(indexer cache.Indexer) ClusterCIDRLister { + return &clusterCIDRLister{indexer: indexer} +} + +// List lists all ClusterCIDRs in the indexer. 
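+//
+// Editorial sketch, not part of the generated file: reads are served from the informer's local
+// cache once it has synced, assuming `ccInformer` is a networking/v1alpha1 ClusterCIDRInformer:
+//
+//	lister := NewClusterCIDRLister(ccInformer.Informer().GetIndexer())
+//	all, err := lister.List(labels.Everything())
+//	one, err := lister.Get("cc-1")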
+func (s *clusterCIDRLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterCIDR, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1alpha1.ClusterCIDR)) + }) + return ret, err +} + +// Get retrieves the ClusterCIDR from the index for a given name. +func (s *clusterCIDRLister) Get(name string) (*v1alpha1.ClusterCIDR, error) { + obj, exists, err := s.indexer.GetByKey(name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1alpha1.Resource("clustercidr"), name) + } + return obj.(*v1alpha1.ClusterCIDR), nil +} diff --git a/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go new file mode 100644 index 00000000000..cdc328231a0 --- /dev/null +++ b/staging/src/k8s.io/client-go/listers/networking/v1alpha1/expansion_generated.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1alpha1 + +// ClusterCIDRListerExpansion allows custom methods to be added to +// ClusterCIDRLister. +type ClusterCIDRListerExpansion interface{} diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe.go b/staging/src/k8s.io/kubectl/pkg/describe/describe.go index a1efdd0af7b..484ed09f67c 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe.go @@ -33,7 +33,6 @@ import ( "unicode" "github.com/fatih/camelcase" - appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -46,6 +45,7 @@ import ( discoveryv1beta1 "k8s.io/api/discovery/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -213,6 +213,7 @@ func describerMap(clientConfig *rest.Config) (map[schema.GroupKind]ResourceDescr {Group: networkingv1beta1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, {Group: networkingv1.GroupName, Kind: "Ingress"}: &IngressDescriber{c}, {Group: networkingv1.GroupName, Kind: "IngressClass"}: &IngressClassDescriber{c}, + {Group: networkingv1alpha1.GroupName, Kind: "ClusterCIDR"}: &ClusterCIDRDescriber{c}, {Group: batchv1.GroupName, Kind: "Job"}: &JobDescriber{c}, {Group: batchv1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, {Group: batchv1beta1.GroupName, Kind: "CronJob"}: &CronJobDescriber{c}, @@ -2853,6 +2854,63 @@ func (i *IngressClassDescriber) describeIngressClassV1(ic *networkingv1.IngressC }) } +// ClusterCIDRDescriber generates information about a ClusterCIDR. 
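+// It is registered in describerMap above under {Group: networking.k8s.io, Kind: ClusterCIDR}, so
+// ClusterCIDR objects passed to `kubectl describe` are rendered by this describer (editorial note).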
+type ClusterCIDRDescriber struct { + client clientset.Interface +} + +func (c *ClusterCIDRDescriber) Describe(namespace, name string, describerSettings DescriberSettings) (string, error) { + var events *corev1.EventList + + ccV1alpha1, err := c.client.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), name, metav1.GetOptions{}) + if err == nil { + if describerSettings.ShowEvents { + events, _ = searchEvents(c.client.CoreV1(), ccV1alpha1, describerSettings.ChunkSize) + } + return c.describeClusterCIDRV1alpha1(ccV1alpha1, events) + } + return "", err +} + +func (c *ClusterCIDRDescriber) describeClusterCIDRV1alpha1(cc *networkingv1alpha1.ClusterCIDR, events *corev1.EventList) (string, error) { + return tabbedString(func(out io.Writer) error { + w := NewPrefixWriter(out) + w.Write(LEVEL_0, "Name:\t%v\n", cc.Name) + printLabelsMultiline(w, "Labels", cc.Labels) + printAnnotationsMultiline(w, "Annotations", cc.Annotations) + + w.Write(LEVEL_0, "NodeSelector:\n") + if cc.Spec.NodeSelector != nil { + w.Write(LEVEL_1, "NodeSelector Terms:") + if len(cc.Spec.NodeSelector.NodeSelectorTerms) == 0 { + w.WriteLine("") + } else { + w.WriteLine("") + for i, term := range cc.Spec.NodeSelector.NodeSelectorTerms { + printNodeSelectorTermsMultilineWithIndent(w, LEVEL_2, fmt.Sprintf("Term %v", i), "\t", term.MatchExpressions) + } + } + } + + if cc.Spec.PerNodeHostBits != 0 { + w.Write(LEVEL_0, "PerNodeHostBits:\t%s\n", fmt.Sprint(cc.Spec.PerNodeHostBits)) + } + + if cc.Spec.IPv4 != "" { + w.Write(LEVEL_0, "IPv4:\t%s\n", cc.Spec.IPv4) + } + + if cc.Spec.IPv6 != "" { + w.Write(LEVEL_0, "IPv6:\t%s\n", cc.Spec.IPv6) + } + + if events != nil { + DescribeEvents(events, w) + } + return nil + }) +} + // ServiceDescriber generates information about a service. type ServiceDescriber struct { clientset.Interface diff --git a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go index fe6b06c2895..5225ec2db13 100644 --- a/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go +++ b/staging/src/k8s.io/kubectl/pkg/describe/describe_test.go @@ -25,6 +25,7 @@ import ( "testing" "time" + "github.com/google/go-cmp/cmp" appsv1 "k8s.io/api/apps/v1" autoscalingv1 "k8s.io/api/autoscaling/v1" autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" @@ -34,6 +35,7 @@ import ( discoveryv1 "k8s.io/api/discovery/v1" discoveryv1beta1 "k8s.io/api/discovery/v1beta1" networkingv1 "k8s.io/api/networking/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" networkingv1beta1 "k8s.io/api/networking/v1beta1" policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -5371,6 +5373,64 @@ Events: ` + "\n", } } +func TestDescribeClusterCIDR(t *testing.T) { + + testcases := map[string]struct { + input *fake.Clientset + output string + }{ + "ClusterCIDR v1alpha1": { + input: fake.NewSimpleClientset(&networkingv1alpha1.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{ + Name: "foo.123", + }, + Spec: networkingv1alpha1.ClusterCIDRSpec{ + PerNodeHostBits: int32(8), + IPv4: "10.1.0.0/16", + IPv6: "fd00:1:1::/64", + NodeSelector: &corev1.NodeSelector{ + NodeSelectorTerms: []corev1.NodeSelectorTerm{ + { + MatchExpressions: []corev1.NodeSelectorRequirement{ + { + Key: "foo", + Operator: "In", + Values: []string{"bar"}}, + }, + }, + }, + }, + }, + }), + + output: `Name: foo.123 +Labels: +Annotations: +NodeSelector: + NodeSelector Terms: + Term 0: foo in [bar] +PerNodeHostBits: 8 +IPv4: 10.1.0.0/16 +IPv6: fd00:1:1::/64 +Events: ` + "\n", + }, + } + + for name, tc := range 
testcases { + t.Run(name, func(t *testing.T) { + c := &describeClient{T: t, Namespace: "foo", Interface: tc.input} + d := ClusterCIDRDescriber{c} + out, err := d.Describe("bar", "foo.123", DescriberSettings{ShowEvents: true}) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if out != tc.output { + t.Errorf("expected :\n%s\nbut got output:\n%s diff:\n%s", tc.output, out, cmp.Diff(tc.output, out)) + } + }) + } +} + func TestControllerRef(t *testing.T) { var replicas int32 = 1 f := fake.NewSimpleClientset( diff --git a/test/integration/clustercidr/ipam_test.go b/test/integration/clustercidr/ipam_test.go new file mode 100644 index 00000000000..167370e157c --- /dev/null +++ b/test/integration/clustercidr/ipam_test.go @@ -0,0 +1,249 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustercidr + +import ( + "context" + "net" + "reflect" + "testing" + "time" + + v1 "k8s.io/api/core/v1" + networkingv1alpha1 "k8s.io/api/networking/v1alpha1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + utilfeature "k8s.io/apiserver/pkg/util/feature" + "k8s.io/client-go/informers" + clientset "k8s.io/client-go/kubernetes" + featuregatetesting "k8s.io/component-base/featuregate/testing" + "k8s.io/kubernetes/cmd/kube-apiserver/app/options" + "k8s.io/kubernetes/pkg/controller/nodeipam" + "k8s.io/kubernetes/pkg/controller/nodeipam/ipam" + "k8s.io/kubernetes/pkg/features" + "k8s.io/kubernetes/test/integration/framework" + netutils "k8s.io/utils/net" +) + +func TestIPAMMultiCIDRRangeAllocatorType(t *testing.T) { + + // set the feature gate to enable MultiCIDRRangeAllocator + defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.MultiCIDRRangeAllocator, true)() + + _, kubeConfig, tearDownFn := framework.StartTestServer(t, framework.TestServerSetup{ + ModifyServerRunOptions: func(opts *options.ServerRunOptions) { + // Disable ServiceAccount admission plugin as we don't have serviceaccount controller running. 
+ opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition"} + opts.APIEnablement.RuntimeConfig.Set("networking.k8s.io/v1alpha1=true") + }, + }) + defer tearDownFn() + + clientSet := clientset.NewForConfigOrDie(kubeConfig) + sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) + + ipamController := booststrapMultiCIDRRangeAllocator(t, clientSet, sharedInformer) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + go ipamController.Run(ctx.Done()) + sharedInformer.Start(ctx.Done()) + + tests := []struct { + name string + clusterCIDR *networkingv1alpha1.ClusterCIDR + node *v1.Node + expectedPodCIDRs []string + }{ + { + name: "Default dualstack Pod CIDRs assigned to a node, node labels matching no ClusterCIDR nodeSelectors", + clusterCIDR: nil, + node: makeNode("default-node", map[string]string{"label": "unmatched"}), + expectedPodCIDRs: []string{"10.96.0.0/24", "fd00:10:96::/120"}, + }, + { + name: "Dualstack Pod CIDRs assigned to a node from a CC created during bootstrap", + clusterCIDR: nil, + node: makeNode("bootstrap-node", map[string]string{"bootstrap": "true"}), + expectedPodCIDRs: []string{"10.2.1.0/24", "fd00:20:96::100/120"}, + }, + { + name: "Single stack IPv4 Pod CIDR assigned to a node", + clusterCIDR: makeClusterCIDR("ipv4-cc", "10.0.0.0/16", "", nodeSelector(map[string][]string{"ipv4": {"true"}, "singlestack": {"true"}})), + node: makeNode("ipv4-node", map[string]string{"ipv4": "true", "singlestack": "true"}), + expectedPodCIDRs: []string{"10.0.0.0/24"}, + }, + { + name: "Single stack IPv6 Pod CIDR assigned to a node", + clusterCIDR: makeClusterCIDR("ipv6-cc", "", "fd00:20:100::/112", nodeSelector(map[string][]string{"ipv6": {"true"}})), + node: makeNode("ipv6-node", map[string]string{"ipv6": "true"}), + expectedPodCIDRs: []string{"fd00:20:100::/120"}, + }, + { + name: "DualStack Pod CIDRs assigned to a node", + clusterCIDR: makeClusterCIDR("dualstack-cc", "192.168.0.0/16", "fd00:30:100::/112", nodeSelector(map[string][]string{"ipv4": {"true"}, "ipv6": {"true"}})), + node: makeNode("dualstack-node", map[string]string{"ipv4": "true", "ipv6": "true"}), + expectedPodCIDRs: []string{"192.168.0.0/24", "fd00:30:100::/120"}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.clusterCIDR != nil { + // Create the test ClusterCIDR + if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), test.clusterCIDR, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + // Wait for the ClusterCIDR to be created + if err := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + cc, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Get(context.TODO(), test.clusterCIDR.Name, metav1.GetOptions{}) + if err != nil { + return false, err + } + return cc != nil, nil + }); err != nil { + t.Fatalf("failed while waiting for ClusterCIDR %q to be created: %v", test.clusterCIDR.Name, err) + } + } + + // Sleep for one second to make sure the controller process the new created ClusterCIDR. 
+ time.Sleep(1 * time.Second) + + if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), test.node, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + if gotPodCIDRs, err := nodePodCIDRs(clientSet, test.node.Name); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(test.expectedPodCIDRs, gotPodCIDRs) { + t.Errorf("unexpected result, expected Pod CIDRs %v but got %v", test.expectedPodCIDRs, gotPodCIDRs) + } + }) + } +} + +func booststrapMultiCIDRRangeAllocator(t *testing.T, + clientSet clientset.Interface, + sharedInformer informers.SharedInformerFactory, +) *nodeipam.Controller { + _, clusterCIDRv4, _ := netutils.ParseCIDRSloppy("10.96.0.0/12") // allows up to 8K nodes + _, clusterCIDRv6, _ := netutils.ParseCIDRSloppy("fd00:10:96::/112") // allows up to 8K nodes + _, serviceCIDR, _ := netutils.ParseCIDRSloppy("10.94.0.0/24") // does not matter for test - pick upto 250 services + _, secServiceCIDR, _ := netutils.ParseCIDRSloppy("2001:db2::/120") // does not matter for test - pick upto 250 services + + // order is ipv4 - ipv6 by convention for dual stack + clusterCIDRs := []*net.IPNet{clusterCIDRv4, clusterCIDRv6} + nodeMaskCIDRs := []int{24, 120} + + // set the current state of the informer, we can preseed nodes and ClusterCIDRs so we + // can simulate the bootstrap + initialCC := makeClusterCIDR("initial-cc", "10.2.0.0/16", "fd00:20:96::/112", nodeSelector(map[string][]string{"bootstrap": {"true"}})) + if _, err := clientSet.NetworkingV1alpha1().ClusterCIDRs().Create(context.TODO(), initialCC, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + initialNode := makeNode("initial-node", map[string]string{"bootstrap": "true"}) + if _, err := clientSet.CoreV1().Nodes().Create(context.TODO(), initialNode, metav1.CreateOptions{}); err != nil { + t.Fatal(err) + } + + ipamController, err := nodeipam.NewNodeIpamController( + sharedInformer.Core().V1().Nodes(), + sharedInformer.Networking().V1alpha1().ClusterCIDRs(), + nil, + clientSet, + clusterCIDRs, + serviceCIDR, + secServiceCIDR, + nodeMaskCIDRs, + ipam.MultiCIDRRangeAllocatorType, + ) + if err != nil { + t.Fatal(err) + } + + return ipamController +} + +func makeNode(name string, labels map[string]string) *v1.Node { + return &v1.Node{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Labels: labels, + }, + Status: v1.NodeStatus{ + Capacity: v1.ResourceList{ + v1.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI), + v1.ResourceCPU: resource.MustParse("4"), + v1.ResourceMemory: resource.MustParse("32Gi"), + }, + Phase: v1.NodeRunning, + Conditions: []v1.NodeCondition{ + {Type: v1.NodeReady, Status: v1.ConditionTrue}, + }, + }, + } +} + +func makeClusterCIDR(name, ipv4CIDR, ipv6CIDR string, nodeSelector *v1.NodeSelector) *networkingv1alpha1.ClusterCIDR { + return &networkingv1alpha1.ClusterCIDR{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Spec: networkingv1alpha1.ClusterCIDRSpec{ + PerNodeHostBits: 8, + IPv4: ipv4CIDR, + IPv6: ipv6CIDR, + NodeSelector: nodeSelector, + }, + } +} + +func nodeSelector(labels map[string][]string) *v1.NodeSelector { + testNodeSelector := &v1.NodeSelector{} + + for key, values := range labels { + nst := v1.NodeSelectorTerm{ + MatchExpressions: []v1.NodeSelectorRequirement{ + { + Key: key, + Operator: v1.NodeSelectorOpIn, + Values: values, + }, + }, + } + testNodeSelector.NodeSelectorTerms = append(testNodeSelector.NodeSelectorTerms, nst) + } + + return testNodeSelector +} + +func nodePodCIDRs(c clientset.Interface, name string) ([]string, error) { + var node *v1.Node + 
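+ // Poll until the ipam controller has populated node.Spec.PodCIDRs. Editorial note: if the Get
+ // call below ever fails, `node` stays nil and the dereference on the final return would panic;
+ // the test relies on the poll succeeding within the timeout.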
nodePollErr := wait.PollImmediate(time.Second, 5*time.Second, func() (bool, error) { + var err error + node, err = c.CoreV1().Nodes().Get(context.TODO(), name, metav1.GetOptions{}) + if err != nil { + return false, err + } + return len(node.Spec.PodCIDRs) > 0, nil + }) + + return node.Spec.PodCIDRs, nodePollErr +} diff --git a/test/integration/clustercidr/main_test.go b/test/integration/clustercidr/main_test.go new file mode 100644 index 00000000000..cf920a27032 --- /dev/null +++ b/test/integration/clustercidr/main_test.go @@ -0,0 +1,27 @@ +/* +Copyright 2022 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clustercidr + +import ( + "testing" + + "k8s.io/kubernetes/test/integration/framework" +) + +func TestMain(m *testing.M) { + framework.EtcdMain(m.Run) +} diff --git a/test/integration/etcd/data.go b/test/integration/etcd/data.go index 567c4a544a9..a82e8de048a 100644 --- a/test/integration/etcd/data.go +++ b/test/integration/etcd/data.go @@ -234,6 +234,13 @@ func GetEtcdStorageDataForNamespace(namespace string) map[schema.GroupVersionRes }, // -- + // k8s.io/kubernetes/pkg/apis/networking/v1alpha1 + gvr("networking.k8s.io", "v1alpha1", "clustercidrs"): { + Stub: `{"metadata": {"name": "clustercidr1"}, "spec": {"perNodeHostBits": 8, "ipv4": "192.168.4.0/24", "ipv6": "fd00:1::/120", "nodeSelector": null}}`, + ExpectedEtcdPath: "/registry/clustercidrs/clustercidr1", + }, + // -- + // k8s.io/kubernetes/pkg/apis/policy/v1 gvr("policy", "v1", "poddisruptionbudgets"): { Stub: `{"metadata": {"name": "pdbv1"}, "spec": {"selector": {"matchLabels": {"anokkey": "anokvalue"}}}}`, diff --git a/test/integration/ipamperf/ipam_test.go b/test/integration/ipamperf/ipam_test.go index 90e931358b5..a0f35627978 100644 --- a/test/integration/ipamperf/ipam_test.go +++ b/test/integration/ipamperf/ipam_test.go @@ -50,8 +50,10 @@ func setupAllocator(kubeConfig *restclient.Config, config *Config, clusterCIDR, sharedInformer := informers.NewSharedInformerFactory(clientSet, 1*time.Hour) ipamController, err := nodeipam.NewNodeIpamController( - sharedInformer.Core().V1().Nodes(), config.Cloud, clientSet, - []*net.IPNet{clusterCIDR}, serviceCIDR, nil, []int{subnetMaskSize}, config.AllocatorType, + sharedInformer.Core().V1().Nodes(), + sharedInformer.Networking().V1alpha1().ClusterCIDRs(), + config.Cloud, clientSet, []*net.IPNet{clusterCIDR}, serviceCIDR, nil, + []int{subnetMaskSize}, config.AllocatorType, ) if err != nil { return nil, shutdownFunc, err diff --git a/vendor/modules.txt b/vendor/modules.txt index 7ab21e5547c..56049f95757 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1392,6 +1392,7 @@ k8s.io/api/flowcontrol/v1beta1 k8s.io/api/flowcontrol/v1beta2 k8s.io/api/imagepolicy/v1alpha1 k8s.io/api/networking/v1 +k8s.io/api/networking/v1alpha1 k8s.io/api/networking/v1beta1 k8s.io/api/node/v1 k8s.io/api/node/v1alpha1 @@ -1722,6 +1723,7 @@ k8s.io/client-go/applyconfigurations/imagepolicy/v1alpha1 k8s.io/client-go/applyconfigurations/internal k8s.io/client-go/applyconfigurations/meta/v1 
k8s.io/client-go/applyconfigurations/networking/v1 +k8s.io/client-go/applyconfigurations/networking/v1alpha1 k8s.io/client-go/applyconfigurations/networking/v1beta1 k8s.io/client-go/applyconfigurations/node/v1 k8s.io/client-go/applyconfigurations/node/v1alpha1 @@ -1787,6 +1789,7 @@ k8s.io/client-go/informers/flowcontrol/v1beta2 k8s.io/client-go/informers/internalinterfaces k8s.io/client-go/informers/networking k8s.io/client-go/informers/networking/v1 +k8s.io/client-go/informers/networking/v1alpha1 k8s.io/client-go/informers/networking/v1beta1 k8s.io/client-go/informers/node k8s.io/client-go/informers/node/v1 @@ -1870,6 +1873,8 @@ k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2 k8s.io/client-go/kubernetes/typed/flowcontrol/v1beta2/fake k8s.io/client-go/kubernetes/typed/networking/v1 k8s.io/client-go/kubernetes/typed/networking/v1/fake +k8s.io/client-go/kubernetes/typed/networking/v1alpha1 +k8s.io/client-go/kubernetes/typed/networking/v1alpha1/fake k8s.io/client-go/kubernetes/typed/networking/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1beta1/fake k8s.io/client-go/kubernetes/typed/node/v1 @@ -1926,6 +1931,7 @@ k8s.io/client-go/listers/flowcontrol/v1alpha1 k8s.io/client-go/listers/flowcontrol/v1beta1 k8s.io/client-go/listers/flowcontrol/v1beta2 k8s.io/client-go/listers/networking/v1 +k8s.io/client-go/listers/networking/v1alpha1 k8s.io/client-go/listers/networking/v1beta1 k8s.io/client-go/listers/node/v1 k8s.io/client-go/listers/node/v1alpha1